|
@@ -32,6 +32,7 @@
|
|
|
|
|
|
#include <linux/platform_device.h>
|
|
|
#include <linux/acpi.h>
|
|
|
+#include <linux/etherdevice.h>
|
|
|
#include <rdma/ib_umem.h>
|
|
|
#include "hns_roce_common.h"
|
|
|
#include "hns_roce_device.h"
|
|
@@ -72,6 +73,8 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|
|
int nreq = 0;
|
|
|
u32 ind = 0;
|
|
|
int ret = 0;
|
|
|
+ u8 *smac;
|
|
|
+ int loopback;
|
|
|
|
|
|
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
|
|
|
ibqp->qp_type != IB_QPT_RC)) {
|
|
@@ -129,6 +132,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
|
|
UD_SEND_WQE_U32_8_DMAC_5_M,
|
|
|
UD_SEND_WQE_U32_8_DMAC_5_S,
|
|
|
ah->av.mac[5]);
|
|
|
+
|
|
|
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
|
|
|
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
|
|
|
+ smac) ? 1 : 0;
|
|
|
+ roce_set_bit(ud_sq_wqe->u32_8,
|
|
|
+ UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
|
|
|
+ loopback);
|
|
|
+
|
|
|
roce_set_field(ud_sq_wqe->u32_8,
|
|
|
UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
|
|
|
UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
|
|
@@ -284,6 +295,8 @@ out:
|
|
|
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
|
|
|
SQ_DOORBELL_U32_4_SQ_HEAD_S,
|
|
|
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
|
|
|
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
|
|
|
+ SQ_DOORBELL_U32_4_SL_S, qp->sl);
|
|
|
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
|
|
|
SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
|
|
|
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
|
|
@@ -611,6 +624,213 @@ ext_sdb_buf_fail_out:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+/*
+ * Create one reserved loopback RC QP used by the MR-free flow.
+ * Returns the new hns_roce_qp on success or an ERR_PTR on failure
+ * (callers check the result with IS_ERR()).
+ */
+static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
+						    struct ib_pd *pd)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct ib_qp_init_attr init_attr;
+	struct ib_qp *qp;
+
+	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
+	init_attr.qp_type = IB_QPT_RC;
+	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
+	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
+
+	qp = hns_roce_create_qp(pd, &init_attr, NULL);
+	if (IS_ERR(qp)) {
+		dev_err(dev, "Create loop qp for mr free failed!");
+		/*
+		 * Propagate the error pointer instead of returning NULL:
+		 * the caller tests the result with IS_ERR(), which is
+		 * false for NULL and would silently miss this failure.
+		 */
+		return ERR_CAST(qp);
+	}
+
+	return to_hr_qp(qp);
+}
|
|
|
+
|
|
|
+static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct hns_roce_caps *caps = &hr_dev->caps;
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct ib_cq_init_attr cq_init_attr;
|
|
|
+ struct hns_roce_free_mr *free_mr;
|
|
|
+ struct ib_qp_attr attr = { 0 };
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_qp *hr_qp;
|
|
|
+ struct ib_cq *cq;
|
|
|
+ struct ib_pd *pd;
|
|
|
+ u64 subnet_prefix;
|
|
|
+ int attr_mask = 0;
|
|
|
+ int i;
|
|
|
+ int ret;
|
|
|
+ u8 phy_port;
|
|
|
+ u8 sl;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ free_mr = &priv->free_mr;
|
|
|
+
|
|
|
+ /* Reserved cq for loop qp */
|
|
|
+ cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
|
|
|
+ cq_init_attr.comp_vector = 0;
|
|
|
+ cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
|
|
|
+ if (IS_ERR(cq)) {
|
|
|
+ dev_err(dev, "Create cq for reseved loop qp failed!");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ free_mr->mr_free_cq = to_hr_cq(cq);
|
|
|
+ free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
|
|
|
+ free_mr->mr_free_cq->ib_cq.uobject = NULL;
|
|
|
+ free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
|
|
|
+ free_mr->mr_free_cq->ib_cq.event_handler = NULL;
|
|
|
+ free_mr->mr_free_cq->ib_cq.cq_context = NULL;
|
|
|
+ atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
|
|
|
+
|
|
|
+ pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
|
|
|
+ if (IS_ERR(pd)) {
|
|
|
+ dev_err(dev, "Create pd for reseved loop qp failed!");
|
|
|
+ ret = -ENOMEM;
|
|
|
+ goto alloc_pd_failed;
|
|
|
+ }
|
|
|
+ free_mr->mr_free_pd = to_hr_pd(pd);
|
|
|
+ free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
|
|
|
+ free_mr->mr_free_pd->ibpd.uobject = NULL;
|
|
|
+ atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
|
|
|
+
|
|
|
+ attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
|
|
|
+ attr.pkey_index = 0;
|
|
|
+ attr.min_rnr_timer = 0;
|
|
|
+ /* Disable read ability */
|
|
|
+ attr.max_dest_rd_atomic = 0;
|
|
|
+ attr.max_rd_atomic = 0;
|
|
|
+ /* Use arbitrary values as rq_psn and sq_psn */
|
|
|
+ attr.rq_psn = 0x0808;
|
|
|
+ attr.sq_psn = 0x0808;
|
|
|
+ attr.retry_cnt = 7;
|
|
|
+ attr.rnr_retry = 7;
|
|
|
+ attr.timeout = 0x12;
|
|
|
+ attr.path_mtu = IB_MTU_256;
|
|
|
+ attr.ah_attr.ah_flags = 1;
|
|
|
+ attr.ah_attr.static_rate = 3;
|
|
|
+ attr.ah_attr.grh.sgid_index = 0;
|
|
|
+ attr.ah_attr.grh.hop_limit = 1;
|
|
|
+ attr.ah_attr.grh.flow_label = 0;
|
|
|
+ attr.ah_attr.grh.traffic_class = 0;
|
|
|
+
|
|
|
+ subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
|
|
|
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
|
|
|
+ free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
|
|
|
+ if (IS_ERR(free_mr->mr_free_qp[i])) {
|
|
|
+ dev_err(dev, "Create loop qp failed!\n");
|
|
|
+ goto create_lp_qp_failed;
|
|
|
+ }
|
|
|
+ hr_qp = free_mr->mr_free_qp[i];
|
|
|
+
|
|
|
+ sl = i / caps->num_ports;
|
|
|
+
|
|
|
+ if (caps->num_ports == HNS_ROCE_MAX_PORTS)
|
|
|
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
|
|
|
+ (i % caps->num_ports);
|
|
|
+ else
|
|
|
+ phy_port = i % caps->num_ports;
|
|
|
+
|
|
|
+ hr_qp->port = phy_port + 1;
|
|
|
+ hr_qp->phy_port = phy_port;
|
|
|
+ hr_qp->ibqp.qp_type = IB_QPT_RC;
|
|
|
+ hr_qp->ibqp.device = &hr_dev->ib_dev;
|
|
|
+ hr_qp->ibqp.uobject = NULL;
|
|
|
+ atomic_set(&hr_qp->ibqp.usecnt, 0);
|
|
|
+ hr_qp->ibqp.pd = pd;
|
|
|
+ hr_qp->ibqp.recv_cq = cq;
|
|
|
+ hr_qp->ibqp.send_cq = cq;
|
|
|
+
|
|
|
+ attr.ah_attr.port_num = phy_port + 1;
|
|
|
+ attr.ah_attr.sl = sl;
|
|
|
+ attr.port_num = phy_port + 1;
|
|
|
+
|
|
|
+ attr.dest_qp_num = hr_qp->qpn;
|
|
|
+ memcpy(attr.ah_attr.dmac, hr_dev->dev_addr[phy_port],
|
|
|
+ MAC_ADDR_OCTET_NUM);
|
|
|
+
|
|
|
+ memcpy(attr.ah_attr.grh.dgid.raw,
|
|
|
+ &subnet_prefix, sizeof(u64));
|
|
|
+ memcpy(&attr.ah_attr.grh.dgid.raw[8],
|
|
|
+ hr_dev->dev_addr[phy_port], 3);
|
|
|
+ memcpy(&attr.ah_attr.grh.dgid.raw[13],
|
|
|
+ hr_dev->dev_addr[phy_port] + 3, 3);
|
|
|
+ attr.ah_attr.grh.dgid.raw[11] = 0xff;
|
|
|
+ attr.ah_attr.grh.dgid.raw[12] = 0xfe;
|
|
|
+ attr.ah_attr.grh.dgid.raw[8] ^= 2;
|
|
|
+
|
|
|
+ attr_mask |= IB_QP_PORT;
|
|
|
+
|
|
|
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
|
|
|
+ IB_QPS_RESET, IB_QPS_INIT);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
|
|
|
+ goto create_lp_qp_failed;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
|
|
|
+ IB_QPS_INIT, IB_QPS_RTR);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
|
|
|
+ goto create_lp_qp_failed;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
|
|
|
+ IB_QPS_RTR, IB_QPS_RTS);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "modify qp failed(%d)!\n", ret);
|
|
|
+ goto create_lp_qp_failed;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+
|
|
|
+create_lp_qp_failed:
|
|
|
+ for (i -= 1; i >= 0; i--) {
|
|
|
+ hr_qp = free_mr->mr_free_qp[i];
|
|
|
+ if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
|
|
|
+ dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (hns_roce_dealloc_pd(pd))
|
|
|
+ dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
|
|
|
+
|
|
|
+alloc_pd_failed:
|
|
|
+ if (hns_roce_ib_destroy_cq(cq))
|
|
|
+ dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
|
|
|
+
|
|
|
+ return -EINVAL;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_free_mr *free_mr;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_qp *hr_qp;
|
|
|
+ int ret;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ free_mr = &priv->free_mr;
|
|
|
+
|
|
|
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
|
|
|
+ hr_qp = free_mr->mr_free_qp[i];
|
|
|
+ ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
|
|
|
+ if (ret)
|
|
|
+ dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
|
|
|
+ i, ret);
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
|
|
|
+ if (ret)
|
|
|
+ dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
|
|
|
+
|
|
|
+ ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
|
|
|
+ if (ret)
|
|
|
+ dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
|
|
|
+}
|
|
|
+
|
|
|
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
|
|
|
{
|
|
|
struct device *dev = &hr_dev->pdev->dev;
|
|
@@ -648,6 +868,223 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct hns_roce_recreate_lp_qp_work *lp_qp_work;
|
|
|
+ struct hns_roce_dev *hr_dev;
|
|
|
+
|
|
|
+ lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
|
|
|
+ work);
|
|
|
+ hr_dev = to_hr_dev(lp_qp_work->ib_dev);
|
|
|
+
|
|
|
+ hns_roce_v1_release_lp_qp(hr_dev);
|
|
|
+
|
|
|
+ if (hns_roce_v1_rsv_lp_qp(hr_dev))
|
|
|
+ dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");
|
|
|
+
|
|
|
+ if (lp_qp_work->comp_flag)
|
|
|
+ complete(lp_qp_work->comp);
|
|
|
+
|
|
|
+ kfree(lp_qp_work);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Queue work that tears down and re-creates the reserved loopback QPs
+ * (needed after a MAC change), then wait for it with a bounded poll.
+ * Returns 0 on success, -ENOMEM if the work item cannot be allocated,
+ * -ETIMEDOUT if the work does not complete in time (the work fn then
+ * frees the item itself once comp_flag is cleared).
+ */
+static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	struct completion comp;
+	unsigned long end =
+	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
+			     GFP_KERNEL);
+	/* Check the allocation before INIT_WORK/member writes dereference it */
+	if (!lp_qp_work)
+		return -ENOMEM;
+
+	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
+
+	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
+	lp_qp_work->comp = &comp;
+	lp_qp_work->comp_flag = 1;
+
+	init_completion(lp_qp_work->comp);
+
+	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+	while (time_before_eq(jiffies, end)) {
+		if (try_wait_for_completion(&comp))
+			return 0;
+		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
+	}
+
+	/* Timed out: tell the work fn not to complete() a stale stack object */
+	lp_qp_work->comp_flag = 0;
+	if (try_wait_for_completion(&comp))
+		return 0;
+
+	dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
+	return -ETIMEDOUT;
+}
|
|
|
+
|
|
|
+static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
|
|
|
+{
|
|
|
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct ib_send_wr send_wr, *bad_wr;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ memset(&send_wr, 0, sizeof(send_wr));
|
|
|
+ send_wr.next = NULL;
|
|
|
+ send_wr.num_sge = 0;
|
|
|
+ send_wr.send_flags = 0;
|
|
|
+ send_wr.sg_list = NULL;
|
|
|
+ send_wr.wr_id = (unsigned long long)&send_wr;
|
|
|
+ send_wr.opcode = IB_WR_RDMA_WRITE;
|
|
|
+
|
|
|
+ ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct hns_roce_mr_free_work *mr_work;
|
|
|
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
|
|
|
+ struct hns_roce_free_mr *free_mr;
|
|
|
+ struct hns_roce_cq *mr_free_cq;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_dev *hr_dev;
|
|
|
+ struct hns_roce_mr *hr_mr;
|
|
|
+ struct hns_roce_qp *hr_qp;
|
|
|
+ struct device *dev;
|
|
|
+ unsigned long end =
|
|
|
+ msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
|
|
|
+ int i;
|
|
|
+ int ret;
|
|
|
+ int ne;
|
|
|
+
|
|
|
+ mr_work = container_of(work, struct hns_roce_mr_free_work, work);
|
|
|
+ hr_mr = (struct hns_roce_mr *)mr_work->mr;
|
|
|
+ hr_dev = to_hr_dev(mr_work->ib_dev);
|
|
|
+ dev = &hr_dev->pdev->dev;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ free_mr = &priv->free_mr;
|
|
|
+ mr_free_cq = free_mr->mr_free_cq;
|
|
|
+
|
|
|
+ for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
|
|
|
+ hr_qp = free_mr->mr_free_qp[i];
|
|
|
+ ret = hns_roce_v1_send_lp_wqe(hr_qp);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev,
|
|
|
+ "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
|
|
|
+ hr_qp->qpn, ret);
|
|
|
+ goto free_work;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ ne = HNS_ROCE_V1_RESV_QP;
|
|
|
+ do {
|
|
|
+ ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
|
|
|
+ if (ret < 0) {
|
|
|
+ dev_err(dev,
|
|
|
+ "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
|
|
|
+ hr_qp->qpn, ret, hr_mr->key, ne);
|
|
|
+ goto free_work;
|
|
|
+ }
|
|
|
+ ne -= ret;
|
|
|
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
|
|
|
+ } while (ne && time_before_eq(jiffies, end));
|
|
|
+
|
|
|
+ if (ne != 0)
|
|
|
+ dev_err(dev,
|
|
|
+ "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
|
|
|
+ hr_mr->key, ne);
|
|
|
+
|
|
|
+free_work:
|
|
|
+ if (mr_work->comp_flag)
|
|
|
+ complete(mr_work->comp);
|
|
|
+ kfree(mr_work);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Free an MR: destroy its MPT entry, then queue the loopback-WQE flush
+ * work and wait (bounded) for it before releasing the MR's resources.
+ * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT if
+ * the flush work does not finish in time (resources are still freed).
+ */
+int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_mr_free_work *mr_work;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	struct completion comp;
+	unsigned long end =
+		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+	unsigned long start = jiffies;
+	int npages;
+	int ret = 0;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	if (mr->enabled) {
+		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+				       & (hr_dev->caps.num_mtpts - 1)))
+			dev_warn(dev, "HW2SW_MPT failed!\n");
+	}
+
+	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
+	if (!mr_work) {
+		ret = -ENOMEM;
+		goto free_mr;
+	}
+
+	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
+
+	mr_work->ib_dev = &(hr_dev->ib_dev);
+	mr_work->comp = &comp;
+	mr_work->comp_flag = 1;
+	mr_work->mr = (void *)mr;
+	init_completion(mr_work->comp);
+
+	queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+	while (time_before_eq(jiffies, end)) {
+		if (try_wait_for_completion(&comp))
+			goto free_mr;
+		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+	}
+
+	/* Timed out: tell the work fn not to complete() a stale stack object */
+	mr_work->comp_flag = 0;
+	if (try_wait_for_completion(&comp))
+		goto free_mr;
+
+	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
+	ret = -ETIMEDOUT;
+
+free_mr:
+	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
+		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
+
+	if (mr->size != ~0ULL) {
+		npages = ib_umem_page_count(mr->umem);
+		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+				  mr->pbl_dma_addr);
+	}
+
+	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+			     key_to_hw_index(mr->key), 0);
+
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+
+	kfree(mr);
+
+	return ret;
+}
|
|
|
+
|
|
|
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
|
|
|
{
|
|
|
struct device *dev = &hr_dev->pdev->dev;
|
|
@@ -849,6 +1286,85 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
|
|
|
priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
|
|
|
}
|
|
|
|
|
|
+static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_buf_list *tptr_buf;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ tptr_buf = &priv->tptr_table.tptr_buf;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * This buffer will be used for CQ's tptr(tail pointer), also
|
|
|
+	 * named ci(consumer index). Every CQ will use 2 bytes to save
|
|
|
+ * cqe ci in hip06. Hardware will read this area to get new ci
|
|
|
+ * when the queue is almost full.
|
|
|
+ */
|
|
|
+ tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
|
|
|
+ &tptr_buf->map, GFP_KERNEL);
|
|
|
+ if (!tptr_buf->buf)
|
|
|
+ return -ENOMEM;
|
|
|
+
|
|
|
+ hr_dev->tptr_dma_addr = tptr_buf->map;
|
|
|
+ hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_buf_list *tptr_buf;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ tptr_buf = &priv->tptr_table.tptr_buf;
|
|
|
+
|
|
|
+ dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
|
|
|
+ tptr_buf->buf, tptr_buf->map);
|
|
|
+}
|
|
|
+
|
|
|
+static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_free_mr *free_mr;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ free_mr = &priv->free_mr;
|
|
|
+
|
|
|
+ free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
|
|
|
+ if (!free_mr->free_mr_wq) {
|
|
|
+ dev_err(dev, "Create free mr workqueue failed!\n");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hns_roce_v1_rsv_lp_qp(hr_dev);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
|
|
|
+ flush_workqueue(free_mr->free_mr_wq);
|
|
|
+ destroy_workqueue(free_mr->free_mr_wq);
|
|
|
+ }
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct hns_roce_free_mr *free_mr;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ free_mr = &priv->free_mr;
|
|
|
+
|
|
|
+ flush_workqueue(free_mr->free_mr_wq);
|
|
|
+ destroy_workqueue(free_mr->free_mr_wq);
|
|
|
+
|
|
|
+ hns_roce_v1_release_lp_qp(hr_dev);
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* hns_roce_v1_reset - reset RoCE
|
|
|
* @hr_dev: RoCE device struct pointer
|
|
@@ -898,6 +1414,38 @@ int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_des_qp *des_qp;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ des_qp = &priv->des_qp;
|
|
|
+
|
|
|
+ des_qp->requeue_flag = 1;
|
|
|
+ des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
|
|
|
+ if (!des_qp->qp_wq) {
|
|
|
+ dev_err(dev, "Create destroy qp workqueue failed!\n");
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
|
|
|
+{
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_des_qp *des_qp;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ des_qp = &priv->des_qp;
|
|
|
+
|
|
|
+ des_qp->requeue_flag = 0;
|
|
|
+ flush_workqueue(des_qp->qp_wq);
|
|
|
+ destroy_workqueue(des_qp->qp_wq);
|
|
|
+}
|
|
|
+
|
|
|
void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
|
|
|
{
|
|
|
int i = 0;
|
|
@@ -906,12 +1454,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
|
|
|
hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
|
|
|
hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
|
|
|
ROCEE_VENDOR_PART_ID_REG));
|
|
|
- hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
|
|
|
-
|
|
|
hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
|
|
|
ROCEE_SYS_IMAGE_GUID_L_REG)) |
|
|
|
((u64)le32_to_cpu(roce_read(hr_dev,
|
|
|
ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
|
|
|
+ hr_dev->hw_rev = HNS_ROCE_HW_VER1;
|
|
|
|
|
|
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
|
|
|
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
|
|
@@ -1001,18 +1548,44 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
|
|
|
goto error_failed_raq_init;
|
|
|
}
|
|
|
|
|
|
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
|
|
|
-
|
|
|
ret = hns_roce_bt_init(hr_dev);
|
|
|
if (ret) {
|
|
|
dev_err(dev, "bt init failed!\n");
|
|
|
goto error_failed_bt_init;
|
|
|
}
|
|
|
|
|
|
+ ret = hns_roce_tptr_init(hr_dev);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "tptr init failed!\n");
|
|
|
+ goto error_failed_tptr_init;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hns_roce_des_qp_init(hr_dev);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "des qp init failed!\n");
|
|
|
+ goto error_failed_des_qp_init;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = hns_roce_free_mr_init(hr_dev);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "free mr init failed!\n");
|
|
|
+ goto error_failed_free_mr_init;
|
|
|
+ }
|
|
|
+
|
|
|
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
|
|
|
+
|
|
|
return 0;
|
|
|
|
|
|
+error_failed_free_mr_init:
|
|
|
+ hns_roce_des_qp_free(hr_dev);
|
|
|
+
|
|
|
+error_failed_des_qp_init:
|
|
|
+ hns_roce_tptr_free(hr_dev);
|
|
|
+
|
|
|
+error_failed_tptr_init:
|
|
|
+ hns_roce_bt_free(hr_dev);
|
|
|
+
|
|
|
error_failed_bt_init:
|
|
|
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
|
|
|
hns_roce_raq_free(hr_dev);
|
|
|
|
|
|
error_failed_raq_init:
|
|
@@ -1022,8 +1595,11 @@ error_failed_raq_init:
|
|
|
|
|
|
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
|
|
|
{
|
|
|
- hns_roce_bt_free(hr_dev);
|
|
|
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
|
|
|
+ hns_roce_free_mr_free(hr_dev);
|
|
|
+ hns_roce_des_qp_free(hr_dev);
|
|
|
+ hns_roce_tptr_free(hr_dev);
|
|
|
+ hns_roce_bt_free(hr_dev);
|
|
|
hns_roce_raq_free(hr_dev);
|
|
|
hns_roce_db_free(hr_dev);
|
|
|
}
|
|
@@ -1061,6 +1637,14 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
|
|
|
u32 *p;
|
|
|
u32 val;
|
|
|
|
|
|
+ /*
|
|
|
+ * When mac changed, loopback may fail
|
|
|
+ * because of smac not equal to dmac.
|
|
|
+	 * We need to release and create reserved qp again.
|
|
|
+ */
|
|
|
+ if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
|
|
|
+ dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
|
|
|
+
|
|
|
p = (u32 *)(&addr[0]);
|
|
|
reg_smac_l = *p;
|
|
|
roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
|
|
@@ -1293,9 +1877,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Now backwards through the CQ, removing CQ entries
|
|
|
- * that match our QP by overwriting them with next entries.
|
|
|
- */
|
|
|
+ * Now backwards through the CQ, removing CQ entries
|
|
|
+ * that match our QP by overwriting them with next entries.
|
|
|
+ */
|
|
|
while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
|
|
|
cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
|
|
|
if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
|
|
@@ -1317,9 +1901,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
|
|
|
if (nfreed) {
|
|
|
hr_cq->cons_index += nfreed;
|
|
|
/*
|
|
|
- * Make sure update of buffer contents is done before
|
|
|
- * updating consumer index.
|
|
|
- */
|
|
|
+ * Make sure update of buffer contents is done before
|
|
|
+ * updating consumer index.
|
|
|
+ */
|
|
|
wmb();
|
|
|
|
|
|
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
|
|
@@ -1339,14 +1923,21 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
|
|
|
dma_addr_t dma_handle, int nent, u32 vector)
|
|
|
{
|
|
|
struct hns_roce_cq_context *cq_context = NULL;
|
|
|
- void __iomem *tptr_addr;
|
|
|
+ struct hns_roce_buf_list *tptr_buf;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ dma_addr_t tptr_dma_addr;
|
|
|
+ int offset;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ tptr_buf = &priv->tptr_table.tptr_buf;
|
|
|
|
|
|
cq_context = mb_buf;
|
|
|
memset(cq_context, 0, sizeof(*cq_context));
|
|
|
|
|
|
- tptr_addr = 0;
|
|
|
- hr_dev->priv_addr = tptr_addr;
|
|
|
- hr_cq->tptr_addr = tptr_addr;
|
|
|
+ /* Get the tptr for this CQ. */
|
|
|
+ offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
|
|
|
+ tptr_dma_addr = tptr_buf->map + offset;
|
|
|
+ hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
|
|
|
|
|
|
/* Register cq_context members */
|
|
|
roce_set_field(cq_context->cqc_byte_4,
|
|
@@ -1390,10 +1981,10 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
|
|
|
roce_set_field(cq_context->cqc_byte_20,
|
|
|
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
|
|
|
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
|
|
|
- (u64)tptr_addr >> 44);
|
|
|
+ tptr_dma_addr >> 44);
|
|
|
cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
|
|
|
|
|
|
- cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
|
|
|
+ cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
|
|
|
|
|
|
roce_set_field(cq_context->cqc_byte_32,
|
|
|
CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
|
|
@@ -1407,7 +1998,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
|
|
|
roce_set_bit(cq_context->cqc_byte_32,
|
|
|
CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
|
|
|
0);
|
|
|
- /*The initial value of cq's ci is 0 */
|
|
|
+ /* The initial value of cq's ci is 0 */
|
|
|
roce_set_field(cq_context->cqc_byte_32,
|
|
|
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
|
|
|
CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
|
|
@@ -1424,9 +2015,9 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
|
|
|
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
|
|
|
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
|
|
|
/*
|
|
|
- * flags = 0; Notification Flag = 1, next
|
|
|
- * flags = 1; Notification Flag = 0, solocited
|
|
|
- */
|
|
|
+ * flags = 0; Notification Flag = 1, next
|
|
|
+	 * flags = 1; Notification Flag = 0, solicited
|
|
|
+ */
|
|
|
doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
|
|
|
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
|
|
|
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
|
|
@@ -1581,10 +2172,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
|
|
|
wq = &(*cur_qp)->sq;
|
|
|
if ((*cur_qp)->sq_signal_bits) {
|
|
|
/*
|
|
|
- * If sg_signal_bit is 1,
|
|
|
- * firstly tail pointer updated to wqe
|
|
|
- * which current cqe correspond to
|
|
|
- */
|
|
|
+ * If sg_signal_bit is 1,
|
|
|
+ * firstly tail pointer updated to wqe
|
|
|
+ * which current cqe correspond to
|
|
|
+ */
|
|
|
wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
|
|
|
CQE_BYTE_4_WQE_INDEX_M,
|
|
|
CQE_BYTE_4_WQE_INDEX_S);
|
|
@@ -1659,8 +2250,14 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
- if (npolled)
|
|
|
+ if (npolled) {
|
|
|
+ *hr_cq->tptr_addr = hr_cq->cons_index &
|
|
|
+ ((hr_cq->cq_depth << 1) - 1);
|
|
|
+
|
|
|
+		/* Memory barrier */
|
|
|
+ wmb();
|
|
|
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
|
|
|
+ }
|
|
|
|
|
|
spin_unlock_irqrestore(&hr_cq->lock, flags);
|
|
|
|
|
@@ -1799,12 +2396,12 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
|
|
|
if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
|
|
|
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
|
|
|
HNS_ROCE_CMD_2RST_QP,
|
|
|
- HNS_ROCE_CMD_TIME_CLASS_A);
|
|
|
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
|
|
|
|
|
|
if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
|
|
|
return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
|
|
|
HNS_ROCE_CMD_2ERR_QP,
|
|
|
- HNS_ROCE_CMD_TIME_CLASS_A);
|
|
|
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
|
|
|
|
|
|
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
|
|
|
if (IS_ERR(mailbox))
|
|
@@ -1814,7 +2411,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
|
|
|
|
|
|
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
|
|
|
op[cur_state][new_state],
|
|
|
- HNS_ROCE_CMD_TIME_CLASS_C);
|
|
|
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
|
|
|
|
|
|
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
|
|
|
return ret;
|
|
@@ -2000,11 +2597,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- *Reset to init
|
|
|
- * Mandatory param:
|
|
|
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
|
|
|
- * Optional param: NA
|
|
|
- */
|
|
|
+ * Reset to init
|
|
|
+ * Mandatory param:
|
|
|
+ * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
|
|
|
+ * Optional param: NA
|
|
|
+ */
|
|
|
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
|
|
|
roce_set_field(context->qpc_bytes_4,
|
|
|
QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
|
|
@@ -2172,24 +2769,14 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
|
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
|
|
|
hr_qp->sq_signal_bits);
|
|
|
|
|
|
- for (port = 0; port < hr_dev->caps.num_ports; port++) {
|
|
|
- smac = (u8 *)hr_dev->dev_addr[port];
|
|
|
- dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
|
|
|
- smac[0], smac[1], smac[2], smac[3], smac[4],
|
|
|
- smac[5]);
|
|
|
- if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
|
|
|
- (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
|
|
|
- (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
|
|
|
- roce_set_bit(context->qpc_bytes_32,
|
|
|
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
|
|
|
- 1);
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- if (hr_dev->loop_idc == 0x1)
|
|
|
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
|
|
|
+ hr_qp->port;
|
|
|
+ smac = (u8 *)hr_dev->dev_addr[port];
|
|
|
+ /* when dmac equals smac or loop_idc is 1, it should loopback */
|
|
|
+ if (ether_addr_equal_unaligned(dmac, smac) ||
|
|
|
+ hr_dev->loop_idc == 0x1)
|
|
|
roce_set_bit(context->qpc_bytes_32,
|
|
|
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
|
|
|
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
|
|
|
|
|
|
roce_set_bit(context->qpc_bytes_32,
|
|
|
QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
|
|
@@ -2509,7 +3096,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
|
/* Every status migrate must change state */
|
|
|
roce_set_field(context->qpc_bytes_144,
|
|
|
QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
|
|
|
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
|
|
|
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
|
|
|
|
|
|
/* SW pass context to HW */
|
|
|
ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
|
|
@@ -2522,9 +3109,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Use rst2init to instead of init2init with drv,
|
|
|
- * need to hw to flash RQ HEAD by DB again
|
|
|
- */
|
|
|
+ * Use rst2init to instead of init2init with drv,
|
|
|
+ * need to hw to flash RQ HEAD by DB again
|
|
|
+ */
|
|
|
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
|
|
|
/* Memory barrier */
|
|
|
wmb();
|
|
@@ -2619,7 +3206,7 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
|
|
|
|
|
|
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
|
|
|
HNS_ROCE_CMD_QUERY_QP,
|
|
|
- HNS_ROCE_CMD_TIME_CLASS_A);
|
|
|
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
|
|
|
if (!ret)
|
|
|
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
|
|
|
else
|
|
@@ -2630,8 +3217,78 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
|
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
|
|
|
+static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
|
+ int qp_attr_mask,
|
|
|
+ struct ib_qp_init_attr *qp_init_attr)
|
|
|
+{
|
|
|
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
|
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
|
+ struct hns_roce_sqp_context context;
|
|
|
+ u32 addr;
|
|
|
+
|
|
|
+ mutex_lock(&hr_qp->mutex);
|
|
|
+
|
|
|
+ if (hr_qp->state == IB_QPS_RESET) {
|
|
|
+ qp_attr->qp_state = IB_QPS_RESET;
|
|
|
+ goto done;
|
|
|
+ }
|
|
|
+
|
|
|
+ addr = ROCEE_QP1C_CFG0_0_REG +
|
|
|
+ hr_qp->port * sizeof(struct hns_roce_sqp_context);
|
|
|
+ context.qp1c_bytes_4 = roce_read(hr_dev, addr);
|
|
|
+ context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
|
|
|
+ context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
|
|
|
+ context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
|
|
|
+ context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
|
|
|
+ context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
|
|
|
+ context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
|
|
|
+ context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
|
|
|
+ context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
|
|
|
+ context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
|
|
|
+
|
|
|
+ hr_qp->state = roce_get_field(context.qp1c_bytes_4,
|
|
|
+ QP1C_BYTES_4_QP_STATE_M,
|
|
|
+ QP1C_BYTES_4_QP_STATE_S);
|
|
|
+ qp_attr->qp_state = hr_qp->state;
|
|
|
+ qp_attr->path_mtu = IB_MTU_256;
|
|
|
+ qp_attr->path_mig_state = IB_MIG_ARMED;
|
|
|
+ qp_attr->qkey = QKEY_VAL;
|
|
|
+ qp_attr->rq_psn = 0;
|
|
|
+ qp_attr->sq_psn = 0;
|
|
|
+ qp_attr->dest_qp_num = 1;
|
|
|
+ qp_attr->qp_access_flags = 6;
|
|
|
+
|
|
|
+ qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
|
|
|
+ QP1C_BYTES_20_PKEY_IDX_M,
|
|
|
+ QP1C_BYTES_20_PKEY_IDX_S);
|
|
|
+ qp_attr->port_num = hr_qp->port + 1;
|
|
|
+ qp_attr->sq_draining = 0;
|
|
|
+ qp_attr->max_rd_atomic = 0;
|
|
|
+ qp_attr->max_dest_rd_atomic = 0;
|
|
|
+ qp_attr->min_rnr_timer = 0;
|
|
|
+ qp_attr->timeout = 0;
|
|
|
+ qp_attr->retry_cnt = 0;
|
|
|
+ qp_attr->rnr_retry = 0;
|
|
|
+ qp_attr->alt_timeout = 0;
|
|
|
+
|
|
|
+done:
|
|
|
+ qp_attr->cur_qp_state = qp_attr->qp_state;
|
|
|
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
|
|
|
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
|
|
|
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
|
|
|
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
|
|
|
+ qp_attr->cap.max_inline_data = 0;
|
|
|
+ qp_init_attr->cap = qp_attr->cap;
|
|
|
+ qp_init_attr->create_flags = 0;
|
|
|
+
|
|
|
+ mutex_unlock(&hr_qp->mutex);
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
|
+ int qp_attr_mask,
|
|
|
+ struct ib_qp_init_attr *qp_init_attr)
|
|
|
{
|
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
|
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
@@ -2725,9 +3382,7 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
|
qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
|
|
|
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
|
|
|
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
|
|
|
- qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
|
|
|
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
|
|
|
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
|
|
|
+ qp_attr->port_num = hr_qp->port + 1;
|
|
|
qp_attr->sq_draining = 0;
|
|
|
qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
|
|
|
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
|
|
@@ -2767,134 +3422,397 @@ out:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
|
|
|
- struct hns_roce_qp *hr_qp,
|
|
|
- int is_user)
|
|
|
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
|
|
|
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
|
|
|
+{
|
|
|
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
|
+
|
|
|
+ return hr_qp->doorbell_qpn <= 1 ?
|
|
|
+ hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
|
|
|
+ hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
|
|
|
+}
|
|
|
+
|
|
|
+static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
|
|
|
+ struct hns_roce_qp *hr_qp,
|
|
|
+ u32 sdb_issue_ptr,
|
|
|
+ u32 *sdb_inv_cnt,
|
|
|
+ u32 *wait_stage)
|
|
|
{
|
|
|
- u32 sdbinvcnt;
|
|
|
- unsigned long end = 0;
|
|
|
- u32 sdbinvcnt_val;
|
|
|
- u32 sdbsendptr_val;
|
|
|
- u32 sdbisusepr_val;
|
|
|
- struct hns_roce_cq *send_cq, *recv_cq;
|
|
|
struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ u32 sdb_retry_cnt, old_retry;
|
|
|
+ u32 sdb_send_ptr, old_send;
|
|
|
+ u32 success_flags = 0;
|
|
|
+ u32 cur_cnt, old_cnt;
|
|
|
+ unsigned long end;
|
|
|
+ u32 send_ptr;
|
|
|
+ u32 inv_cnt;
|
|
|
+ u32 tsp_st;
|
|
|
+
|
|
|
+ if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
|
|
|
+ *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
|
|
|
+ dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
|
|
|
+ hr_qp->qpn, *wait_stage);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
|
|
|
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
|
|
|
- if (hr_qp->state != IB_QPS_RESET) {
|
|
|
- /*
|
|
|
- * Set qp to ERR,
|
|
|
- * waiting for hw complete processing all dbs
|
|
|
- */
|
|
|
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
|
|
|
- to_hns_roce_state(
|
|
|
- (enum ib_qp_state)hr_qp->state),
|
|
|
- HNS_ROCE_QP_STATE_ERR, NULL,
|
|
|
- hr_qp))
|
|
|
- dev_err(dev, "modify QP %06lx to ERR failed.\n",
|
|
|
- hr_qp->qpn);
|
|
|
-
|
|
|
- /* Record issued doorbell */
|
|
|
- sdbisusepr_val = roce_read(hr_dev,
|
|
|
- ROCEE_SDB_ISSUE_PTR_REG);
|
|
|
- /*
|
|
|
- * Query db process status,
|
|
|
- * until hw process completely
|
|
|
- */
|
|
|
- end = msecs_to_jiffies(
|
|
|
- HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
|
|
|
- do {
|
|
|
- sdbsendptr_val = roce_read(hr_dev,
|
|
|
+ /* Calculate the total timeout for the entire verification process */
|
|
|
+ end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
|
|
|
+
|
|
|
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
|
|
|
+ /* Query db process status, until hw process completely */
|
|
|
+ sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
|
|
|
+ while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
|
|
|
+ ROCEE_SDB_PTR_CMP_BITS)) {
|
|
|
+ if (!time_before(jiffies, end)) {
|
|
|
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
|
|
|
+ hr_qp->qpn, sdb_issue_ptr,
|
|
|
+ sdb_send_ptr);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
|
|
|
+ sdb_send_ptr = roce_read(hr_dev,
|
|
|
ROCEE_SDB_SEND_PTR_REG);
|
|
|
- if (!time_before(jiffies, end)) {
|
|
|
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
|
|
|
- hr_qp->qpn);
|
|
|
- break;
|
|
|
- }
|
|
|
- } while ((short)(roce_get_field(sdbsendptr_val,
|
|
|
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
|
|
|
- roce_get_field(sdbisusepr_val,
|
|
|
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
|
|
|
- ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
|
|
|
- ) < 0);
|
|
|
+ }
|
|
|
|
|
|
- /* Get list pointer */
|
|
|
- sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
|
|
|
+ if (roce_get_field(sdb_issue_ptr,
|
|
|
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
|
|
|
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
|
|
|
+ roce_get_field(sdb_send_ptr,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
|
|
|
+ old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
|
|
|
+ old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
|
|
|
|
|
|
- /* Query db's list status, until hw reversal */
|
|
|
do {
|
|
|
- sdbinvcnt_val = roce_read(hr_dev,
|
|
|
- ROCEE_SDB_INV_CNT_REG);
|
|
|
+ tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
|
|
|
+ if (roce_get_bit(tsp_st,
|
|
|
+ ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
|
|
|
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
if (!time_before(jiffies, end)) {
|
|
|
- dev_err(dev, "destroy qp(0x%lx) timeout!!!",
|
|
|
- hr_qp->qpn);
|
|
|
- dev_err(dev, "SdbInvCnt = 0x%x\n",
|
|
|
- sdbinvcnt_val);
|
|
|
- break;
|
|
|
+ dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
|
|
|
+ "issue 0x%x send 0x%x.\n",
|
|
|
+ hr_qp->qpn, sdb_issue_ptr,
|
|
|
+ sdb_send_ptr);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
|
|
|
+
|
|
|
+ sdb_send_ptr = roce_read(hr_dev,
|
|
|
+ ROCEE_SDB_SEND_PTR_REG);
|
|
|
+ sdb_retry_cnt = roce_read(hr_dev,
|
|
|
+ ROCEE_SDB_RETRY_CNT_REG);
|
|
|
+ cur_cnt = roce_get_field(sdb_send_ptr,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
|
|
|
+ roce_get_field(sdb_retry_cnt,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
|
|
|
+ if (!roce_get_bit(tsp_st,
|
|
|
+ ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
|
|
|
+ old_cnt = roce_get_field(old_send,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
|
|
|
+ roce_get_field(old_retry,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
|
|
|
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
|
|
|
+ success_flags = 1;
|
|
|
+ } else {
|
|
|
+ old_cnt = roce_get_field(old_send,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
|
|
|
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
|
|
|
+ success_flags = 1;
|
|
|
+ else {
|
|
|
+ send_ptr = roce_get_field(old_send,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
|
|
|
+ roce_get_field(sdb_retry_cnt,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
|
|
|
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
|
|
|
+ roce_set_field(old_send,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
|
|
|
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
|
|
|
+ send_ptr);
|
|
|
+ }
|
|
|
}
|
|
|
- } while ((short)(roce_get_field(sdbinvcnt_val,
|
|
|
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
|
|
|
- ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
|
|
|
- (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
|
|
|
-
|
|
|
- /* Modify qp to reset before destroying qp */
|
|
|
- if (hns_roce_v1_qp_modify(hr_dev, NULL,
|
|
|
- to_hns_roce_state(
|
|
|
- (enum ib_qp_state)hr_qp->state),
|
|
|
- HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
|
|
|
- dev_err(dev, "modify QP %06lx to RESET failed.\n",
|
|
|
- hr_qp->qpn);
|
|
|
+ } while (!success_flags);
|
|
|
}
|
|
|
+
|
|
|
+ *wait_stage = HNS_ROCE_V1_DB_STAGE2;
|
|
|
+
|
|
|
+ /* Get list pointer */
|
|
|
+ *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
|
|
|
+ dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
|
|
|
+ hr_qp->qpn, *sdb_inv_cnt);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
|
|
|
+ /* Query db's list status, until hw reversal */
|
|
|
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
|
|
|
+ while (roce_hw_index_cmp_lt(inv_cnt,
|
|
|
+ *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
|
|
|
+ ROCEE_SDB_CNT_CMP_BITS)) {
|
|
|
+ if (!time_before(jiffies, end)) {
|
|
|
+ dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
|
|
|
+ hr_qp->qpn, inv_cnt);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
|
|
|
+ inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
|
|
|
+ }
|
|
|
+
|
|
|
+ *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
|
|
|
+ struct hns_roce_qp *hr_qp,
|
|
|
+ struct hns_roce_qp_work *qp_work_entry,
|
|
|
+ int *is_timeout)
|
|
|
+{
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ u32 sdb_issue_ptr;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ if (hr_qp->state != IB_QPS_RESET) {
|
|
|
+ /* Set qp to ERR, waiting for hw complete processing all dbs */
|
|
|
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
|
|
|
+ IB_QPS_ERR);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
|
|
|
+ hr_qp->qpn);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Record issued doorbell */
|
|
|
+ sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
|
|
|
+ qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
|
|
|
+ qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
|
|
|
+
|
|
|
+ /* Query db process status, until hw process completely */
|
|
|
+ ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
|
|
|
+ &qp_work_entry->sdb_inv_cnt,
|
|
|
+ &qp_work_entry->db_wait_stage);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
|
|
|
+ hr_qp->qpn);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
|
|
|
+ qp_work_entry->sche_cnt = 0;
|
|
|
+ *is_timeout = 1;
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Modify qp to reset before destroying qp */
|
|
|
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
|
|
|
+ IB_QPS_RESET);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
|
|
|
+ hr_qp->qpn);
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct hns_roce_qp_work *qp_work_entry;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_dev *hr_dev;
|
|
|
+ struct hns_roce_qp *hr_qp;
|
|
|
+ struct device *dev;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
|
|
|
+ hr_dev = to_hr_dev(qp_work_entry->ib_dev);
|
|
|
+ dev = &hr_dev->pdev->dev;
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ hr_qp = qp_work_entry->qp;
|
|
|
+
|
|
|
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
|
|
|
+
|
|
|
+ qp_work_entry->sche_cnt++;
|
|
|
+
|
|
|
+ /* Query db process status, until hw process completely */
|
|
|
+ ret = check_qp_db_process_status(hr_dev, hr_qp,
|
|
|
+ qp_work_entry->sdb_issue_ptr,
|
|
|
+ &qp_work_entry->sdb_inv_cnt,
|
|
|
+ &qp_work_entry->db_wait_stage);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
|
|
|
+ hr_qp->qpn);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
|
|
|
+ priv->des_qp.requeue_flag) {
|
|
|
+ queue_work(priv->des_qp.qp_wq, work);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Modify qp to reset before destroying qp */
|
|
|
+ ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
|
|
|
+ IB_QPS_RESET);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ hns_roce_qp_remove(hr_dev, hr_qp);
|
|
|
+ hns_roce_qp_free(hr_dev, hr_qp);
|
|
|
+
|
|
|
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
|
|
|
+ /* RC QP, release QPN */
|
|
|
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
|
|
|
+ kfree(hr_qp);
|
|
|
+ } else
|
|
|
+ kfree(hr_to_hr_sqp(hr_qp));
|
|
|
+
|
|
|
+ kfree(qp_work_entry);
|
|
|
+
|
|
|
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
|
|
|
+}
|
|
|
+
|
|
|
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
|
|
|
+{
|
|
|
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
|
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ struct hns_roce_qp_work qp_work_entry;
|
|
|
+ struct hns_roce_qp_work *qp_work;
|
|
|
+ struct hns_roce_v1_priv *priv;
|
|
|
+ struct hns_roce_cq *send_cq, *recv_cq;
|
|
|
+ int is_user = !!ibqp->pd->uobject;
|
|
|
+ int is_timeout = 0;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
|
|
|
+ if (ret) {
|
|
|
+ dev_err(dev, "QP reset state check failed(%d)!\n", ret);
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
|
|
|
recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
|
|
|
|
|
|
hns_roce_lock_cqs(send_cq, recv_cq);
|
|
|
-
|
|
|
if (!is_user) {
|
|
|
__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
|
|
|
to_hr_srq(hr_qp->ibqp.srq) : NULL);
|
|
|
if (send_cq != recv_cq)
|
|
|
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
|
|
|
}
|
|
|
-
|
|
|
- hns_roce_qp_remove(hr_dev, hr_qp);
|
|
|
-
|
|
|
hns_roce_unlock_cqs(send_cq, recv_cq);
|
|
|
|
|
|
- hns_roce_qp_free(hr_dev, hr_qp);
|
|
|
+ if (!is_timeout) {
|
|
|
+ hns_roce_qp_remove(hr_dev, hr_qp);
|
|
|
+ hns_roce_qp_free(hr_dev, hr_qp);
|
|
|
|
|
|
- /* Not special_QP, free their QPN */
|
|
|
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
|
|
|
- (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
|
|
|
- (hr_qp->ibqp.qp_type == IB_QPT_UD))
|
|
|
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
|
|
|
+ /* RC QP, release QPN */
|
|
|
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
|
|
|
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
|
|
|
+ }
|
|
|
|
|
|
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
|
|
|
|
|
|
- if (is_user) {
|
|
|
+ if (is_user)
|
|
|
ib_umem_release(hr_qp->umem);
|
|
|
- } else {
|
|
|
+ else {
|
|
|
kfree(hr_qp->sq.wrid);
|
|
|
kfree(hr_qp->rq.wrid);
|
|
|
+
|
|
|
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
|
|
|
}
|
|
|
+
|
|
|
+ if (!is_timeout) {
|
|
|
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC)
|
|
|
+ kfree(hr_qp);
|
|
|
+ else
|
|
|
+ kfree(hr_to_hr_sqp(hr_qp));
|
|
|
+ } else {
|
|
|
+ qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
|
|
|
+ if (!qp_work)
|
|
|
+ return -ENOMEM;
|
|
|
+
|
|
|
+ INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
|
|
|
+ qp_work->ib_dev = &hr_dev->ib_dev;
|
|
|
+ qp_work->qp = hr_qp;
|
|
|
+ qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
|
|
|
+ qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
|
|
|
+ qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
|
|
|
+ qp_work->sche_cnt = qp_work_entry.sche_cnt;
|
|
|
+
|
|
|
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
|
|
|
+ queue_work(priv->des_qp.qp_wq, &qp_work->work);
|
|
|
+ dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
|
|
|
+int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
|
|
|
{
|
|
|
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
|
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
|
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
|
|
|
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
|
|
|
+ struct device *dev = &hr_dev->pdev->dev;
|
|
|
+ u32 cqe_cnt_ori;
|
|
|
+ u32 cqe_cnt_cur;
|
|
|
+ u32 cq_buf_size;
|
|
|
+ int wait_time = 0;
|
|
|
+ int ret = 0;
|
|
|
|
|
|
- hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
|
|
|
+ hns_roce_free_cq(hr_dev, hr_cq);
|
|
|
|
|
|
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
|
|
|
- kfree(hr_to_hr_sqp(hr_qp));
|
|
|
- else
|
|
|
- kfree(hr_qp);
|
|
|
+ /*
|
|
|
+ * Before freeing cq buffer, we need to ensure that the outstanding CQE
|
|
|
+ * have been written by checking the CQE counter.
|
|
|
+ */
|
|
|
+ cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
|
|
|
+ while (1) {
|
|
|
+ if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
|
|
|
+ HNS_ROCE_CQE_WCMD_EMPTY_BIT)
|
|
|
+ break;
|
|
|
|
|
|
- return 0;
|
|
|
+ cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
|
|
|
+ if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
|
|
|
+ break;
|
|
|
+
|
|
|
+ msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
|
|
|
+ if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
|
|
|
+ dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
|
|
|
+ hr_cq->cqn);
|
|
|
+ ret = -ETIMEDOUT;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ wait_time++;
|
|
|
+ }
|
|
|
+
|
|
|
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
|
|
|
+
|
|
|
+ if (ibcq->uobject)
|
|
|
+ ib_umem_release(hr_cq->umem);
|
|
|
+ else {
|
|
|
+ /* Free the buff of stored cq */
|
|
|
+ cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
|
|
|
+ hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
|
|
|
+ }
|
|
|
+
|
|
|
+ kfree(hr_cq);
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
struct hns_roce_v1_priv hr_v1_priv;
|
|
@@ -2917,5 +3835,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
|
|
|
.post_recv = hns_roce_v1_post_recv,
|
|
|
.req_notify_cq = hns_roce_v1_req_notify_cq,
|
|
|
.poll_cq = hns_roce_v1_poll_cq,
|
|
|
+ .dereg_mr = hns_roce_v1_dereg_mr,
|
|
|
+ .destroy_cq = hns_roce_v1_destroy_cq,
|
|
|
.priv = &hr_v1_priv,
|
|
|
};
|