
Merge branch 'for-next-early' into for-next

The early for-next branch was based on v4.14-rc2, while the shared pull
request I got from Mellanox used a v4.14-rc4 base.  I'm making the
branch that was the shared Mellanox pull request the new for-next branch
and merging the early for-next branch into it.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Doug Ledford, 7 years ago
Current commit: 754137a769
100 changed files with 7,967 additions and 1,849 deletions
  1. MAINTAINERS (+1 -0)
  2. drivers/infiniband/Kconfig (+1 -1)
  3. drivers/infiniband/core/cm.c (+13 -14)
  4. drivers/infiniband/core/cma.c (+7 -4)
  5. drivers/infiniband/core/iwcm.c (+0 -3)
  6. drivers/infiniband/core/rw.c (+10 -14)
  7. drivers/infiniband/core/user_mad.c (+10 -1)
  8. drivers/infiniband/core/uverbs.h (+21 -14)
  9. drivers/infiniband/core/uverbs_cmd.c (+53 -72)
  10. drivers/infiniband/core/uverbs_ioctl.c (+3 -10)
  11. drivers/infiniband/core/uverbs_ioctl_merge.c (+1 -1)
  12. drivers/infiniband/core/uverbs_main.c (+10 -12)
  13. drivers/infiniband/core/uverbs_marshall.c (+5 -5)
  14. drivers/infiniband/core/uverbs_std_types.c (+2 -1)
  15. drivers/infiniband/hw/bnxt_re/ib_verbs.c (+5 -7)
  16. drivers/infiniband/hw/bnxt_re/main.c (+3 -2)
  17. drivers/infiniband/hw/bnxt_re/qplib_fp.c (+1 -6)
  18. drivers/infiniband/hw/bnxt_re/qplib_rcfw.c (+4 -8)
  19. drivers/infiniband/hw/bnxt_re/qplib_rcfw.h (+3 -3)
  20. drivers/infiniband/hw/bnxt_re/qplib_res.h (+1 -1)
  21. drivers/infiniband/hw/bnxt_re/qplib_sp.c (+2 -3)
  22. drivers/infiniband/hw/bnxt_re/roce_hsi.h (+1 -1)
  23. drivers/infiniband/hw/cxgb3/Kconfig (+1 -1)
  24. drivers/infiniband/hw/cxgb3/cxio_hal.c (+2 -4)
  25. drivers/infiniband/hw/cxgb3/iwch_cm.c (+3 -3)
  26. drivers/infiniband/hw/cxgb3/iwch_qp.c (+3 -0)
  27. drivers/infiniband/hw/cxgb4/Kconfig (+1 -1)
  28. drivers/infiniband/hw/cxgb4/cm.c (+144 -148)
  29. drivers/infiniband/hw/cxgb4/cq.c (+59 -58)
  30. drivers/infiniband/hw/cxgb4/device.c (+31 -10)
  31. drivers/infiniband/hw/cxgb4/ev.c (+1 -1)
  32. drivers/infiniband/hw/cxgb4/iw_cxgb4.h (+69 -14)
  33. drivers/infiniband/hw/cxgb4/mem.c (+160 -108)
  34. drivers/infiniband/hw/cxgb4/provider.c (+18 -18)
  35. drivers/infiniband/hw/cxgb4/qp.c (+68 -73)
  36. drivers/infiniband/hw/cxgb4/resource.c (+23 -23)
  37. drivers/infiniband/hw/cxgb4/t4.h (+8 -12)
  38. drivers/infiniband/hw/hfi1/chip.c (+116 -108)
  39. drivers/infiniband/hw/hfi1/chip.h (+4 -0)
  40. drivers/infiniband/hw/hfi1/common.h (+1 -0)
  41. drivers/infiniband/hw/hfi1/debugfs.c (+2 -2)
  42. drivers/infiniband/hw/hfi1/driver.c (+8 -2)
  43. drivers/infiniband/hw/hfi1/file_ops.c (+280 -206)
  44. drivers/infiniband/hw/hfi1/firmware.c (+92 -21)
  45. drivers/infiniband/hw/hfi1/hfi.h (+14 -12)
  46. drivers/infiniband/hw/hfi1/init.c (+21 -22)
  47. drivers/infiniband/hw/hfi1/mad.c (+6 -4)
  48. drivers/infiniband/hw/hfi1/rc.c (+1 -2)
  49. drivers/infiniband/hw/hfi1/ruc.c (+0 -9)
  50. drivers/infiniband/hw/hfi1/sdma.c (+9 -10)
  51. drivers/infiniband/hw/hfi1/sysfs.c (+1 -1)
  52. drivers/infiniband/hw/hfi1/trace.c (+12 -15)
  53. drivers/infiniband/hw/hfi1/trace.h (+10 -0)
  54. drivers/infiniband/hw/hfi1/trace_ibhdrs.h (+26 -23)
  55. drivers/infiniband/hw/hfi1/trace_rx.h (+1 -10)
  56. drivers/infiniband/hw/hfi1/uc.c (+0 -1)
  57. drivers/infiniband/hw/hfi1/ud.c (+0 -2)
  58. drivers/infiniband/hw/hfi1/user_exp_rcv.c (+2 -7)
  59. drivers/infiniband/hw/hfi1/user_sdma.c (+40 -22)
  60. drivers/infiniband/hw/hfi1/user_sdma.h (+20 -9)
  61. drivers/infiniband/hw/hfi1/verbs.c (+8 -24)
  62. drivers/infiniband/hw/hfi1/verbs_txreq.h (+2 -0)
  63. drivers/infiniband/hw/hfi1/vnic_main.c (+5 -2)
  64. drivers/infiniband/hw/hns/Kconfig (+23 -2)
  65. drivers/infiniband/hw/hns/Makefile (+7 -1)
  66. drivers/infiniband/hw/hns/hns_roce_ah.c (+1 -1)
  67. drivers/infiniband/hw/hns/hns_roce_alloc.c (+5 -3)
  68. drivers/infiniband/hw/hns/hns_roce_cmd.c (+12 -95)
  69. drivers/infiniband/hw/hns/hns_roce_cmd.h (+50 -0)
  70. drivers/infiniband/hw/hns/hns_roce_common.h (+23 -0)
  71. drivers/infiniband/hw/hns/hns_roce_cq.c (+45 -27)
  72. drivers/infiniband/hw/hns/hns_roce_device.h (+89 -12)
  73. drivers/infiniband/hw/hns/hns_roce_eq.c (+3 -3)
  74. drivers/infiniband/hw/hns/hns_roce_hem.c (+672 -27)
  75. drivers/infiniband/hw/hns/hns_roce_hem.h (+30 -2)
  76. drivers/infiniband/hw/hns/hns_roce_hw_v1.c (+481 -118)
  77. drivers/infiniband/hw/hns/hns_roce_hw_v1.h (+5 -0)
  78. drivers/infiniband/hw/hns/hns_roce_hw_v2.c (+3133 -0)
  79. drivers/infiniband/hw/hns/hns_roce_hw_v2.h (+1165 -0)
  80. drivers/infiniband/hw/hns/hns_roce_main.c (+92 -250)
  81. drivers/infiniband/hw/hns/hns_roce_mr.c (+465 -47)
  82. drivers/infiniband/hw/hns/hns_roce_pd.c (+14 -6)
  83. drivers/infiniband/hw/hns/hns_roce_qp.c (+137 -42)
  84. drivers/infiniband/hw/i40iw/Kconfig (+1 -0)
  85. drivers/infiniband/hw/i40iw/i40iw_cm.c (+13 -7)
  86. drivers/infiniband/hw/i40iw/i40iw_cm.h (+1 -0)
  87. drivers/infiniband/hw/i40iw/i40iw_hw.c (+1 -0)
  88. drivers/infiniband/hw/i40iw/i40iw_puda.c (+1 -0)
  89. drivers/infiniband/hw/i40iw/i40iw_uk.c (+13 -0)
  90. drivers/infiniband/hw/i40iw/i40iw_user.h (+1 -0)
  91. drivers/infiniband/hw/i40iw/i40iw_utils.c (+9 -4)
  92. drivers/infiniband/hw/i40iw/i40iw_verbs.c (+13 -0)
  93. drivers/infiniband/hw/mlx4/cq.c (+2 -0)
  94. drivers/infiniband/hw/mlx4/mcg.c (+1 -0)
  95. drivers/infiniband/hw/mlx5/cq.c (+2 -0)
  96. drivers/infiniband/hw/mlx5/mr.c (+2 -2)
  97. drivers/infiniband/hw/mlx5/qp.c (+1 -2)
  98. drivers/infiniband/hw/mthca/mthca_main.c (+5 -5)
  99. drivers/infiniband/hw/nes/nes.c (+19 -14)
  100. drivers/infiniband/hw/nes/nes_cm.c (+1 -8)

+ 1 - 0
MAINTAINERS

@@ -11081,6 +11081,7 @@ F:	drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
 M:	Ram Amrani <Ram.Amrani@cavium.com>
+M:	Michal Kalderon <Michal.Kalderon@cavium.com>
 M:	Ariel Elior <Ariel.Elior@cavium.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported

+ 1 - 1
drivers/infiniband/Kconfig

@@ -1,6 +1,5 @@
 menuconfig INFINIBAND
 	tristate "InfiniBand support"
-	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
 	depends on INET
@@ -46,6 +45,7 @@ config INFINIBAND_EXP_USER_ACCESS
 config INFINIBAND_USER_MEM
 	bool
 	depends on INFINIBAND_USER_ACCESS != n
+	depends on MMU
 	default y
 
 config INFINIBAND_ON_DEMAND_PAGING

+ 13 - 14
drivers/infiniband/core/cm.c

@@ -1472,31 +1472,29 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
 
 	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
 		sa_path_set_dlid(primary_path,
-				 htonl(ntohs(req_msg->primary_local_lid)));
+				 ntohs(req_msg->primary_local_lid));
 		sa_path_set_slid(primary_path,
-				 htonl(ntohs(req_msg->primary_remote_lid)));
+				 ntohs(req_msg->primary_remote_lid));
 	} else {
 		lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
-		sa_path_set_dlid(primary_path, cpu_to_be32(lid));
+		sa_path_set_dlid(primary_path, lid);
 
 		lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
-		sa_path_set_slid(primary_path, cpu_to_be32(lid));
+		sa_path_set_slid(primary_path, lid);
 	}
 
 	if (!cm_req_has_alt_path(req_msg))
 		return;
 
 	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
-		sa_path_set_dlid(alt_path,
-				 htonl(ntohs(req_msg->alt_local_lid)));
-		sa_path_set_slid(alt_path,
-				 htonl(ntohs(req_msg->alt_remote_lid)));
+		sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
+		sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
 	} else {
 		lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
-		sa_path_set_dlid(alt_path, cpu_to_be32(lid));
+		sa_path_set_dlid(alt_path, lid);
 
 		lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
-		sa_path_set_slid(alt_path, cpu_to_be32(lid));
+		sa_path_set_slid(alt_path, lid);
 	}
 }
 
@@ -2810,6 +2808,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 			msg_response = CM_MSG_RESPONSE_OTHER;
 			break;
 		}
+		/* fall through */
 	default:
 		ret = -EINVAL;
 		goto error1;
@@ -3037,14 +3036,14 @@ static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
 	u32 lid;
 
 	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
-		sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
-		sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
+		sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
+		sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
 	} else {
 		lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
-		sa_path_set_dlid(path, cpu_to_be32(lid));
+		sa_path_set_dlid(path, lid);
 
 		lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
-		sa_path_set_slid(path, cpu_to_be32(lid));
+		sa_path_set_slid(path, lid);
 	}
 }
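
For context: the hunks above track an API change rather than a bug fix. Judging from the callers, sa_path_set_dlid()/sa_path_set_slid() now take a host-order u32 LID instead of a __be32, so the extra htonl()/cpu_to_be32() step is dropped. A minimal userspace sketch of the two byte-order layers involved (arpa/inet.h standing in for the kernel's byte-order helpers):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 16-bit IB LID as it appears on the wire (big-endian). */
	uint16_t wire_lid = htons(0x0001);

	/* ntohs() alone yields the host-order value the new setters take. */
	uint32_t host_lid = ntohs(wire_lid);

	/* The old setters took __be32, hence the former htonl(ntohs(...)). */
	uint32_t be32_lid = htonl(host_lid);

	printf("host order: 0x%08x\n", (unsigned)host_lid);
	printf("big-endian 32-bit: 0x%08x\n", (unsigned)be32_lid); /* 0x01000000 on LE */
	return 0;
}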
 

+ 7 - 4
drivers/infiniband/core/cma.c

@@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
 	return id_priv;
 }
 
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
 {
 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
 }
@@ -1942,7 +1942,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id = NULL;
 	struct rdma_cm_event event;
 	struct net_device *net_dev;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
 	if (IS_ERR(listen_id))
@@ -3440,7 +3441,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	struct ib_cm_sidr_req_param req;
 	struct ib_cm_id	*id;
 	void *private_data;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
@@ -3497,7 +3499,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	struct rdma_route *route;
 	void *private_data;
 	struct ib_cm_id	*id;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);

+ 0 - 3
drivers/infiniband/core/iwcm.c

@@ -447,9 +447,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
  */
 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 {
-	struct iwcm_id_private *cm_id_priv;
-
-	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 	destroy_cm_id(cm_id);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);

+ 10 - 14
drivers/infiniband/core/rw.c

@@ -384,21 +384,17 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 	count += ret;
 	prev_wr = &ctx->sig->data.reg_wr.wr;
 
-	if (prot_sg_cnt) {
-		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
-				prot_sg, prot_sg_cnt, 0);
-		if (ret < 0)
-			goto out_destroy_data_mr;
-		count += ret;
+	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
+				  prot_sg, prot_sg_cnt, 0);
+	if (ret < 0)
+		goto out_destroy_data_mr;
+	count += ret;
 
-		if (ctx->sig->prot.inv_wr.next)
-			prev_wr->next = &ctx->sig->prot.inv_wr;
-		else
-			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
-		prev_wr = &ctx->sig->prot.reg_wr.wr;
-	} else {
-		ctx->sig->prot.mr = NULL;
-	}
+	if (ctx->sig->prot.inv_wr.next)
+		prev_wr->next = &ctx->sig->prot.inv_wr;
+	else
+		prev_wr->next = &ctx->sig->prot.reg_wr.wr;
+	prev_wr = &ctx->sig->prot.reg_wr.wr;
 
 	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
 	if (!ctx->sig->sig_mr) {

+ 10 - 1
drivers/infiniband/core/user_mad.c

@@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
 	packet->mad.hdr.status	   = 0;
 	packet->mad.hdr.length	   = hdr_size(file) + mad_recv_wc->mad_len;
 	packet->mad.hdr.qpn	   = cpu_to_be32(mad_recv_wc->wc->src_qp);
-	packet->mad.hdr.lid	   = ib_lid_be16(mad_recv_wc->wc->slid);
+	/*
+	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
+	 * information is obtained elsewhere. Mask off the upper 16 bits.
+	 */
+	if (agent->device->port_immutable[agent->port_num].core_cap_flags &
+	    RDMA_CORE_PORT_INTEL_OPA)
+		packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
+						  mad_recv_wc->wc->slid);
+	else
+		packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
 	packet->mad.hdr.sl	   = mad_recv_wc->wc->sl;
 	packet->mad.hdr.path_bits  = mad_recv_wc->wc->dlid_path_bits;
 	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
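
On OPA hardware wc->slid is a full 32-bit LID, while the user MAD header only carries 16 bits, hence the explicit 0xFFFF mask before the conversion. A small userspace sketch of the truncation; lid_be16() below is a simplified stand-in for the kernel's ib_lid_be16() (assumed here to convert the low 16 bits to big-endian):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ib_lid_be16(): callers must already have
 * reduced the LID to 16 bits. */
static uint16_t lid_be16(uint32_t lid)
{
	return htons((uint16_t)lid);
}

int main(void)
{
	uint32_t opa_slid = 0x00820001;	/* hypothetical 32-bit OPA LID */

	/* Drop the upper 16 bits; on OPA they are recoverable elsewhere. */
	uint16_t hdr_lid = lid_be16(0xFFFF & opa_slid);

	printf("header LID: 0x%04x\n", (unsigned)ntohs(hdr_lid)); /* 0x0001 */
	return 0;
}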

+ 21 - 14
drivers/infiniband/core/uverbs.h

@@ -47,21 +47,28 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
-	do {								\
-		(udata)->inbuf  = (const void __user *) (ibuf);		\
-		(udata)->outbuf = (void __user *) (obuf);		\
-		(udata)->inlen  = (ilen);				\
-		(udata)->outlen = (olen);				\
-	} while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+		     const void __user *ibuf,
+		     void __user *obuf,
+		     size_t ilen, size_t olen)
+{
+	udata->inbuf  = ibuf;
+	udata->outbuf = obuf;
+	udata->inlen  = ilen;
+	udata->outlen = olen;
+}
 
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
-	do {									\
-		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL;	\
-		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
-		(udata)->inlen  = (ilen);					\
-		(udata)->outlen = (olen);					\
-	} while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+				 const void __user *ibuf,
+				 void __user *obuf,
+				 size_t ilen, size_t olen)
+{
+	ib_uverbs_init_udata(udata,
+			     ilen ? ibuf : NULL, olen ? obuf : NULL,
+			     ilen, olen);
+}
 
 /*
  * Our lifetime rules for these structs are the following:
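
Converting the INIT_UDATA() macros to inline functions buys type checking on the pointer and length arguments, which the blind casts in the macro bodies defeated. A userspace sketch of the same trade-off with a simplified struct (the __user address-space annotation is a sparse-only attribute and is omitted):

#include <stddef.h>
#include <stdio.h>

struct udata {
	const void *inbuf;
	void *outbuf;
	size_t inlen;
	size_t outlen;
};

/* Macro version: the casts accept almost anything, so argument
 * mix-ups compile silently. */
#define INIT_UDATA(u, ibuf, obuf, ilen, olen)		\
	do {						\
		(u)->inbuf  = (const void *)(ibuf);	\
		(u)->outbuf = (void *)(obuf);		\
		(u)->inlen  = (ilen);			\
		(u)->outlen = (olen);			\
	} while (0)

/* Inline-function version: each argument is checked against the
 * declared parameter type, so passing a length where a pointer
 * belongs is a compile error instead of a silent cast. */
static inline void init_udata(struct udata *u, const void *ibuf,
			      void *obuf, size_t ilen, size_t olen)
{
	u->inbuf  = ibuf;
	u->outbuf = obuf;
	u->inlen  = ilen;
	u->outlen = olen;
}

int main(void)
{
	struct udata u;
	char in[16], out[32];

	init_udata(&u, in, out, sizeof(in), sizeof(out));
	printf("inlen=%zu outlen=%zu\n", u.inlen, u.outlen);
	return 0;
}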

+ 53 - 72
drivers/infiniband/core/uverbs_cmd.c

@@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err;
 	}
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 		goto err_fd;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_file;
 	}
@@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
 	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
 							cmd.port_num);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
                    in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                    out_len - sizeof(resp));
 
@@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof resp);
 	resp.pd_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
                    in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                    out_len - sizeof(resp));
 
@@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 		atomic_inc(&xrcd->usecnt);
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
                    in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                    out_len - sizeof(resp));
 
@@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	resp.rkey      = mr->rkey;
 	resp.mr_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
                    in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                    out_len - sizeof(resp));
 
@@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 	resp.lkey      = mr->lkey;
 	resp.rkey      = mr->rkey;
 
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		ret = -EFAULT;
 	else
 		ret = in_len;
@@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 		goto err_free;
 	}
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 	resp.rkey      = mw->rkey;
 	resp.mw_handle = uobj->id;
 
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp))) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
 			       uobj_file.uobj);
 	ib_uverbs_init_event_queue(&ev_file->ev_queue);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		uobj_alloc_abort(uobj);
 		return -EFAULT;
 	}
@@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+			     sizeof(cmd), sizeof(resp));
 
-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 
 	resp.cqe = cq->cqe;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp.cqe))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
 		ret = -EFAULT;
 
 out:
@@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 		return -EINVAL;
 
 	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
-	header_ptr = (void __user *)(unsigned long) cmd.response;
+	header_ptr = u64_to_user_ptr(cmd.response);
 	data_ptr = header_ptr + sizeof resp;
 
 	memset(&resp, 0, sizeof resp);
@@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	resp.async_events_reported = obj->async_events_reported;
 
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -1650,10 +1640,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof(cmd)))
 		return -EFAULT;
 
-	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
-		   resp_size);
-	INIT_UDATA(&uhw, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + resp_size,
+	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+		   sizeof(cmd), resp_size);
+	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + resp_size,
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - resp_size);
 
@@ -1750,8 +1740,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -1795,8 +1785,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 	resp.qpn       = qp->qp_num;
 	resp.qp_handle = obj->uevent.uobject.id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_destroy;
 	}
@@ -1911,8 +1900,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.max_inline_data        = init_attr->cap.max_inline_data;
 	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2042,7 +2030,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
 		return -EOPNOTSUPP;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
 		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len);
 
@@ -2126,8 +2114,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	resp.events_reported = obj->uevent.events_reported;
 	uverbs_uobject_put(uobj);
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -2311,8 +2298,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 				break;
 		}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out_put:
@@ -2460,8 +2446,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
 		}
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2510,8 +2495,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
 				break;
 		}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		ret = -EFAULT;
 
 out:
@@ -2547,8 +2531,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
 		return -EINVAL;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long)cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -2592,8 +2576,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 
 	resp.ah_handle = uobj->id;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp)) {
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
 		ret = -EFAULT;
 		goto err_copy;
 	}
@@ -3619,8 +3602,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	xcmd.max_sge	 = cmd.max_sge;
 	xcmd.srq_limit	 = cmd.srq_limit;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -3646,8 +3629,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof(cmd),
-		   (unsigned long) cmd.response + sizeof(resp),
+	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+		   u64_to_user_ptr(cmd.response) + sizeof(resp),
 		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
 		   out_len - sizeof(resp));
 
@@ -3672,7 +3655,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
 		   out_len);
 
 	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
@@ -3723,8 +3706,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
 	resp.max_sge   = attr.max_sge;
 	resp.srq_limit = attr.srq_limit;
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
 		return -EFAULT;
 
 	return in_len;
@@ -3765,8 +3747,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	}
 	resp.events_reported = obj->events_reported;
 	uverbs_uobject_put(uobj);
-	if (copy_to_user((void __user *)(unsigned long)cmd.response,
-			 &resp, sizeof(resp)))
+	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
 		return -EFAULT;
 
 	return in_len;
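
The other recurring change in this file replaces the open-coded (void __user *)(unsigned long)cmd.response casts with u64_to_user_ptr(). Going through an unsigned integer of pointer width keeps the u64-to-pointer conversion well defined on 32-bit and 64-bit kernels alike, and the kernel macro also type-checks that its argument really is a u64. A simplified userspace analogue (not the kernel macro itself):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of u64_to_user_ptr(); the real macro adds a
 * typecheck of the argument and the __user annotation. */
static void *u64_to_ptr(uint64_t val)
{
	return (void *)(uintptr_t)val;
}

int main(void)
{
	int x = 42;

	/* ABI structures carry pointers in fixed-width u64 fields so the
	 * layout is identical for 32-bit and 64-bit userspace. */
	uint64_t response = (uint64_t)(uintptr_t)&x;

	int *p = u64_to_ptr(response);
	printf("round-trip: %d (0x%" PRIx64 ")\n", *p, response);
	return 0;
}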

+ 3 - 10
drivers/infiniband/core/uverbs_ioctl.c

@@ -241,9 +241,7 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
 	struct uverbs_attr *curr_attr;
 	unsigned long *curr_bitmap;
 	size_t ctx_size;
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
 	uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
-#endif
 
 	if (hdr->reserved)
 		return -EINVAL;
@@ -269,13 +267,10 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
 			(method_spec->num_child_attrs / BITS_PER_LONG +
 			 method_spec->num_buckets);
 
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
 	if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
 		ctx = (void *)data;
-
 	if (!ctx)
-#endif
-	ctx = kmalloc(ctx_size, GFP_KERNEL);
+		ctx = kmalloc(ctx_size, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
@@ -311,10 +306,8 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
 	err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
 				   file, method_spec, ctx->uverbs_attr_bundle);
 out:
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
-	if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
-#endif
-	kfree(ctx);
+	if (ctx != (void *)data)
+		kfree(ctx);
 	return err;
 }
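
The rewrite above keeps one allocation policy in plain C instead of three #ifdef'd fragments: try the on-stack buffer first, fall back to kmalloc(), and free only when the heap was actually used. A userspace sketch of the pattern, with malloc()/free() standing in for kmalloc()/kfree() and an arbitrary threshold:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_SZ 256	/* stand-in for UVERBS_OPTIMIZE_USING_STACK_SZ */

static int handle(size_t ctx_size)
{
	uintptr_t data[STACK_SZ / sizeof(uintptr_t)];
	void *ctx = NULL;

	if (ctx_size <= STACK_SZ)
		ctx = data;		/* small: use the stack buffer */
	if (!ctx)
		ctx = malloc(ctx_size);	/* large: fall back to the heap */
	if (!ctx)
		return -1;

	memset(ctx, 0, ctx_size);	/* ... real work happens here ... */

	/* Comparing against the stack buffer replaces the old
	 * size-threshold #ifdef when deciding whether to free. */
	if (ctx != (void *)data)
		free(ctx);
	return 0;
}

int main(void)
{
	printf("small=%d large=%d\n", handle(64), handle(4096));
	return 0;
}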
 

+ 1 - 1
drivers/infiniband/core/uverbs_ioctl_merge.c

@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
 				 min_id) ||
 			    WARN(attr_obj_with_special_access &&
 				 !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
-				 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+				 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
 				 min_id) ||
 			    WARN(IS_ATTR_OBJECT(attr) &&
 				 attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,

+ 10 - 12
drivers/infiniband/core/uverbs_main.c

@@ -763,7 +763,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			}
 
 			if (!access_ok(VERIFY_WRITE,
-				       (void __user *) (unsigned long) ex_hdr.response,
+				       u64_to_user_ptr(ex_hdr.response),
 				       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
 				ret = -EFAULT;
 				goto out;
@@ -775,19 +775,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			}
 		}
 
-		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
-				       hdr.in_words * 8, hdr.out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+					u64_to_user_ptr(ex_hdr.response),
+					hdr.in_words * 8, hdr.out_words * 8);
 
-		INIT_UDATA_BUF_OR_NULL(&uhw,
-				       buf + ucore.inlen,
-				       (unsigned long) ex_hdr.response + ucore.outlen,
-				       ex_hdr.provider_in_words * 8,
-				       ex_hdr.provider_out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&uhw,
+					buf + ucore.inlen,
+					u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
+					ex_hdr.provider_in_words * 8,
+					ex_hdr.provider_out_words * 8);
 
-		ret = uverbs_ex_cmd_table[command](file,
-						   ib_dev,
-						   &ucore,
-						   &uhw);
+		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
 		if (!ret)
 			ret = written_count;
 	} else {

+ 5 - 5
drivers/infiniband/core/uverbs_marshall.c

@@ -176,18 +176,18 @@ EXPORT_SYMBOL(ib_copy_path_rec_to_user);
 void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
 				struct ib_user_path_rec *src)
 {
-	__be32 slid, dlid;
+	u32 slid, dlid;
 
 	memset(dst, 0, sizeof(*dst));
 	if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
 	    (ib_is_opa_gid((union ib_gid *)src->dgid))) {
 		dst->rec_type = SA_PATH_REC_TYPE_OPA;
-		slid = htonl(opa_get_lid_from_gid((union ib_gid *)src->sgid));
-		dlid = htonl(opa_get_lid_from_gid((union ib_gid *)src->dgid));
+		slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
+		dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
 	} else {
 		dst->rec_type = SA_PATH_REC_TYPE_IB;
-		slid = htonl(ntohs(src->slid));
-		dlid = htonl(ntohs(src->dlid));
+		slid = ntohs(src->slid);
+		dlid = ntohs(src->dlid);
 	}
 	memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
 	memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);

+ 2 - 1
drivers/infiniband/core/uverbs_std_types.c

@@ -246,7 +246,8 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
 		outbuf_len = uhw_out->ptr_attr.len;
 	}
 
-	INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
+	ib_uverbs_init_udata_buf_or_null(udata, inbuf, outbuf, inbuf_len,
+					 outbuf_len);
 }
 
 static int uverbs_create_cq_handler(struct ib_device *ib_dev,

+ 5 - 7
drivers/infiniband/hw/bnxt_re/ib_verbs.c

@@ -1635,7 +1635,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
 	u8 ip_version = 0;
 	u16 vlan_id = 0xFFFF;
 	void *buf;
-	int i, rc = 0, size;
+	int i, rc = 0;
 
 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
 
@@ -1752,7 +1752,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
 	/* Pack the QP1 to the transmit buffer */
 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
 	if (buf) {
-		size = ib_ud_header_pack(&qp->qp1_hdr, buf);
+		ib_ud_header_pack(&qp->qp1_hdr, buf);
 		for (i = wqe->num_sge; i; i--) {
 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
@@ -2208,7 +2208,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
 				       struct ib_recv_wr *wr)
 {
 	struct bnxt_qplib_swqe wqe;
-	int rc = 0, payload_sz = 0;
+	int rc = 0;
 
 	memset(&wqe, 0, sizeof(wqe));
 	while (wr) {
@@ -2223,8 +2223,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
 			rc = -EINVAL;
 			break;
 		}
-		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
-					       wr->num_sge);
+		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
 		wqe.wr_id = wr->wr_id;
 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
 
@@ -2561,7 +2560,7 @@ static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
 				     u16 raweth_qp1_flags2)
 {
-	bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
+	bool is_ipv6 = false, is_ipv4 = false;
 
 	/* raweth_qp1_flags Bit 9-6 indicates itype */
 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
@@ -2572,7 +2571,6 @@ static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
 	    raweth_qp1_flags2 &
 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
-		is_udp = true;
 		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
 		(raweth_qp1_flags2 &
 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?

+ 3 - 2
drivers/infiniband/hw/bnxt_re/main.c

@@ -1071,9 +1071,10 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	 */
 	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
 					   BNXT_RE_MAX_QPC_COUNT);
-	if (rc)
+	if (rc) {
+		pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
 		goto fail;
-
+	}
 	rc = bnxt_re_net_ring_alloc
 			(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
 			 rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,

+ 1 - 6
drivers/infiniband/hw/bnxt_re/qplib_fp.c

@@ -160,11 +160,6 @@ void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 
 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
 {
-	struct bnxt_qplib_cq *scq, *rcq;
-
-	scq = qp->scq;
-	rcq = qp->rcq;
-
 	if (qp->sq.flushed) {
 		qp->sq.flushed = false;
 		list_del(&qp->sq_flush);
@@ -1360,7 +1355,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 
 			break;
 		}
-		/* else, just fall thru */
+		/* fall thru */
 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
 	{

+ 4 - 8
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c

@@ -88,7 +88,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	unsigned long flags;
 	u32 size, opcode;
 	u16 cookie, cbit;
-	int pg, idx;
 	u8 *preq;
 
 	opcode = req->opcode;
@@ -149,9 +148,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	preq = (u8 *)req;
 	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
-		pg = 0;
-		idx = 0;
-
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
 		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
@@ -172,14 +168,14 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 	rcfw->seq_num++;
 
 	cmdq_prod = cmdq->prod;
-	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
+	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
 		/* The very first doorbell write
 		 * is required to set this flag
 		 * which prompts the FW to reset
 		 * its internal pointers
 		 */
-		cmdq_prod |= FIRMWARE_FIRST_FLAG;
-		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
+		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
 	}
 
 	/* ring CMDQ DB */
@@ -622,7 +618,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 
 	/* General */
 	rcfw->seq_num = 0;
-	rcfw->flags = FIRMWARE_FIRST_FLAG;
+	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
 	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
 				  sizeof(unsigned long));
 	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);

+ 3 - 3
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h

@@ -162,9 +162,9 @@ struct bnxt_qplib_rcfw {
 	unsigned long		*cmdq_bitmap;
 	u32			bmap_size;
 	unsigned long		flags;
-#define FIRMWARE_INITIALIZED_FLAG	BIT(0)
-#define FIRMWARE_FIRST_FLAG		BIT(31)
-#define FIRMWARE_TIMED_OUT		BIT(3)
+#define FIRMWARE_INITIALIZED_FLAG	0
+#define FIRMWARE_FIRST_FLAG		31
+#define FIRMWARE_TIMED_OUT		3
 	wait_queue_head_t	waitq;
 	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
 					       struct creq_func_event *);
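
These defines change meaning, not just value: rcfw->flags is now managed with set_bit()/test_bit()/clear_bit(), which take a bit number rather than a mask, so FIRMWARE_FIRST_FLAG becomes 31 instead of BIT(31), and mask contexts (the cmdq_prod doorbell write in qplib_rcfw.c) wrap it with BIT() explicitly. A non-atomic userspace sketch of the distinction (the kernel primitives are atomic):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Non-atomic stand-ins for the kernel bitops; note they take a bit
 * number, not a mask. */
static void set_bit(unsigned int nr, unsigned long *addr)
{
	*addr |= BIT(nr);
}

static void clear_bit(unsigned int nr, unsigned long *addr)
{
	*addr &= ~BIT(nr);
}

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return !!(*addr & BIT(nr));
}

#define FIRMWARE_FIRST_FLAG 31	/* a bit number now, no longer BIT(31) */

int main(void)
{
	unsigned long flags = 0;
	unsigned long cmdq_prod = 0;

	set_bit(FIRMWARE_FIRST_FLAG, &flags);
	if (test_bit(FIRMWARE_FIRST_FLAG, &flags)) {
		/* Mask contexts must re-wrap the bit number in BIT(). */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &flags);
	}
	printf("flags=%#lx cmdq_prod=%#lx\n", flags, cmdq_prod);
	return 0;
}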

+ 1 - 1
drivers/infiniband/hw/bnxt_re/qplib_res.h

@@ -169,7 +169,7 @@ struct bnxt_qplib_ctx {
 	u32				cq_count;
 	struct bnxt_qplib_hwq		cq_tbl;
 	struct bnxt_qplib_hwq		tim_tbl;
-#define MAX_TQM_ALLOC_REQ		32
+#define MAX_TQM_ALLOC_REQ		48
 #define MAX_TQM_ALLOC_BLK_SIZE		8
 	u8				tqm_count[MAX_TQM_ALLOC_REQ];
 	struct bnxt_qplib_hwq		tqm_pde;

+ 2 - 3
drivers/infiniband/hw/bnxt_re/qplib_sp.c

@@ -720,13 +720,12 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
 	struct cmdq_map_tc_to_cos req;
 	struct creq_map_tc_to_cos_resp resp;
 	u16 cmd_flags = 0;
-	int rc = 0;
 
 	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
 	req.cos0 = cpu_to_le16(cids[0]);
 	req.cos1 = cpu_to_le16(cids[1]);
 
-	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-					  (void *)&resp, NULL, 0);
+	bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
+				     0);
 	return 0;
 }

+ 1 - 1
drivers/infiniband/hw/bnxt_re/roce_hsi.h

@@ -2644,7 +2644,7 @@ struct creq_query_func_resp_sb {
 	u8 l2_db_space_size;
 	__le16 max_srq;
 	__le32 max_gid;
-	__le32 tqm_alloc_reqs[8];
+	__le32 tqm_alloc_reqs[12];
 };
 
 /* Set resources command response (16 bytes) */

+ 1 - 1
drivers/infiniband/hw/cxgb3/Kconfig

@@ -1,6 +1,6 @@
 config INFINIBAND_CXGB3
 	tristate "Chelsio RDMA Driver"
-	depends on CHELSIO_T3 && INET
+	depends on CHELSIO_T3
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T3 1GbE and

+ 2 - 4
drivers/infiniband/hw/cxgb3/cxio_hal.c

@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 
 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
-	__u32 ptr;
+	__u32 ptr = wq->sq_rptr + count;
 	int flushed = 0;
-	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
+	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 
-	ptr = wq->sq_rptr + count;
-	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 	while (ptr != wq->sq_wptr) {
 		sqp->signaled = 0;
 		insert_sq_cqe(wq, cq, sqp);

+ 3 - 3
drivers/infiniband/hw/cxgb3/iwch_cm.c

@@ -1760,8 +1760,8 @@ static void ep_timeout(unsigned long arg)
 
 int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
-	int err;
 	struct iwch_ep *ep = to_ep(cm_id);
+
 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	if (state_read(&ep->com) == DEAD) {
@@ -1772,8 +1772,8 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	else {
-		err = send_mpa_reject(ep, pdata, pdata_len);
-		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
+		send_mpa_reject(ep, pdata, pdata_len);
+		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
 	}
 	put_ep(&ep->com);
 	return 0;

+ 3 - 0
drivers/infiniband/hw/cxgb3/iwch_qp.c

@@ -722,10 +722,13 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 				struct iwch_cq *schp)
+	__releases(&qhp->lock)
+	__acquires(&qhp->lock)
 {
 	int count;
 	int flushed;
 
+	lockdep_assert_held(&qhp->lock);
 
 	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
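
__releases()/__acquires() are sparse context annotations: they record that __flush_qp() is entered with qhp->lock held, drops it internally, and retakes it before returning, while lockdep_assert_held() checks the entry condition at runtime. A userspace sketch with no-op macros (their non-sparse expansion) and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

/* Under sparse (__CHECKER__) these become context-tracking
 * attributes; in a normal build they expand to nothing. */
#define __releases(x)
#define __acquires(x)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with 'lock' held; drops it around the blocking
 * work and retakes it before returning. */
static void flush_queues(void)
	__releases(&lock)
	__acquires(&lock)
{
	pthread_mutex_unlock(&lock);
	puts("flushing without the lock held");
	pthread_mutex_lock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	flush_queues();		/* returns with the lock held again */
	pthread_mutex_unlock(&lock);
	return 0;
}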

+ 1 - 1
drivers/infiniband/hw/cxgb4/Kconfig

@@ -1,6 +1,6 @@
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
-	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+	depends on CHELSIO_T4 && INET
 	select CHELSIO_LIB
 	select GENERIC_ALLOCATOR
 	---help---

+ 144 - 148
drivers/infiniband/hw/cxgb4/cm.c

@@ -99,10 +99,6 @@ module_param(enable_tcp_window_scaling, int, 0644);
 MODULE_PARM_DESC(enable_tcp_window_scaling,
 		 "Enable tcp window scaling (default=1)");
 
-int c4iw_debug;
-module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "obsolete");
-
 static int peer2peer = 1;
 module_param(peer2peer, int, 0644);
 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
@@ -180,7 +176,7 @@ static void ref_qp(struct c4iw_ep *ep)
 
 static void start_ep_timer(struct c4iw_ep *ep)
 {
-	pr_debug("%s ep %p\n", __func__, ep);
+	pr_debug("ep %p\n", ep);
 	if (timer_pending(&ep->timer)) {
 		pr_err("%s timer already started! ep %p\n",
 		       __func__, ep);
@@ -196,7 +192,7 @@ static void start_ep_timer(struct c4iw_ep *ep)
 
 static int stop_ep_timer(struct c4iw_ep *ep)
 {
-	pr_debug("%s ep %p stopping\n", __func__, ep);
+	pr_debug("ep %p stopping\n", ep);
 	del_timer_sync(&ep->timer);
 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
 		c4iw_put_ep(&ep->com);
@@ -212,7 +208,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 
 	if (c4iw_fatal_error(rdev)) {
 		kfree_skb(skb);
-		pr_debug("%s - device in error state - dropping\n", __func__);
+		pr_err("%s - device in error state - dropping\n", __func__);
 		return -EIO;
 	}
 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +225,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 
 	if (c4iw_fatal_error(rdev)) {
 		kfree_skb(skb);
-		pr_debug("%s - device in error state - dropping\n", __func__);
+		pr_err("%s - device in error state - dropping\n", __func__);
 		return -EIO;
 	}
 	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -263,10 +259,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
 	if (ep->emss < 128)
 		ep->emss = 128;
 	if (ep->emss & 7)
-		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
-	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
-		 ep->mss, ep->emss);
+		pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+			TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+		 ep->emss);
 }
 
 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +283,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 {
 	mutex_lock(&epc->mutex);
-	pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+	pr_debug("%s -> %s\n", states[epc->state], states[new]);
 	__state_set(epc, new);
 	mutex_unlock(&epc->mutex);
 	return;
@@ -318,11 +314,18 @@ static void *alloc_ep(int size, gfp_t gfp)
 
 	epc = kzalloc(size, gfp);
 	if (epc) {
+		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
+		if (!epc->wr_waitp) {
+			kfree(epc);
+			epc = NULL;
+			goto out;
+		}
 		kref_init(&epc->kref);
 		mutex_init(&epc->mutex);
-		c4iw_init_wr_wait(&epc->wr_wait);
+		c4iw_init_wr_wait(epc->wr_waitp);
 	}
-	pr_debug("%s alloc ep %p\n", __func__, epc);
+	pr_debug("alloc ep %p\n", epc);
+out:
 	return epc;
 }
 
@@ -384,7 +387,7 @@ void _c4iw_free_ep(struct kref *kref)
 	struct c4iw_ep *ep;
 
 	ep = container_of(kref, struct c4iw_ep, com.kref);
-	pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
 	if (test_bit(QP_REFERENCED, &ep->com.flags))
 		deref_qp(ep);
 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -407,6 +410,7 @@ void _c4iw_free_ep(struct kref *kref)
 	}
 	if (!skb_queue_empty(&ep->com.ep_skb_list))
 		skb_queue_purge(&ep->com.ep_skb_list);
+	c4iw_put_wr_wait(ep->com.wr_waitp);
 	kfree(ep);
 }
 
@@ -570,7 +574,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
 	struct cpl_abort_req *req = cplhdr(skb);
 
-	pr_debug("%s rdev %p\n", __func__, rdev);
+	pr_debug("rdev %p\n", rdev);
 	req->cmd = CPL_ABORT_NO_RST;
 	skb_get(skb);
 	ret = c4iw_ofld_send(rdev, skb);
@@ -647,7 +651,7 @@ static int send_halfclose(struct c4iw_ep *ep)
 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
@@ -662,7 +666,7 @@ static int send_abort(struct c4iw_ep *ep)
 	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
 	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	if (WARN_ON(!req_skb))
 		return -ENOMEM;
 
@@ -725,7 +729,7 @@ static int send_connect(struct c4iw_ep *ep)
 			roundup(sizev4, 16) :
 			roundup(sizev6, 16);
 
-	pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
+	pr_debug("ep %p atid %u\n", ep, ep->atid);
 
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
@@ -824,13 +828,13 @@ static int send_connect(struct c4iw_ep *ep)
 				t5req->params =
 					  cpu_to_be64(FILTER_TUPLE_V(params));
 				t5req->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
+				pr_debug("snd_isn %u\n", t5req->rsvd);
 				t5req->opt2 = cpu_to_be32(opt2);
 			} else {
 				t6req->params =
 					  cpu_to_be64(FILTER_TUPLE_V(params));
 				t6req->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
+				pr_debug("snd_isn %u\n", t6req->rsvd);
 				t6req->opt2 = cpu_to_be32(opt2);
 			}
 		}
@@ -877,13 +881,13 @@ static int send_connect(struct c4iw_ep *ep)
 				t5req6->params =
 					    cpu_to_be64(FILTER_TUPLE_V(params));
 				t5req6->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
+				pr_debug("snd_isn %u\n", t5req6->rsvd);
 				t5req6->opt2 = cpu_to_be32(opt2);
 			} else {
 				t6req6->params =
 					    cpu_to_be64(FILTER_TUPLE_V(params));
 				t6req6->rsvd = cpu_to_be32(isn);
-			pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
+				pr_debug("snd_isn %u\n", t6req6->rsvd);
 				t6req6->opt2 = cpu_to_be32(opt2);
 			}
 
@@ -907,8 +911,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	BUG_ON(skb_cloned(skb));
 
@@ -961,7 +965,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
-		pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		pr_debug("initiator ird %u ord %u\n", ep->ird,
 			 ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
@@ -1014,8 +1018,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	struct sk_buff *skb;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1094,8 +1098,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	struct sk_buff *skb;
 	struct mpa_v2_conn_params mpa_v2_params;
 
-	pr_debug("%s ep %p tid %u pd_len %d\n",
-		 __func__, ep, ep->hwtid, ep->plen);
+	pr_debug("ep %p tid %u pd_len %d\n",
+		 ep, ep->hwtid, ep->plen);
 
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1185,7 +1189,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_atid(t, atid);
 
-	pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
 		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
 
 	mutex_lock(&ep->com.mutex);
@@ -1229,7 +1233,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = status;
@@ -1246,7 +1250,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -1261,7 +1265,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -1278,8 +1282,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u status %d\n",
-		 __func__, ep, ep->hwtid, status);
+	pr_debug("ep %p tid %u status %d\n",
+		 ep, ep->hwtid, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -1308,7 +1312,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 		}
 	}
 
-	pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+	pr_debug("ep %p tid %u status %d\n", ep,
 		 ep->hwtid, status);
 	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1322,7 +1326,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 	struct iw_cm_event event;
 	int ret;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1359,13 +1363,13 @@ static void established_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	event.ird = ep->ord;
 	event.ord = ep->ird;
 	if (ep->com.cm_id) {
-		pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		set_bit(ESTAB_UPCALL, &ep->com.history);
 	}
@@ -1377,8 +1381,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
 	u32 credit_dack;
 
-	pr_debug("%s ep %p tid %u credits %u\n",
-		 __func__, ep, ep->hwtid, credits);
+	pr_debug("ep %p tid %u credits %u\n",
+		 ep, ep->hwtid, credits);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
 		pr_err("update_rx_credits - cannot alloc skb!\n");
@@ -1429,7 +1433,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	int err;
 	int disconnect = 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1527,8 +1531,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			resp_ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
-			pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
-				 __func__,
+			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
 				 resp_ird, resp_ord, ep->ird, ep->ord);
 
 			/*
@@ -1573,8 +1576,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		if (peer2peer)
 			ep->mpa_attr.p2p_type = p2p_type;
 
-	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
-		 __func__, ep->mpa_attr.crc_enabled,
+	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+		 ep->mpa_attr.crc_enabled,
 		 ep->mpa_attr.recv_marker_enabled,
 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 		 ep->mpa_attr.p2p_type, p2p_type);
@@ -1670,7 +1673,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct mpa_v2_conn_params *mpa_v2_params;
 	u16 plen;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1679,7 +1682,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
 		goto err_stop_timer;
 
-	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
 
 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -1695,7 +1698,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return 0;
 
-	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
@@ -1758,8 +1761,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			ep->ord = min_t(u32, ep->ord,
 					cur_max_read_depth(ep->com.dev));
-			pr_debug("%s initiator ird %u ord %u\n",
-				 __func__, ep->ird, ep->ord);
+			pr_debug("initiator ird %u ord %u\n",
+				 ep->ird, ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -1776,8 +1779,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 		if (peer2peer)
 			ep->mpa_attr.p2p_type = p2p_type;
 
-	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
-		 __func__,
+	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
 		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 		 ep->mpa_attr.p2p_type);
@@ -1816,7 +1818,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
-	pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+	pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
 	mutex_lock(&ep->com.mutex);
@@ -1870,11 +1872,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		pr_warn("Abort rpl to freed endpoint\n");
 		return 0;
 	}
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
@@ -1994,8 +1996,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 {
 	ep->snd_win = snd_win;
 	ep->rcv_win = rcv_win;
-	pr_debug("%s snd_win %d rcv_win %d\n",
-		 __func__, ep->snd_win, ep->rcv_win);
+	pr_debug("snd_win %d rcv_win %d\n",
+		 ep->snd_win, ep->rcv_win);
 }
 
 #define ACT_OPEN_RETRY_COUNT 2
@@ -2100,9 +2102,9 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	int iptype;
 	__u8 *ra;
 
-	pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+	pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
 	init_timer(&ep->timer);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
+	c4iw_init_wr_wait(ep->com.wr_waitp);
 
 	/* When MPA revision is different on nodes, the node with MPA_rev=2
 	 * tries to reconnect with MPA_rev 1 for the same EP through
@@ -2163,8 +2165,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 		goto fail4;
 	}
 
-	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
 		 ep->l2t->idx);
 
 	state_set(&ep->com, CONNECTING);
@@ -2215,12 +2217,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
 	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
 
-	pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+	pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
 		 status, status2errno(status));
 
 	if (cxgb_is_neg_adv(status)) {
-		pr_debug("%s Connection problems for atid %u status %u (%s)\n",
-			 __func__, atid, status, neg_adv_str(status));
+		pr_debug("Connection problems for atid %u status %u (%s)\n",
+			 atid, status, neg_adv_str(status));
 		ep->stats.connect_neg_adv++;
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
@@ -2316,12 +2318,12 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	if (!ep) {
-		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
 		goto out;
 	}
-	pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+	pr_debug("ep %p status %d error %d\n", ep,
 		 rpl->status, status2errno(rpl->status));
-	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
 	c4iw_put_ep(&ep->com);
 out:
 	return 0;
@@ -2334,11 +2336,11 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	if (!ep) {
-		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+		pr_warn("%s stid %d lookup failure!\n", __func__, stid);
 		goto out;
 	}
-	pr_debug("%s ep %p\n", __func__, ep);
-	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+	pr_debug("ep %p\n", ep);
+	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
 	c4iw_put_ep(&ep->com);
 out:
 	return 0;
@@ -2356,7 +2358,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	int win;
 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	BUG_ON(skb_cloned(skb));
 
 	skb_get(skb);
@@ -2427,7 +2429,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (peer2peer)
 			isn += 4;
 		rpl5->iss = cpu_to_be32(isn);
-		pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+		pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
 	}
 
 	rpl->opt0 = cpu_to_be64(opt0);
@@ -2440,7 +2442,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 
 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
 {
-	pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
+	pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
 	release_tid(&dev->rdev, hwtid, skb);
@@ -2466,13 +2468,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!parent_ep) {
-		pr_debug("%s connect request on invalid stid %d\n",
-			 __func__, stid);
+		pr_err("%s connect request on invalid stid %d\n",
+		       __func__, stid);
 		goto reject;
 	}
 
 	if (state_read(&parent_ep->com) != LISTEN) {
-		pr_debug("%s - listening ep not in LISTEN\n", __func__);
+		pr_err("%s - listening ep not in LISTEN\n", __func__);
 		goto reject;
 	}
 
@@ -2481,16 +2483,16 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	/* Find output route */
 	if (iptype == 4)  {
-		pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
-			 , __func__, parent_ep, hwtid,
+		pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+			 , parent_ep, hwtid,
 			 local_ip, peer_ip, ntohs(local_port),
 			 ntohs(peer_port), peer_mss);
 		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
 				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
 				      local_port, peer_port, tos);
 	} else {
-		pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
-			 , __func__, parent_ep, hwtid,
+		pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+			 , parent_ep, hwtid,
 			 local_ip, peer_ip, ntohs(local_port),
 			 ntohs(peer_port), peer_mss);
 		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -2576,7 +2578,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	child_ep->dst = dst;
 	child_ep->hwtid = hwtid;
 
-	pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+	pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
 		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
 	init_timer(&child_ep->timer);
@@ -2613,11 +2615,11 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	int ret;
 
 	ep = get_ep_from_tid(dev, tid);
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
 
-	pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+	pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
 		 ntohs(req->tcp_opt));
 
 	set_emss(ep, ntohs(req->tcp_opt));
@@ -2650,7 +2652,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (!ep)
 		return 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 	dst_confirm(ep->dst);
 
 	set_bit(PEER_CLOSE, &ep->com.history);
@@ -2673,12 +2675,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 */
 		__state_set(&ep->com, CLOSING);
 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
@@ -2741,16 +2743,16 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 
 	if (cxgb_is_neg_adv(req->status)) {
-		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
-			 __func__, ep->hwtid, req->status,
-			 neg_adv_str(req->status));
+		pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+			__func__, ep->hwtid, req->status,
+			neg_adv_str(req->status));
 		ep->stats.abort_neg_adv++;
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
 		mutex_unlock(&dev->rdev.stats.lock);
 		goto deref_ep;
 	}
-	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
 		 ep->com.state);
 	set_bit(PEER_ABORT, &ep->com.history);
 
@@ -2760,7 +2762,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	 * MPA_REQ_SENT
 	 */
 	if (ep->com.state != MPA_REQ_SENT)
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
 
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
@@ -2783,8 +2785,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 			 * do some housekeeping so as to re-initiate the
 			 * connection
 			 */
-			pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
-				 __func__, mpa_rev);
+			pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+				__func__, mpa_rev);
 			ep->retry_with_mpa_v1 = 1;
 		}
 		break;
@@ -2810,7 +2812,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case ABORTING:
 		break;
 	case DEAD:
-		pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+		pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		mutex_unlock(&ep->com.mutex);
 		goto deref_ep;
 	default:
@@ -2875,7 +2877,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (!ep)
 		return 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	/* The cm_id may be null if we failed to connect */
 	mutex_lock(&ep->com.mutex);
@@ -2950,19 +2952,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
-	pr_debug("%s ep %p tid %u credits %u\n",
-		 __func__, ep, ep->hwtid, credits);
+	pr_debug("ep %p tid %u credits %u\n",
+		 ep, ep->hwtid, credits);
 	if (credits == 0) {
-		pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
-			 __func__, ep, ep->hwtid, state_read(&ep->com));
+		pr_debug("0 credit ack ep %p tid %u state %u\n",
+			 ep, ep->hwtid, state_read(&ep->com));
 		goto out;
 	}
 
 	dst_confirm(ep->dst);
 	if (ep->mpa_skb) {
-		pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
-			 __func__, ep, ep->hwtid,
-			 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+		pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+			 ep, ep->hwtid, state_read(&ep->com),
+			 ep->mpa_attr.initiator ? 1 : 0);
 		mutex_lock(&ep->com.mutex);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
@@ -2980,7 +2982,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	int abort;
 	struct c4iw_ep *ep = to_ep(cm_id);
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
 	if (ep->com.state != MPA_REQ_RCVD) {
@@ -3011,7 +3013,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
 	int abort = 0;
 
-	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
 	if (ep->com.state != MPA_REQ_RCVD) {
@@ -3064,7 +3066,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			ep->ird = 1;
 	}
 
-	pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+	pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
 
 	ep->com.cm_id = cm_id;
 	ref_cm_id(&ep->com);
@@ -3220,12 +3222,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->com.dev = dev;
 	ep->com.qp = get_qhp(dev, conn_param->qpn);
 	if (!ep->com.qp) {
-		pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+		pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
 		err = -EINVAL;
 		goto fail2;
 	}
 	ref_qp(ep);
-	pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+	pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
 		 ep->com.qp, cm_id);
 
 	/*
@@ -3263,8 +3265,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		}
 
 		/* find a route */
-		pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
-			 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+		pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+			 &laddr->sin_addr, ntohs(laddr->sin_port),
 			 ra, ntohs(raddr->sin_port));
 		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
 					  laddr->sin_addr.s_addr,
@@ -3285,8 +3287,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		}
 
 		/* find a route */
-		pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
-			 __func__, laddr6->sin6_addr.s6_addr,
+		pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+			 laddr6->sin6_addr.s6_addr,
 			 ntohs(laddr6->sin6_port),
 			 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
 		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -3309,8 +3311,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		goto fail4;
 	}
 
-	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+	pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+		 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
 		 ep->l2t->idx);
 
 	state_set(&ep->com, CONNECTING);
@@ -3348,14 +3350,14 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
 		if (err)
 			return err;
 	}
-	c4iw_init_wr_wait(&ep->com.wr_wait);
+	c4iw_init_wr_wait(ep->com.wr_waitp);
 	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
 				   ep->stid, &sin6->sin6_addr,
 				   sin6->sin6_port,
 				   ep->com.dev->rdev.lldi.rxq_ids[0]);
 	if (!err)
 		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
-					  &ep->com.wr_wait,
+					  ep->com.wr_waitp,
 					  0, 0, __func__);
 	else if (err > 0)
 		err = net_xmit_errno(err);
@@ -3391,13 +3393,13 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
 			}
 		} while (err == -EBUSY);
 	} else {
-		c4iw_init_wr_wait(&ep->com.wr_wait);
+		c4iw_init_wr_wait(ep->com.wr_waitp);
 		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
 				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
 				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
 		if (!err)
 			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
-						  &ep->com.wr_wait,
+						  ep->com.wr_waitp,
 						  0, 0, __func__);
 		else if (err > 0)
 			err = net_xmit_errno(err);
@@ -3424,7 +3426,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail1;
 	}
 	skb_queue_head_init(&ep->com.ep_skb_list);
-	pr_debug("%s ep %p\n", __func__, ep);
+	pr_debug("ep %p\n", ep);
 	ep->com.cm_id = cm_id;
 	ref_cm_id(&ep->com);
 	ep->com.dev = dev;
@@ -3478,7 +3480,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	int err;
 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
 
-	pr_debug("%s ep %p\n", __func__, ep);
+	pr_debug("ep %p\n", ep);
 
 	might_sleep();
 	state_set(&ep->com, DEAD);
@@ -3489,13 +3491,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
 	} else {
 		struct sockaddr_in6 *sin6;
-		c4iw_init_wr_wait(&ep->com.wr_wait);
+		c4iw_init_wr_wait(ep->com.wr_waitp);
 		err = cxgb4_remove_server(
 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
 				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
 		if (err)
 			goto done;
-		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
 					  0, 0, __func__);
 		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
@@ -3519,7 +3521,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 
 	mutex_lock(&ep->com.mutex);
 
-	pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
+	pr_debug("ep %p state %s, abrupt %d\n", ep,
 		 states[ep->com.state], abrupt);
 
 	/*
@@ -3573,8 +3575,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	case MORIBUND:
 	case ABORTING:
 	case DEAD:
-		pr_debug("%s ignoring disconnect ep %p state %u\n",
-			 __func__, ep, ep->com.state);
+		pr_info("%s ignoring disconnect ep %p state %u\n",
+			__func__, ep, ep->com.state);
 		break;
 	default:
 		BUG();
@@ -3636,6 +3638,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 			send_fw_act_open_req(ep, atid);
 			return;
 		}
+		/* fall through */
 	case FW_EADDRINUSE:
 		set_bit(ACT_RETRY_INUSE, &ep->com.history);
 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
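
The only non-mechanical change in this hunk is the new /* fall through */ marker: once the retry budget is spent, control deliberately drops from the retry case into the FW_EADDRINUSE handling, and this comment form is one that GCC's -Wimplicit-fallthrough recognizes, so the intent is now machine-checked. A standalone sketch of the pattern (illustrative values and names, not from the driver):

#include <stdio.h>

static int handle(int retval, int *retries)
{
	switch (retval) {
	case 1:				/* transient failure */
		if ((*retries)++ < 2)
			return 0;	/* caller retries */
		/* fall through */
	case 2:				/* hard failure */
		return -1;		/* caller gives up */
	default:
		return 0;
	}
}

int main(void)
{
	int retries = 0;

	while (handle(1, &retries) == 0)
		;
	printf("gave up after %d attempts\n", retries);
	return 0;
}
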
@@ -3678,7 +3681,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
 	BUG_ON(!rpl_skb);
 	if (req->retval) {
-		pr_debug("%s passive open failure %d\n", __func__, req->retval);
+		pr_err("%s passive open failure %d\n", __func__, req->retval);
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.pas_ofld_conn_fails++;
 		mutex_unlock(&dev->rdev.stats.lock);
@@ -3874,7 +3877,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct net_device *pdev;
 	u16 rss_qid, eth_hdr_len;
 	int step;
-	u32 tx_chan;
 	struct neighbour *neigh;
 
 	/* Drop all non-SYN packets */
@@ -3895,8 +3897,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!lep) {
-		pr_debug("%s connect request on invalid stid %d\n",
-			 __func__, stid);
+		pr_warn("%s connect request on invalid stid %d\n",
+			__func__, stid);
 		goto reject;
 	}
 
@@ -3933,7 +3935,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
 	skb_get(skb);
 
-	pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+	pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
 		 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
 		 ntohs(tcph->source), iph->tos);
 
@@ -3941,15 +3943,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 			      iph->daddr, iph->saddr, tcph->dest,
 			      tcph->source, iph->tos);
 	if (!dst) {
-		pr_err("%s - failed to find dst entry!\n",
-		       __func__);
+		pr_err("%s - failed to find dst entry!\n", __func__);
 		goto reject;
 	}
 	neigh = dst_neigh_lookup_skb(dst, skb);
 
 	if (!neigh) {
-		pr_err("%s - failed to allocate neigh!\n",
-		       __func__);
+		pr_err("%s - failed to allocate neigh!\n", __func__);
 		goto free_dst;
 	}
 
@@ -3958,14 +3958,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
 				    pdev, 0);
 		pi = (struct port_info *)netdev_priv(pdev);
-		tx_chan = cxgb4_port_chan(pdev);
 		dev_put(pdev);
 	} else {
 		pdev = get_real_dev(neigh->dev);
 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
 					pdev, 0);
 		pi = (struct port_info *)netdev_priv(pdev);
-		tx_chan = cxgb4_port_chan(pdev);
 	}
 	neigh_release(neigh);
 	if (!e) {
@@ -4032,8 +4030,7 @@ static void process_timeout(struct c4iw_ep *ep)
 	int abort = 1;
 
 	mutex_lock(&ep->com.mutex);
-	pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-		 ep->com.state);
+	pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
 	set_bit(TIMEDOUT, &ep->com.history);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
@@ -4176,15 +4173,15 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_wr_wait *wr_waitp;
 	int ret;
 
-	pr_debug("%s type %u\n", __func__, rpl->type);
+	pr_debug("type %u\n", rpl->type);
 
 	switch (rpl->type) {
 	case FW6_TYPE_WR_RPL:
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
-		pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+		pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
 		if (wr_waitp)
-			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+			c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
 	case FW6_TYPE_CQE:
@@ -4214,15 +4211,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 	if (cxgb_is_neg_adv(req->status)) {
-		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
-			 __func__, ep->hwtid, req->status,
-			 neg_adv_str(req->status));
+		pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+			__func__, ep->hwtid, req->status,
+			neg_adv_str(req->status));
 		goto out;
 	}
-	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
-		 ep->com.state);
+	pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
 
-	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
 out:
 	sched(dev, skb);
 	return 0;

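Two mechanical conversions account for nearly all of cm.c. First, pr_debug() callsites drop the leading "%s"/__func__ pair: dynamic debug can prepend the function name at runtime (the f flag in /sys/kernel/debug/dynamic_debug/control), so hard-coding it only bloats every format string, and the handful of messages that report real failures move up to pr_info/pr_warn/pr_err so they are visible without debugging enabled. Second, the wait object moves from the embedded ep->com.wr_wait to the pointer ep->com.wr_waitp, and CPL reply handlers wake waiters with c4iw_wake_up_noref(): the handler borrows the endpoint's reference rather than owning one of its own. Condensed from create_server4() and pass_open_rpl() above, the issue/complete pairing now reads:

	/* issuing side (process context) */
	c4iw_init_wr_wait(ep->com.wr_waitp);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
			ep->stid, sin->sin_addr.s_addr, sin->sin_port,
			0, ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  ep->com.wr_waitp, 0, 0, __func__);

	/* completion side (pass_open_rpl, reply handler context): it holds
	 * no reference of its own to drop, hence the _noref variant */
	c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
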
+ 59 - 58
drivers/infiniband/hw/cxgb4/cq.c

@@ -33,12 +33,12 @@
 #include "iw_cxgb4.h"
 
 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+		      struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
-	struct c4iw_wr_wait wr_wait;
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
@@ -50,17 +50,14 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (uintptr_t)&wr_wait;
+	res_wr->cookie = (uintptr_t)wr_waitp;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_RESET;
 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
 
-	c4iw_init_wr_wait(&wr_wait);
-	ret = c4iw_ofld_send(rdev, skb);
-	if (!ret) {
-		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
-	}
+	c4iw_init_wr_wait(wr_waitp);
+	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
 
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
@@ -71,13 +68,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 }
 
 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		     struct c4iw_dev_ucontext *uctx)
+		     struct c4iw_dev_ucontext *uctx,
+		     struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
 	int user = (uctx != &rdev->uctx);
-	struct c4iw_wr_wait wr_wait;
 	int ret;
 	struct sk_buff *skb;
 
@@ -119,7 +116,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (uintptr_t)&wr_wait;
+	res_wr->cookie = (uintptr_t)wr_waitp;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +136,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res->u.cq.iqsize = cpu_to_be16(cq->size);
 	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
-	c4iw_init_wr_wait(&wr_wait);
-
-	ret = c4iw_ofld_send(rdev, skb);
-	if (ret)
-		goto err4;
-	pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
-	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	c4iw_init_wr_wait(wr_waitp);
+	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
 	if (ret)
 		goto err4;
 
@@ -178,7 +170,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 {
 	struct t4_cqe cqe;
 
-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -197,7 +189,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
 	int in_use = wq->rq.in_use - count;
 
 	BUG_ON(in_use < 0);
-	pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
 		 wq, cq, wq->rq.in_use, count);
 	while (in_use--) {
 		insert_recv_cqe(wq, cq);
@@ -211,7 +203,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 {
 	struct t4_cqe cqe;
 
-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -281,8 +273,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
 			/*
 			 * Insert this completed cqe into the swcq.
 			 */
-			pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
-				 __func__, cidx, cq->sw_pidx);
+			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+				 cidx, cq->sw_pidx);
 			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
 			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
 			t4_swcq_produce(cq);
@@ -337,7 +329,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 	struct t4_swsqe *swsqe;
 	int ret;
 
-	pr_debug("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
+	pr_debug("cqid 0x%x\n", chp->cq.cqid);
 	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
 
 	/*
@@ -430,7 +422,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 	u32 ptr;
 
 	*count = 0;
-	pr_debug("%s count zero %d\n", __func__, *count);
+	pr_debug("count zero %d\n", *count);
 	ptr = cq->sw_cidx;
 	while (ptr != cq->sw_pidx) {
 		cqe = &cq->sw_queue[ptr];
@@ -440,7 +432,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 		if (++ptr == cq->size)
 			ptr = 0;
 	}
-	pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+	pr_debug("cq %p count %d\n", cq, *count);
 }
 
 /*
@@ -471,8 +463,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	if (ret)
 		return ret;
 
-	pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-		 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
 		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
 		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
 		 CQE_WRID_LOW(hw_cqe));
@@ -603,8 +595,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
 		struct t4_swsqe *swsqe;
 
-		pr_debug("%s out of order completion going in sw_sq at idx %u\n",
-			 __func__, CQE_WRID_SQ_IDX(hw_cqe));
+		pr_debug("out of order completion going in sw_sq at idx %u\n",
+			 CQE_WRID_SQ_IDX(hw_cqe));
 		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
 		swsqe->cqe = *hw_cqe;
 		swsqe->complete = 1;
@@ -638,13 +630,13 @@ proc_cqe:
 		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
 
 		wq->sq.cidx = (uint16_t)idx;
-		pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+		pr_debug("completing sq idx %u\n", wq->sq.cidx);
 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
 		if (c4iw_wr_log)
 			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_sq_consume(wq);
 	} else {
-		pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+		pr_debug("completing rq idx %u\n", wq->rq.cidx);
 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
 		BUG_ON(t4_rq_empty(wq));
 		if (c4iw_wr_log)
@@ -661,12 +653,12 @@ flush_wq:
 
 skip_cqe:
 	if (SW_CQE(hw_cqe)) {
-		pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->sw_cidx);
+		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+			 cq, cq->cqid, cq->sw_cidx);
 		t4_swcq_consume(cq);
 	} else {
-		pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->cidx);
+		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+			 cq, cq->cqid, cq->cidx);
 		t4_hwcq_consume(cq);
 	}
 	return ret;
@@ -712,8 +704,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 	wc->vendor_err = CQE_STATUS(&cqe);
 	wc->wc_flags = 0;
 
-	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
-		 __func__, CQE_QPID(&cqe),
+	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+		 CQE_QPID(&cqe),
 		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
 		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
 		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +849,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	struct c4iw_cq *chp;
 	struct c4iw_ucontext *ucontext;
 
-	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+	pr_debug("ib_cq %p\n", ib_cq);
 	chp = to_c4iw_cq(ib_cq);
 
 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -868,8 +860,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 				  : NULL;
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
-		   chp->destroy_skb);
-	chp->destroy_skb = NULL;
+		   chp->destroy_skb, chp->wr_waitp);
+	c4iw_put_wr_wait(chp->wr_waitp);
 	kfree(chp);
 	return 0;
 }
@@ -889,7 +881,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
-	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
 	if (attr->flags)
 		return ERR_PTR(-EINVAL);
 
@@ -901,12 +893,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
+	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!chp->wr_waitp) {
+		ret = -ENOMEM;
+		goto err_free_chp;
+	}
+	c4iw_init_wr_wait(chp->wr_waitp);
 
 	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
 	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!chp->destroy_skb) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err_free_wr_wait;
 	}
 
 	if (ib_context)
@@ -947,9 +945,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	chp->cq.vector = vector;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
-			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+			chp->wr_waitp);
 	if (ret)
-		goto err2;
+		goto err_free_skb;
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
@@ -960,16 +959,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 	if (ret)
-		goto err3;
+		goto err_destroy_cq;
 
 	if (ucontext) {
 		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
-			goto err4;
+			goto err_remove_handle;
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2)
-			goto err5;
+			goto err_free_mm;
 
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
@@ -984,7 +983,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		ret = ib_copy_to_udata(udata, &uresp,
 				       sizeof(uresp) - sizeof(uresp.reserved));
 		if (ret)
-			goto err6;
+			goto err_free_mm2;
 
 		mm->key = uresp.key;
 		mm->addr = virt_to_phys(chp->cq.queue);
@@ -996,23 +995,25 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		mm2->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm2);
 	}
-	pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-		 __func__, chp->cq.cqid, chp, chp->cq.size,
+	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+		 chp->cq.cqid, chp, chp->cq.size,
 		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
 	return &chp->ibcq;
-err6:
+err_free_mm2:
 	kfree(mm2);
-err5:
+err_free_mm:
 	kfree(mm);
-err4:
+err_remove_handle:
 	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
-		   chp->destroy_skb);
-err2:
+		   chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
 	kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+	c4iw_put_wr_wait(chp->wr_waitp);
+err_free_chp:
 	kfree(chp);
 	return ERR_PTR(ret);
 }

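cq.c shows the two idioms this series leans on. The open-coded send-then-wait pair in create_cq()/destroy_cq() collapses into c4iw_ref_send_wait(), which takes a reference on behalf of the firmware reply before posting, and the numbered error labels (err1..err6) become self-describing (err_free_skb, err_destroy_cq, ...), so each unwind step names what it undoes. Side by side, condensed from create_cq():

	/* before: stack wait object, two calls, no lifetime protection */
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	/* after: heap wait object; the helper grabs a kref for the reply
	 * path, sends, and waits (dropping the ref itself on send failure) */
	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

One untouched context line in poll_cq() is worth flagging for a follow-up: BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size) can never fire as written, since the two sub-conditions are mutually exclusive; || looks intended, though that fix is outside this commit.
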
+ 31 - 10
drivers/infiniband/hw/cxgb4/device.c

@@ -811,8 +811,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
 	rdev->qpmask = rdev->lldi.udb_density - 1;
 	rdev->cqmask = rdev->lldi.ucq_density - 1;
-	pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
-		 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
 		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
 		 rdev->lldi.vr->pbl.start,
 		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
@@ -935,7 +935,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
 
 static void c4iw_remove(struct uld_ctx *ctx)
 {
-	pr_debug("%s c4iw_dev %p\n", __func__,  ctx->dev);
+	pr_debug("c4iw_dev %p\n", ctx->dev);
 	c4iw_unregister_device(ctx->dev);
 	c4iw_dealloc(ctx);
 }
@@ -969,8 +969,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.lldi = *infop;
 
 	/* init various hw-queue params based on lld info */
-	pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
-		 __func__, devp->rdev.lldi.sge_ingpadboundary,
+	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+		 devp->rdev.lldi.sge_ingpadboundary,
 		 devp->rdev.lldi.sge_egrstatuspagesize);
 
 	devp->rdev.hw_queue.t4_eq_status_entries =
@@ -1069,8 +1069,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 	}
 	ctx->lldi = *infop;
 
-	pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-		 __func__, pci_name(ctx->lldi.pdev),
+	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+		 pci_name(ctx->lldi.pdev),
 		 ctx->lldi.nchan, ctx->lldi.nrxq,
 		 ctx->lldi.ntxq, ctx->lldi.nports);
 
@@ -1102,8 +1102,8 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
 	if (unlikely(!skb))
 		return NULL;
 
-	 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
-		   sizeof(struct rss_header) - pktshift);
+	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+		  sizeof(struct rss_header) - pktshift);
 
 	/*
 	 * This skb will contain:
@@ -1203,7 +1203,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
 	struct uld_ctx *ctx = handle;
 
-	pr_debug("%s new_state %u\n", __func__, new_state);
+	pr_debug("new_state %u\n", new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
 		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
@@ -1518,6 +1518,27 @@ static struct cxgb4_uld_info c4iw_uld_info = {
 	.control = c4iw_uld_control,
 };
 
+void _c4iw_free_wr_wait(struct kref *kref)
+{
+	struct c4iw_wr_wait *wr_waitp;
+
+	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
+	pr_debug("Free wr_wait %p\n", wr_waitp);
+	kfree(wr_waitp);
+}
+
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
+{
+	struct c4iw_wr_wait *wr_waitp;
+
+	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
+	if (wr_waitp) {
+		kref_init(&wr_waitp->kref);
+		pr_debug("wr_wait %p\n", wr_waitp);
+	}
+	return wr_waitp;
+}
+
 static int __init c4iw_init_module(void)
 {
 	int err;

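device.c supplies the allocator and the kref release function for the now heap-allocated wait object: c4iw_alloc_wr_wait() kzallocs it with kref_init() (reference count 1 for the owner), and _c4iw_free_wr_wait() kfrees it once the last reference is dropped. Every structure that grows a wr_waitp pointer in this series (CQ, QP, MR, MW, endpoint) follows the same lifecycle, condensed here from c4iw_create_cq()/c4iw_destroy_cq() above:

	/* create: allocate the wait object alongside its owner */
	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	/* destroy: drop the owner's reference; a firmware reply still in
	 * flight holds its own and frees the object when it lands */
	c4iw_put_wr_wait(chp->wr_waitp);
	kfree(chp);
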
+ 1 - 1
drivers/infiniband/hw/cxgb4/ev.c

@@ -234,7 +234,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		if (atomic_dec_and_test(&chp->refcnt))
 			wake_up(&chp->wait);
 	} else {
-		pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+		pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
 		spin_unlock_irqrestore(&dev->lock, flag);
 	}
 	return 0;

+ 69 - 14
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -202,18 +202,50 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 struct c4iw_wr_wait {
 	struct completion completion;
 	int ret;
+	struct kref kref;
 };
 
+void _c4iw_free_wr_wait(struct kref *kref);
+
+static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+	pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
+		 kref_read(&wr_waitp->kref));
+	WARN_ON(kref_read(&wr_waitp->kref) == 0);
+	kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
+}
+
+static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+	pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
+		 kref_read(&wr_waitp->kref));
+	WARN_ON(kref_read(&wr_waitp->kref) == 0);
+	kref_get(&wr_waitp->kref);
+}
+
 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
 	wr_waitp->ret = 0;
 	init_completion(&wr_waitp->completion);
 }
 
-static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
+				 bool deref)
 {
 	wr_waitp->ret = ret;
 	complete(&wr_waitp->completion);
+	if (deref)
+		c4iw_put_wr_wait(wr_waitp);
+}
+
+static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	_c4iw_wake_up(wr_waitp, ret, false);
+}
+
+static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	_c4iw_wake_up(wr_waitp, ret, true);
 }
 
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -230,18 +262,40 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 
 	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
 	if (!ret) {
-		pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
-			 func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+		pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+		       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
 		rdev->flags |= T4_FATAL_ERROR;
 		wr_waitp->ret = -EIO;
+		goto out;
 	}
-out:
 	if (wr_waitp->ret)
 		pr_debug("%s: FW reply %d tid %u qpid %u\n",
 			 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+out:
 	return wr_waitp->ret;
 }
 
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+
+static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
+				     struct sk_buff *skb,
+				     struct c4iw_wr_wait *wr_waitp,
+				     u32 hwtid, u32 qpid,
+				     const char *func)
+{
+	int ret;
+
+	pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
+		 qpid);
+	c4iw_get_wr_wait(wr_waitp);
+	ret = c4iw_ofld_send(rdev, skb);
+	if (ret) {
+		c4iw_put_wr_wait(wr_waitp);
+		return ret;
+	}
+	return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
+}
+
 enum db_state {
 	NORMAL = 0,
 	FLOW_CONTROL = 1,
@@ -394,6 +448,7 @@ struct c4iw_mr {
 	dma_addr_t mpl_addr;
 	u32 max_mpl_len;
 	u32 mpl_len;
+	struct c4iw_wr_wait *wr_waitp;
 };
 
 static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
@@ -407,6 +462,7 @@ struct c4iw_mw {
 	struct sk_buff *dereg_skb;
 	u64 kva;
 	struct tpt_attributes attr;
+	struct c4iw_wr_wait *wr_waitp;
 };
 
 static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
@@ -423,6 +479,7 @@ struct c4iw_cq {
 	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
+	struct c4iw_wr_wait *wr_waitp;
 };
 
 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
@@ -484,6 +541,7 @@ struct c4iw_qp {
 	int sq_sig_all;
 	struct work_struct free_work;
 	struct c4iw_ucontext *ucontext;
+	struct c4iw_wr_wait *wr_waitp;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -537,8 +595,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-				 __func__, key,
+			pr_debug("key 0x%x addr 0x%llx len %d\n", key,
 				 (unsigned long long)mm->addr, mm->len);
 			return mm;
 		}
@@ -551,8 +608,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
 			       struct c4iw_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-		 __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+	pr_debug("key 0x%x addr 0x%llx len %d\n",
+		 mm->key, (unsigned long long)mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
 }
@@ -671,16 +728,14 @@ enum c4iw_mmid_state {
 #define MPA_V2_IRD_ORD_MASK             0x3FFF
 
 #define c4iw_put_ep(ep) {						\
-	pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n",		\
-		 __func__, __LINE__,					\
+	pr_debug("put_ep ep %p refcnt %d\n",		\
 		 ep, kref_read(&((ep)->kref)));				\
 	WARN_ON(kref_read(&((ep)->kref)) < 1);				\
 	kref_put(&((ep)->kref), _c4iw_free_ep);				\
 }
 
 #define c4iw_get_ep(ep) {						\
-	pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n",		\
-		 __func__, __LINE__,					\
+	pr_debug("get_ep ep %p, refcnt %d\n",		\
 		 ep, kref_read(&((ep)->kref)));				\
 	kref_get(&((ep)->kref));					\
 }
@@ -841,7 +896,7 @@ struct c4iw_ep_common {
 	struct mutex mutex;
 	struct sockaddr_storage local_addr;
 	struct sockaddr_storage remote_addr;
-	struct c4iw_wr_wait wr_wait;
+	struct c4iw_wr_wait *wr_waitp;
 	unsigned long flags;
 	unsigned long history;
 };
@@ -990,7 +1045,6 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
 void c4iw_flush_hw_cq(struct c4iw_cq *chp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
@@ -1018,5 +1072,6 @@ extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 
 #endif

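This header hunk is the core of the series. struct c4iw_wr_wait gains a kref, and wake-up splits into c4iw_wake_up_noref() (the waker holds no reference of its own, e.g. CPL handlers reached via the endpoint) and c4iw_wake_up_deref() (the FW6 work-request reply path, which owns the reference c4iw_ref_send_wait() took before posting). The refcount closes a lifetime race the old stack-allocated object could not survive: if c4iw_wait_for_reply() timed out and its caller unwound, a late firmware reply would complete a wait object sitting in a dead stack frame. A minimal model of the fixed protocol, with hypothetical names (post_to_fw() stands in for c4iw_ofld_send() plus the firmware round trip):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct wait_obj {
	struct completion comp;
	struct kref kref;	/* owner and in-flight reply each hold one */
	int ret;
};

int post_to_fw(struct wait_obj *w);	/* hypothetical send primitive */

static void wait_obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct wait_obj, kref));
}

/* issuer: hand one reference to the reply path before posting */
static int post_and_wait(struct wait_obj *w)
{
	kref_get(&w->kref);
	if (post_to_fw(w)) {
		kref_put(&w->kref, wait_obj_release); /* reply never owned it */
		return -EIO;
	}
	if (!wait_for_completion_timeout(&w->comp, 30 * HZ))
		return -ETIMEDOUT;	/* safe: the reply's reference keeps
					 * *w alive even after we unwind */
	return w->ret;
}

/* firmware reply handler: complete the waiter, then drop its reference */
static void fw_reply(struct wait_obj *w, int ret)
{
	w->ret = ret;
	complete(&w->comp);
	kref_put(&w->kref, wait_obj_release);
}
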
+ 160 - 108
drivers/infiniband/hw/cxgb4/mem.c

@@ -60,18 +60,18 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 				       u32 len, dma_addr_t data,
-				       int wait, struct sk_buff *skb)
+				       struct sk_buff *skb,
+				       struct c4iw_wr_wait *wr_waitp)
 {
 	struct ulp_mem_io *req;
 	struct ulptx_sgl *sgl;
 	u8 wr_len;
 	int ret = 0;
-	struct c4iw_wr_wait wr_wait;
 
 	addr &= 0x7FFFFFF;
 
-	if (wait)
-		c4iw_init_wr_wait(&wr_wait);
+	if (wr_waitp)
+		c4iw_init_wr_wait(wr_waitp);
 	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
 
 	if (!skb) {
@@ -84,8 +84,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	req = __skb_put_zero(skb, wr_len);
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
 	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
-			(wait ? FW_WR_COMPL_F : 0));
-	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
+			(wr_waitp ? FW_WR_COMPL_F : 0));
+	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
 			       T5_ULP_MEMIO_ORDER_V(1) |
@@ -100,22 +100,21 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	sgl->len0 = cpu_to_be32(len);
 	sgl->addr0 = cpu_to_be64(data);
 
-	ret = c4iw_ofld_send(rdev, skb);
-	if (ret)
-		return ret;
-	if (wait)
-		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	if (wr_waitp)
+		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+	else
+		ret = c4iw_ofld_send(rdev, skb);
 	return ret;
 }
 
 static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
-				  void *data, struct sk_buff *skb)
+				  void *data, struct sk_buff *skb,
+				  struct c4iw_wr_wait *wr_waitp)
 {
 	struct ulp_mem_io *req;
 	struct ulptx_idata *sc;
 	u8 wr_len, *to_dp, *from_dp;
 	int copy_len, num_wqe, i, ret = 0;
-	struct c4iw_wr_wait wr_wait;
 	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
 
 	if (is_t4(rdev->lldi.adapter_type))
@@ -124,9 +123,9 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
 
 	addr &= 0x7FFFFFF;
-	pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+	pr_debug("addr 0x%x len %u\n", addr, len);
 	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
-	c4iw_init_wr_wait(&wr_wait);
+	c4iw_init_wr_wait(wr_waitp);
 	for (i = 0; i < num_wqe; i++) {
 
 		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
@@ -147,7 +146,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		if (i == (num_wqe-1)) {
 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
 						    FW_WR_COMPL_F);
-			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
+			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
 		} else
 			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
 		req->wr.wr_mid = cpu_to_be32(
@@ -173,19 +172,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		if (copy_len % T4_ULPTX_MIN_IO)
 			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
 			       (copy_len % T4_ULPTX_MIN_IO));
-		ret = c4iw_ofld_send(rdev, skb);
-		skb = NULL;
+		if (i == (num_wqe-1))
+			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
+						 __func__);
+		else
+			ret = c4iw_ofld_send(rdev, skb);
 		if (ret)
-			return ret;
+			break;
+		skb = NULL;
 		len -= C4IW_MAX_INLINE_SIZE;
 	}
 
-	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
 	return ret;
 }
 
 static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
-			       void *data, struct sk_buff *skb)
+			       void *data, struct sk_buff *skb,
+			       struct c4iw_wr_wait *wr_waitp)
 {
 	u32 remain = len;
 	u32 dmalen;
@@ -208,7 +211,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
 			dmalen = T4_ULPTX_MAX_DMA;
 		remain -= dmalen;
 		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
-						 !remain, skb);
+						 skb, remain ? NULL : wr_waitp);
 		if (ret)
 			goto out;
 		addr += dmalen >> 5;
@@ -216,7 +219,8 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		daddr += dmalen;
 	}
 	if (remain)
-		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
+		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
+					     wr_waitp);
 out:
 	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
 	return ret;
@@ -227,23 +231,33 @@ out:
  * If data is NULL, clear len byte of memory to zero.
  */
 static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
-			     void *data, struct sk_buff *skb)
+			     void *data, struct sk_buff *skb,
+			     struct c4iw_wr_wait *wr_waitp)
 {
-	if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
-		if (len > inline_threshold) {
-			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
-				pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
-						    pci_name(rdev->lldi.pdev));
-				return _c4iw_write_mem_inline(rdev, addr, len,
-							      data, skb);
-			} else {
-				return 0;
-			}
-		} else
-			return _c4iw_write_mem_inline(rdev, addr,
-						      len, data, skb);
-	} else
-		return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
+	int ret;
+
+	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
+		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+					      wr_waitp);
+		goto out;
+	}
+
+	if (len <= inline_threshold) {
+		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+					      wr_waitp);
+		goto out;
+	}
+
+	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
+	if (ret) {
+		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+				    pci_name(rdev->lldi.pdev));
+		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+					      wr_waitp);
+	}
+out:
+	return ret;
+
 }
 
 /*
@@ -257,7 +271,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
 			   int bind_enabled, u32 zbva, u64 to,
 			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
-			   struct sk_buff *skb)
+			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
 	int err;
 	struct fw_ri_tpte tpt;
@@ -285,8 +299,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
-	pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-		 __func__, stag_state, type, pdid, stag_idx);
+	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+		 stag_state, type, pdid, stag_idx);
 
 	/* write TPT entry */
 	if (reset_tpt_entry)
@@ -311,7 +325,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	}
 	err = write_adapter_mem(rdev, stag_idx +
 				(rdev->lldi.vr->stag.start >> 5),
-				sizeof(tpt), &tpt, skb);
+				sizeof(tpt), &tpt, skb, wr_waitp);
 
 	if (reset_tpt_entry) {
 		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -323,45 +337,50 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 }
 
 static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
-		     u32 pbl_addr, u32 pbl_size)
+		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
 {
 	int err;
 
-	pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+	pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+		 pbl_addr, rdev->lldi.vr->pbl.start,
 		 pbl_size);
 
-	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
+	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
+				wr_waitp);
 	return err;
 }
 
 static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
-		     u32 pbl_addr, struct sk_buff *skb)
+		     u32 pbl_addr, struct sk_buff *skb,
+		     struct c4iw_wr_wait *wr_waitp)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
-			       pbl_size, pbl_addr, skb);
+			       pbl_size, pbl_addr, skb, wr_waitp);
 }
 
-static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+			   struct c4iw_wr_wait *wr_waitp)
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
-			       0UL, 0, 0, 0, 0, NULL);
+			       0UL, 0, 0, 0, 0, NULL, wr_waitp);
 }
 
 static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
-			     struct sk_buff *skb)
+			     struct sk_buff *skb,
+			     struct c4iw_wr_wait *wr_waitp)
 {
 	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
-			       0, skb);
+			       0, skb, wr_waitp);
 }
 
 static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
-			 u32 pbl_size, u32 pbl_addr)
+			 u32 pbl_size, u32 pbl_addr,
+			 struct c4iw_wr_wait *wr_waitp)
 {
 	*stag = T4_STAG_UNSET;
 	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
-			       0UL, 0, 0, pbl_size, pbl_addr, NULL);
+			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
 }
 
 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -372,7 +391,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
 	mhp->attr.stag = stag;
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
 	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }
 
@@ -388,14 +407,15 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
 			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
 			      mhp->attr.va_fbo, mhp->attr.len ?
 			      mhp->attr.len : -1, shift - 12,
-			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
+			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
+			      mhp->wr_waitp);
 	if (ret)
 		return ret;
 
 	ret = finish_mem_reg(mhp, stag);
 	if (ret) {
 		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-			  mhp->attr.pbl_addr, mhp->dereg_skb);
+			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
 		mhp->dereg_skb = NULL;
 	}
 	return ret;
@@ -422,18 +442,24 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	int ret;
 	u32 stag = T4_STAG_UNSET;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
+	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!mhp->wr_waitp) {
+		ret = -ENOMEM;
+		goto err_free_mhp;
+	}
+	c4iw_init_wr_wait(mhp->wr_waitp);
 
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
 	if (!mhp->dereg_skb) {
 		ret = -ENOMEM;
-		goto err0;
+		goto err_free_wr_wait;
 	}
 
 	mhp->rhp = rhp;
@@ -449,20 +475,22 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
 			      FW_RI_STAG_NSMR, mhp->attr.perms,
 			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
-			      NULL);
+			      NULL, mhp->wr_waitp);
 	if (ret)
-		goto err1;
+		goto err_free_skb;
 
 	ret = finish_mem_reg(mhp, stag);
 	if (ret)
-		goto err2;
+		goto err_dereg_mem;
 	return &mhp->ibmr;
-err2:
+err_dereg_mem:
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr, mhp->dereg_skb);
-err1:
+		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_skb:
 	kfree_skb(mhp->dereg_skb);
-err0:
+err_free_mhp:
 	kfree(mhp);
 	return ERR_PTR(ret);
 }
@@ -473,13 +501,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	__be64 *pages;
 	int shift, n, len;
 	int i, k, entry;
-	int err = 0;
+	int err = -ENOMEM;
 	struct scatterlist *sg;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 
 	if (length == ~0ULL)
 		return ERR_PTR(-EINVAL);
@@ -496,34 +524,31 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
+	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!mhp->wr_waitp)
+		goto err_free_mhp;
 
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
-	if (!mhp->dereg_skb) {
-		kfree(mhp);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!mhp->dereg_skb)
+		goto err_free_wr_wait;
 
 	mhp->rhp = rhp;
 
 	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
-	if (IS_ERR(mhp->umem)) {
-		err = PTR_ERR(mhp->umem);
-		kfree_skb(mhp->dereg_skb);
-		kfree(mhp);
-		return ERR_PTR(err);
-	}
+	if (IS_ERR(mhp->umem))
+		goto err_free_skb;
 
 	shift = mhp->umem->page_shift;
 
 	n = mhp->umem->nmap;
 	err = alloc_pbl(mhp, n);
 	if (err)
-		goto err;
+		goto err_umem_release;
 
 	pages = (__be64 *) __get_free_page(GFP_KERNEL);
 	if (!pages) {
 		err = -ENOMEM;
-		goto err_pbl;
+		goto err_pbl_free;
 	}
 
 	i = n = 0;
@@ -536,7 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			if (i == PAGE_SIZE / sizeof *pages) {
 				err = write_pbl(&mhp->rhp->rdev,
 				      pages,
-				      mhp->attr.pbl_addr + (n << 3), i);
+				      mhp->attr.pbl_addr + (n << 3), i,
+				      mhp->wr_waitp);
 				if (err)
 					goto pbl_done;
 				n += i;
@@ -547,12 +573,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	if (i)
 		err = write_pbl(&mhp->rhp->rdev, pages,
-				     mhp->attr.pbl_addr + (n << 3), i);
+				mhp->attr.pbl_addr + (n << 3), i,
+				mhp->wr_waitp);
 
 pbl_done:
 	free_page((unsigned long) pages);
 	if (err)
-		goto err_pbl;
+		goto err_pbl_free;
 
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.zbva = 0;
@@ -563,17 +590,20 @@ pbl_done:
 
 	err = register_mem(rhp, php, mhp, shift);
 	if (err)
-		goto err_pbl;
+		goto err_pbl_free;
 
 	return &mhp->ibmr;
 
-err_pbl:
+err_pbl_free:
 	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 			      mhp->attr.pbl_size << 3);
-
-err:
+err_umem_release:
 	ib_umem_release(mhp->umem);
+err_free_skb:
 	kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
 	kfree(mhp);
 	return ERR_PTR(err);
 }
@@ -597,13 +627,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
+	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!mhp->wr_waitp) {
+		ret = -ENOMEM;
+		goto free_mhp;
+	}
+
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
 	if (!mhp->dereg_skb) {
 		ret = -ENOMEM;
-		goto free_mhp;
+		goto free_wr_wait;
 	}
 
-	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
 	if (ret)
 		goto free_skb;
 	mhp->rhp = rhp;
@@ -616,13 +652,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmw);
 
 dealloc_win:
-	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+			  mhp->wr_waitp);
 free_skb:
 	kfree_skb(mhp->dereg_skb);
+free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
 free_mhp:
 	kfree(mhp);
 	return ERR_PTR(ret);
@@ -638,10 +677,12 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
 	remove_handle(rhp, &rhp->mmidr, mmid);
-	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+			  mhp->wr_waitp);
 	kfree_skb(mhp->dereg_skb);
+	c4iw_put_wr_wait(mhp->wr_waitp);
 	kfree(mhp);
-	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
 	return 0;
 }
 
@@ -671,23 +712,31 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 		goto err;
 	}
 
+	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!mhp->wr_waitp) {
+		ret = -ENOMEM;
+		goto err_free_mhp;
+	}
+	c4iw_init_wr_wait(mhp->wr_waitp);
+
 	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
 				      length, &mhp->mpl_addr, GFP_KERNEL);
 	if (!mhp->mpl) {
 		ret = -ENOMEM;
-		goto err_mpl;
+		goto err_free_wr_wait;
 	}
 	mhp->max_mpl_len = length;
 
 	mhp->rhp = rhp;
 	ret = alloc_pbl(mhp, max_num_sg);
 	if (ret)
-		goto err1;
+		goto err_free_dma;
 	mhp->attr.pbl_size = max_num_sg;
 	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
-				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
+			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
+			    mhp->wr_waitp);
 	if (ret)
-		goto err2;
+		goto err_free_pbl;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.type = FW_RI_STAG_NSMR;
 	mhp->attr.stag = stag;
@@ -696,21 +745,23 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
 		ret = -ENOMEM;
-		goto err3;
+		goto err_dereg;
 	}
 
-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmr);
-err3:
+err_dereg:
 	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr, mhp->dereg_skb);
-err2:
+		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_pbl:
 	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 			      mhp->attr.pbl_size << 3);
-err1:
+err_free_dma:
 	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
 			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
-err_mpl:
+err_free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
 	kfree(mhp);
 err:
 	return ERR_PTR(ret);
@@ -744,7 +795,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	struct c4iw_mr *mhp;
 	u32 mmid;
 
-	pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+	pr_debug("ib_mr %p\n", ib_mr);
 
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
@@ -754,7 +805,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
 				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
-		  mhp->attr.pbl_addr, mhp->dereg_skb);
+		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);
@@ -762,7 +813,8 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
+	c4iw_put_wr_wait(mhp->wr_waitp);
 	kfree(mhp);
 	return 0;
 }
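
The memory-registration paths above all make the same conversion: the wait object used for firmware replies is no longer embedded in the caller's stack frame but allocated per verbs object with c4iw_alloc_wr_wait() and released by reference count with c4iw_put_wr_wait(), so a late firmware reply can never write into a freed frame. The numbered error labels (err1, err2, ...) are also renamed so each label states the first cleanup step it performs. The helpers themselves are defined in iw_cxgb4.h, outside this diff; a minimal sketch of their likely shape, assuming a kref-based lifetime:

	struct c4iw_wr_wait {
		struct completion completion;
		int ret;
		struct kref kref;
	};

	static inline struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
	{
		struct c4iw_wr_wait *wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);

		if (wr_waitp)
			kref_init(&wr_waitp->kref);	/* owner's reference */
		return wr_waitp;
	}

	static void free_wr_wait(struct kref *kref)
	{
		kfree(container_of(kref, struct c4iw_wr_wait, kref));
	}

	static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
	{
		kref_put(&wr_waitp->kref, free_wr_wait);
	}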

+ 18 - 18
drivers/infiniband/hw/cxgb4/provider.c

@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
 	ucontext = container_of(kref, struct c4iw_ucontext, kref);
 	rhp = to_c4iw_dev(ucontext->ibucontext.device);
 
-	pr_debug("%s ucontext %p\n", __func__, ucontext);
+	pr_debug("ucontext %p\n", ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
 
-	pr_debug("%s context %p\n", __func__, context);
+	pr_debug("context %p\n", context);
 	c4iw_put_ucontext(ucontext);
 	return 0;
 }
@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context) {
 		ret = -ENOMEM;
@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct c4iw_ucontext *ucontext;
 	u64 addr;
 
-	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
 		 key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1))
@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
 
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
-	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+	pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
 	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
 	mutex_lock(&rhp->rdev.stats.lock);
 	rhp->rdev.stats.pd.cur--;
@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	u32 pdid;
 	struct c4iw_dev *rhp;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	rhp = (struct c4iw_dev *) ibdev;
 	pdid =  c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
 		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
 	mutex_unlock(&rhp->rdev.stats.lock);
-	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
 	return &php->ibpd;
 }
 
 static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			   u16 *pkey)
 {
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	*pkey = 0;
 	return 0;
 }
@@ -308,8 +308,8 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
 {
 	struct c4iw_dev *dev;
 
-	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
-		 __func__, ibdev, port, index, gid);
+	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+		 ibdev, port, index, gid);
 	dev = to_c4iw_dev(ibdev);
 	BUG_ON(port == 0);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -323,7 +323,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 
 	struct c4iw_dev *dev;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 
 	if (uhw->inlen || uhw->outlen)
 		return -EINVAL;
@@ -364,7 +364,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *netdev;
 	struct in_device *inetdev;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 
 	dev = to_c4iw_dev(ibdev);
 	netdev = dev->rdev.lldi.ports[port-1];
@@ -406,7 +406,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%d\n",
 		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
@@ -419,7 +419,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
 
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	return sprintf(buf, "%s\n", info.driver);
 }
@@ -429,7 +429,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
 		       c4iw_dev->rdev.lldi.pdev->device);
 }
@@ -521,7 +521,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 
 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
 		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -535,7 +535,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	int ret;
 	int i;
 
-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	BUG_ON(!dev->rdev.lldi.ports[0]);
 	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
@@ -645,7 +645,7 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 {
 	int i;
 
-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
 		device_remove_file(&dev->ibdev.dev,
 				   c4iw_class_attributes[i]);
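
Every hunk in this file makes one change: dropping __func__ from pr_debug() format strings. With CONFIG_DYNAMIC_DEBUG the function name can be requested per call site at run time, so hard-coding it in every format string is redundant. A usage sketch of the dynamic-debug facility:

	/*
	 * Enable these sites and prefix the function name at run time:
	 *   echo 'file provider.c +pf' > /sys/kernel/debug/dynamic_debug/control
	 * (+p turns the site on, +f adds the function name)
	 */
	pr_debug("ibdev %p\n", ibdev);	/* may print "c4iw_alloc_ucontext: ibdev ..." */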

+ 68 - 73
drivers/infiniband/hw/cxgb4/qp.c

@@ -194,13 +194,13 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
 
 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		     struct t4_cq *rcq, struct t4_cq *scq,
-		     struct c4iw_dev_ucontext *uctx)
+		     struct c4iw_dev_ucontext *uctx,
+		     struct c4iw_wr_wait *wr_waitp)
 {
 	int user = (uctx != &rdev->uctx);
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
-	struct c4iw_wr_wait wr_wait;
 	struct sk_buff *skb;
 	int ret = 0;
 	int eqsize;
@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		ret = -ENOMEM;
 		goto free_sq;
 	}
-	pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
-		 __func__, wq->sq.queue,
+	pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+		 wq->sq.queue,
 		 (unsigned long long)virt_to_phys(wq->sq.queue),
 		 wq->rq.queue,
 		 (unsigned long long)virt_to_phys(wq->rq.queue));
@@ -299,7 +299,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 			FW_RI_RES_WR_NRES_V(2) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (uintptr_t)&wr_wait;
+	res_wr->cookie = (uintptr_t)wr_waitp;
 	res = res_wr->res;
 	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
 	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -352,17 +352,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
 	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
 
-	c4iw_init_wr_wait(&wr_wait);
-
-	ret = c4iw_ofld_send(rdev, skb);
-	if (ret)
-		goto free_dma;
-	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
+	c4iw_init_wr_wait(wr_waitp);
+	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
 	if (ret)
 		goto free_dma;
 
-	pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
-		 __func__, wq->sq.qid, wq->rq.qid, wq->db,
+	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+		 wq->sq.qid, wq->rq.qid, wq->db,
 		 wq->sq.bar2_va, wq->rq.bar2_va);
 
 	return 0;
@@ -724,12 +720,13 @@ static void free_qp_work(struct work_struct *work)
 	ucontext = qhp->ucontext;
 	rhp = qhp->rhp;
 
-	pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
 	if (ucontext)
 		c4iw_put_ucontext(ucontext);
+	c4iw_put_wr_wait(qhp->wr_waitp);
 	kfree(qhp);
 }
 
@@ -738,19 +735,19 @@ static void queue_qp_free(struct kref *kref)
 	struct c4iw_qp *qhp;
 
 	qhp = container_of(kref, struct c4iw_qp, kref);
-	pr_debug("%s qhp %p\n", __func__, qhp);
+	pr_debug("qhp %p\n", qhp);
 	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_get(&to_c4iw_qp(qp)->kref);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
@@ -958,8 +955,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
 			break;
 		default:
-			pr_debug("%s post of type=%d TBD!\n", __func__,
-				 wr->opcode);
+			pr_warn("%s post of type=%d TBD!\n", __func__,
+				wr->opcode);
 			err = -EINVAL;
 		}
 		if (err) {
@@ -980,8 +977,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 
-		pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
 			 swsqe->opcode, swsqe->read_len);
 		wr = wr->next;
@@ -1057,8 +1053,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		pr_debug("%s cookie 0x%llx pidx %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
 		t4_rq_produce(&qhp->wq, len16);
 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
@@ -1218,7 +1213,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 	struct sk_buff *skb;
 	struct terminate_message *term;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
 		 qhp->ep->hwtid);
 
 	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
@@ -1255,7 +1250,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	int rq_flushed, sq_flushed;
 	unsigned long flag;
 
-	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);
@@ -1340,8 +1335,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int ret;
 	struct sk_buff *skb;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-		 ep->hwtid);
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
 
 	skb = skb_dequeue(&ep->com.ep_skb_list);
 	if (WARN_ON(!skb))
@@ -1357,23 +1351,20 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->flowid_len16 = cpu_to_be32(
 		FW_WR_FLOWID_V(ep->hwtid) |
 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
-	wqe->cookie = (uintptr_t)&ep->com.wr_wait;
+	wqe->cookie = (uintptr_t)ep->com.wr_waitp;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	ret = c4iw_ofld_send(&rhp->rdev, skb);
-	if (ret)
-		goto out;
 
-	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
-			     qhp->wq.sq.qid, __func__);
-out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
+				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+
+	pr_debug("ret %d\n", ret);
 	return ret;
 }
 
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
-	pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+	pr_debug("p2p_type = %d\n", p2p_type);
 	memset(&init->u, 0, sizeof init->u);
 	switch (p2p_type) {
 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1402,7 +1393,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	int ret;
 	struct sk_buff *skb;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
@@ -1427,7 +1418,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
-	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
+	wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
 
 	wqe->u.init.type = FW_RI_TYPE_INIT;
 	wqe->u.init.mpareqbit_p2ptype =
@@ -1464,18 +1455,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	ret = c4iw_ofld_send(&rhp->rdev, skb);
-	if (ret)
-		goto err1;
-
-	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
-				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+	ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
+				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
 	if (!ret)
 		goto out;
-err1:
+
 	free_ird(rhp, qhp->attr.max_ird);
 out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	pr_debug("ret %d\n", ret);
 	return ret;
 }
 
@@ -1492,8 +1479,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int free = 0;
 	struct c4iw_ep *ep = NULL;
 
-	pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
-		 __func__,
+	pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
 		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
 		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
@@ -1680,7 +1666,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	}
 	goto out;
 err:
-	pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+	pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
 		 qhp->wq.sq.qid);
 
 	/* disassociate the LLP connection */
@@ -1717,7 +1703,7 @@ out:
 	 */
 	if (free)
 		c4iw_put_ep(&ep->com);
-	pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+	pr_debug("exit state %d\n", qhp->attr.state);
 	return ret;
 }
 
@@ -1747,7 +1733,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 
 	c4iw_qp_rem_ref(ib_qp);
 
-	pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
 	return 0;
 }
 
@@ -1766,7 +1752,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 
 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);
@@ -1798,6 +1784,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
 		return ERR_PTR(-ENOMEM);
+
+	qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+	if (!qhp->wr_waitp) {
+		ret = -ENOMEM;
+		goto err_free_qhp;
+	}
+
 	qhp->wq.sq.size = sqsize;
 	qhp->wq.sq.memsize =
 		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
@@ -1814,9 +1807,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	}
 
 	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
-			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+			qhp->wr_waitp);
 	if (ret)
-		goto err1;
+		goto err_free_wr_wait;
 
 	attrs->cap.max_recv_wr = rqsize - 1;
 	attrs->cap.max_send_wr = sqsize - 1;
@@ -1847,35 +1841,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 
 	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 	if (ret)
-		goto err2;
+		goto err_destroy_qp;
 
 	if (udata) {
 		sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
 		if (!sq_key_mm) {
 			ret = -ENOMEM;
-			goto err3;
+			goto err_remove_handle;
 		}
 		rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
 		if (!rq_key_mm) {
 			ret = -ENOMEM;
-			goto err4;
+			goto err_free_sq_key;
 		}
 		sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
 		if (!sq_db_key_mm) {
 			ret = -ENOMEM;
-			goto err5;
+			goto err_free_rq_key;
 		}
 		rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
 		if (!rq_db_key_mm) {
 			ret = -ENOMEM;
-			goto err6;
+			goto err_free_sq_db_key;
 		}
 		if (t4_sq_onchip(&qhp->wq.sq)) {
 			ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
 						 GFP_KERNEL);
 			if (!ma_sync_key_mm) {
 				ret = -ENOMEM;
-				goto err7;
+				goto err_free_rq_db_key;
 			}
 			uresp.flags = C4IW_QPF_ONCHIP;
 		} else
@@ -1905,7 +1899,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		spin_unlock(&ucontext->mmap_lock);
 		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
 		if (ret)
-			goto err8;
+			goto err_free_ma_sync_key;
 		sq_key_mm->key = uresp.sq_key;
 		sq_key_mm->addr = qhp->wq.sq.phys_addr;
 		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1937,28 +1931,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));
 	INIT_LIST_HEAD(&qhp->db_fc_entry);
-	pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
-		 __func__,
+	pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
 		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
 		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
 		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
 	return &qhp->ibqp;
-err8:
+err_free_ma_sync_key:
 	kfree(ma_sync_key_mm);
-err7:
+err_free_rq_db_key:
 	kfree(rq_db_key_mm);
-err6:
+err_free_sq_db_key:
 	kfree(sq_db_key_mm);
-err5:
+err_free_rq_key:
 	kfree(rq_key_mm);
-err4:
+err_free_sq_key:
 	kfree(sq_key_mm);
-err3:
+err_remove_handle:
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-err2:
+err_destroy_qp:
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-err1:
+err_free_wr_wait:
+	c4iw_put_wr_wait(qhp->wr_waitp);
+err_free_qhp:
 	kfree(qhp);
 	return ERR_PTR(ret);
 }
@@ -1971,7 +1966,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum c4iw_qp_attr_mask mask = 0;
 	struct c4iw_qp_attributes attrs;
 
-	pr_debug("%s ib_qp %p\n", __func__, ibqp);
+	pr_debug("ib_qp %p\n", ibqp);
 
 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2017,7 +2012,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
 {
-	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
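
The recurring pattern in this file replaces the c4iw_ofld_send() plus c4iw_wait_for_reply() pair with a single c4iw_ref_send_wait() call that also manages the wait object's reference count. Its body is not part of this diff; a plausible shape, assuming a get/put pair brackets the firmware round trip (c4iw_get_wr_wait() is the presumed kref_get counterpart of c4iw_put_wr_wait()):

	int c4iw_ref_send_wait(struct c4iw_rdev *rdev, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp, u32 hwtid,
			       u32 qpid, const char *func)
	{
		int ret;

		c4iw_get_wr_wait(wr_waitp);	/* reference for the reply handler */
		ret = c4iw_ofld_send(rdev, skb);
		if (ret) {
			/* no reply will arrive; drop that reference here */
			c4iw_put_wr_wait(wr_waitp);
			return ret;
		}
		return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
	}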
 

+ 23 - 23
drivers/infiniband/hw/cxgb4/resource.c

@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
 
 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
 {
-	pr_debug("%s entry 0x%x\n", __func__, entry);
+	pr_debug("entry 0x%x\n", entry);
 	c4iw_id_free(id_table, entry);
 }
 
@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	mutex_lock(&rdev->stats.lock);
 	if (rdev->stats.qid.cur > rdev->stats.qid.max)
 		rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	entry->qid = qid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->cqids);
@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	mutex_lock(&rdev->stats.lock);
 	if (rdev->stats.qid.cur > rdev->stats.qid.max)
 		rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	pr_debug("%s qid 0x%x\n", __func__, qid);
+	pr_debug("qid 0x%x\n", qid);
 	entry->qid = qid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
 	mutex_lock(&rdev->stats.lock);
 	if (addr) {
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+	pr_debug("addr 0x%x size %d\n", addr, size);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 	while (pbl_start < pbl_top) {
 		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
 		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
-			pr_debug("%s failed to add PBL chunk (%x/%x)\n",
-				 __func__, pbl_start, pbl_chunk);
+			pr_debug("failed to add PBL chunk (%x/%x)\n",
+				 pbl_start, pbl_chunk);
 			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
 				pr_warn("Failed to add all PBL chunks (%x/%x)\n",
 					pbl_start, pbl_top - pbl_start);
@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 			}
 			pbl_chunk >>= 1;
 		} else {
-			pr_debug("%s added PBL chunk (%x/%x)\n",
-				 __func__, pbl_start, pbl_chunk);
+			pr_debug("added PBL chunk (%x/%x)\n",
+				 pbl_start, pbl_chunk);
 			pbl_start += pbl_chunk;
 		}
 	}
@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
 	if (!addr)
 		pr_warn_ratelimited("%s: Out of RQT memory\n",
 				    pci_name(rdev->lldi.pdev));
@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+	pr_debug("addr 0x%x size %d\n", addr, size << 6);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 	while (rqt_start < rqt_top) {
 		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
 		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
-			pr_debug("%s failed to add RQT chunk (%x/%x)\n",
-				 __func__, rqt_start, rqt_chunk);
+			pr_debug("failed to add RQT chunk (%x/%x)\n",
+				 rqt_start, rqt_chunk);
 			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
 				pr_warn("Failed to add all RQT chunks (%x/%x)\n",
 					rqt_start, rqt_top - rqt_start);
@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 			}
 			rqt_chunk >>= 1;
 		} else {
-			pr_debug("%s added RQT chunk (%x/%x)\n",
-				 __func__, rqt_start, rqt_chunk);
+			pr_debug("added RQT chunk (%x/%x)\n",
+				 rqt_start, rqt_chunk);
 			rqt_start += rqt_chunk;
 		}
 	}
@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
-	pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
 	if (addr) {
 		mutex_lock(&rdev->stats.lock);
 		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
-	pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+	pr_debug("addr 0x%x size %d\n", addr, size);
 	mutex_lock(&rdev->stats.lock);
 	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
 	while (start < top) {
 		chunk = min(top - start + 1, chunk);
 		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
-			pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
-				 __func__, start, chunk);
+			pr_debug("failed to add OCQP chunk (%x/%x)\n",
+				 start, chunk);
 			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
 				pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
 					start, top - start);
@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
 			}
 			chunk >>= 1;
 		} else {
-			pr_debug("%s added OCQP chunk (%x/%x)\n",
-				 __func__, start, chunk);
+			pr_debug("added OCQP chunk (%x/%x)\n",
+				 start, chunk);
 			start += chunk;
 		}
 	}
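
The three pool-create loops in this file share a binary-backoff strategy worth spelling out: gen_pool_add() allocates tracking state proportional to the chunk size (one bitmap bit per minimum-order unit), so seeding the pool with one huge chunk can fail on a large allocation. On failure the loop halves the chunk and retries, down to a floor such as 1024 << MIN_PBL_SHIFT. Distilled, with comments:

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT)
				break;		/* give up below the floor */
			pbl_chunk >>= 1;	/* too big to track; halve and retry */
		} else {
			pbl_start += pbl_chunk;	/* chunk seeded; advance */
		}
	}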

+ 8 - 12
drivers/infiniband/hw/cxgb4/t4.h

@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
 	wmb();
 	if (wq->sq.bar2_va) {
 		if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
-			pr_debug("%s: WC wq->sq.pidx = %d\n",
-				 __func__, wq->sq.pidx);
+			pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
 			pio_copy((u64 __iomem *)
 				 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
 				 (u64 *)wqe);
 		} else {
-			pr_debug("%s: DB wq->sq.pidx = %d\n",
-				 __func__, wq->sq.pidx);
+			pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
 			writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
 			       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
 		}
@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
 	wmb();
 	if (wq->rq.bar2_va) {
 		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
-			pr_debug("%s: WC wq->rq.pidx = %d\n",
-				 __func__, wq->rq.pidx);
+			pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
 			pio_copy((u64 __iomem *)
 				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
 				 (void *)wqe);
 		} else {
-			pr_debug("%s: DB wq->rq.pidx = %d\n",
-				 __func__, wq->rq.pidx);
+			pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
 			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
 			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
 		}
@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
 		BUG_ON(1);
 	}
@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
 		BUG_ON(1);
 		return NULL;
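
For context on the two doorbell paths left unchanged above: when exactly one WQE is posted (inc == 1) to a queue whose BAR2 offset allows it (bar2_qid == 0), the driver copies the whole WQE through the write-combining doorbell window, letting the NIC execute it without first fetching it over DMA; otherwise it writes only the producer-index increment to the kernel doorbell. Annotated:

	if (inc == 1 && wq->sq.bar2_qid == 0 && wqe)
		/* WC path: push the full WQE through the doorbell window */
		pio_copy((u64 __iomem *)(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
			 (u64 *)wqe);
	else
		/* KDB path: just tell the NIC how far the PIDX advanced */
		writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
		       wq->sq.bar2_va + SGE_UDB_KDOORBELL);

The pr_debug to pr_warn promotions in this file follow from the adjacent BUG_ON(1): a diagnostic printed immediately before a crash should not be compiled out or gated behind dynamic debug.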

+ 116 - 108
drivers/infiniband/hw/hfi1/chip.c

@@ -6520,12 +6520,11 @@ static void _dc_start(struct hfi1_devdata *dd)
 	if (!dd->dc_shutdown)
 		return;
 
-	/* Take the 8051 out of reset */
-	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-	/* Wait until 8051 is ready */
-	if (wait_fm_ready(dd, TIMEOUT_8051_START))
-		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
-			   __func__);
+	/*
+	 * Take the 8051 out of reset, wait until 8051 is ready, and set host
+	 * version bit.
+	 */
+	release_and_wait_ready_8051_firmware(dd);
 
 	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
 	write_csr(dd, DCC_CFG_RESET, 0x10);
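
release_and_wait_ready_8051_firmware() is declared in chip.h below but defined outside this view. A plausible shape, reconstructed from the code it replaces plus the call-site comment about setting the host version bit; the HOST_INTERFACE_VERSION value and the exact error handling are assumptions:

	int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
	{
		int ret;

		mutex_lock(&dd->dc8051_lock);
		/* take the 8051 out of reset */
		write_csr(dd, DC_DC8051_CFG_RST, 0ull);
		if (wait_fm_ready(dd, TIMEOUT_8051_START)) {
			dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
				   __func__);
			ret = -ETIMEDOUT;
		} else {
			/* asserts dc8051_lock is held (see lockdep hunk below) */
			ret = write_host_interface_version(dd,
							   HOST_INTERFACE_VERSION);
		}
		mutex_unlock(&dd->dc8051_lock);
		return ret;
	}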
@@ -8595,30 +8594,23 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
 }
 
 /*
+ * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
+ * keeps executing anyway; the caller must hold dd->dc8051_lock.
+ *
  * Returns:
  *	< 0 = Linux error, not able to get access
  *	> 0 = 8051 command RETURN_CODE
  */
-static int do_8051_command(
-	struct hfi1_devdata *dd,
-	u32 type,
-	u64 in_data,
-	u64 *out_data)
+static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+			    u64 *out_data)
 {
 	u64 reg, completed;
 	int return_code;
 	unsigned long timeout;
 
+	lockdep_assert_held(&dd->dc8051_lock);
 	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
 
-	mutex_lock(&dd->dc8051_lock);
-
-	/* We can't send any commands to the 8051 if it's in reset */
-	if (dd->dc_shutdown) {
-		return_code = -ENODEV;
-		goto fail;
-	}
-
 	/*
 	 * If an 8051 host command timed out previously, then the 8051 is
 	 * stuck.
@@ -8718,6 +8710,29 @@ static int do_8051_command(
 	 */
 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
 
+fail:
+	return return_code;
+}
+
+/*
+ * Returns:
+ *	< 0 = Linux error, not able to get access
+ *	> 0 = 8051 command RETURN_CODE
+ */
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+			   u64 *out_data)
+{
+	int return_code;
+
+	mutex_lock(&dd->dc8051_lock);
+	/* We can't send any commands to the 8051 if it's in reset */
+	if (dd->dc_shutdown) {
+		return_code = -ENODEV;
+		goto fail;
+	}
+
+	return_code = _do_8051_command(dd, type, in_data, out_data);
+
 fail:
 	mutex_unlock(&dd->dc8051_lock);
 	return return_code;
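
This hunk introduces the file's locked/unlocked naming split: the leading underscore marks the variant that requires dd->dc8051_lock already held, enforced by lockdep_assert_held(), while the plain name takes and drops the mutex around it. The point is to let code that already owns the lock, such as write_host_interface_version() further down, issue 8051 commands without self-deadlocking on the non-recursive mutex:

	mutex_lock(&dd->dc8051_lock);
	ret = _load_8051_config(dd, field, lane, data);	/* asserts, not takes */
	mutex_unlock(&dd->dc8051_lock);
	/* calling load_8051_config() inside the region would mutex_lock() twice */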
@@ -8728,16 +8743,17 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
 	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
 }
 
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
-		     u8 lane_id, u32 config_data)
+static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+			     u8 lane_id, u32 config_data)
 {
 	u64 data;
 	int ret;
 
+	lockdep_assert_held(&dd->dc8051_lock);
 	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
 		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
 		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
-	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+	ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
 	if (ret != HCMD_SUCCESS) {
 		dd_dev_err(dd,
 			   "load 8051 config: field id %d, lane %d, err %d\n",
@@ -8746,6 +8762,18 @@ int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
 	return ret;
 }
 
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+		     u8 lane_id, u32 config_data)
+{
+	int return_code;
+
+	mutex_lock(&dd->dc8051_lock);
+	return_code = _load_8051_config(dd, field_id, lane_id, config_data);
+	mutex_unlock(&dd->dc8051_lock);
+
+	return return_code;
+}
+
 /*
  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
  * set the result, even on error.
@@ -8861,13 +8889,14 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
 	u32 frame;
 	u32 mask;
 
+	lockdep_assert_held(&dd->dc8051_lock);
 	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
 	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
 	/* Clear, then set field */
 	frame &= ~mask;
 	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
-	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
-				frame);
+	return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+				 frame);
 }
 
 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -9160,25 +9189,6 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
 	return 0; /* success */
 }
 
-/*
- * Set the SerDes to internal loopback mode.
- * Returns 0 on success, -errno on error.
- */
-static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
-{
-	int ret;
-
-	ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
-	if (ret == HCMD_SUCCESS)
-		return 0;
-	dd_dev_err(dd,
-		   "Set physical link state to SerDes Loopback failed with return %d\n",
-		   ret);
-	if (ret >= 0)
-		ret = -EINVAL;
-	return ret;
-}
-
 /*
  * Do all special steps to set up loopback.
  */
@@ -9204,13 +9214,11 @@ static int init_loopback(struct hfi1_devdata *dd)
 		return 0;
 	}
 
-	/* handle serdes loopback */
-	if (loopback == LOOPBACK_SERDES) {
-		/* internal serdes loopack needs quick linkup on RTL */
-		if (dd->icode == ICODE_RTL_SILICON)
-			quick_linkup = 1;
-		return set_serdes_loopback_mode(dd);
-	}
+	/*
+	 * SerDes loopback init sequence is handled in set_local_link_attributes
+	 */
+	if (loopback == LOOPBACK_SERDES)
+		return 0;
 
 	/* LCB loopback - handled at poll time */
 	if (loopback == LOOPBACK_LCB) {
@@ -9269,7 +9277,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
 	u8 tx_polarity_inversion;
 	u8 rx_polarity_inversion;
 	int ret;
-
+	u32 misc_bits = 0;
 	/* reset our fabric serdes to clear any lingering problems */
 	fabric_serdes_reset(dd);
 
@@ -9315,7 +9323,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
 	if (ret != HCMD_SUCCESS)
 		goto set_local_link_attributes_fail;
 
-	ret = write_vc_local_link_width(dd, 0, 0,
+	/*
+	 * SerDes loopback init sequence requires
+	 * setting bit 0 of MISC_CONFIG_BITS
+	 */
+	if (loopback == LOOPBACK_SERDES)
+		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
+
+	ret = write_vc_local_link_width(dd, misc_bits, 0,
 					opa_to_vc_link_widths(
 						ppd->link_width_enabled));
 	if (ret != HCMD_SUCCESS)
@@ -9967,7 +9982,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
 		val = ppd->phy_error_threshold;
 		break;
 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
-		val = dd->link_default;
+		val = HLS_DEFAULT;
 		break;
 
 	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
@@ -10170,6 +10185,10 @@ static const char * const state_complete_reasons[] = {
 	[0x33] =
 	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
 	[0x34] = tx_out_of_policy,
+	[0x35] = "Negotiated link width is mutually exclusive",
+	[0x36] =
+	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
+	[0x37] = "Unable to resolve secure data exchange",
 };
 
 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
@@ -10569,7 +10588,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
 
 	orig_new_state = state;
 	if (state == HLS_DN_DOWNDEF)
-		state = dd->link_default;
+		state = HLS_DEFAULT;
 
 	/* interpret poll -> poll as a link bounce */
 	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
@@ -12980,7 +12999,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
 			if (!me->arg) /* => no irq, no affinity */
 				continue;
 			hfi1_put_irq_affinity(dd, me);
-			free_irq(me->irq, me->arg);
+			pci_free_irq(dd->pcidev, i, me->arg);
 		}
 
 		/* clean structures */
@@ -12990,7 +13009,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
 	} else {
 		/* INTx */
 		if (dd->requested_intx_irq) {
-			free_irq(dd->pcidev->irq, dd);
+			pci_free_irq(dd->pcidev, 0, dd);
 			dd->requested_intx_irq = 0;
 		}
 		disable_intx(dd->pcidev);
@@ -13049,10 +13068,8 @@ static int request_intx_irq(struct hfi1_devdata *dd)
 {
 	int ret;
 
-	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
-		 dd->unit);
-	ret = request_irq(dd->pcidev->irq, general_interrupt,
-			  IRQF_SHARED, dd->intx_name, dd);
+	ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
+			      DRIVER_NAME "_%d", dd->unit);
 	if (ret)
 		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
 			   ret);
@@ -13074,7 +13091,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 	first_sdma = last_general;
 	last_sdma = first_sdma + dd->num_sdma;
 	first_rx = last_sdma;
-	last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+	last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
 
 	/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
 	dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
@@ -13095,13 +13112,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 		int idx;
 		struct hfi1_ctxtdata *rcd = NULL;
 		struct sdma_engine *sde = NULL;
+		char name[MAX_NAME_SIZE];
 
-		/* obtain the arguments to request_irq */
+		/* obtain the arguments to pci_request_irq */
 		if (first_general <= i && i < last_general) {
 			idx = i - first_general;
 			handler = general_interrupt;
 			arg = dd;
-			snprintf(me->name, sizeof(me->name),
+			snprintf(name, sizeof(name),
 				 DRIVER_NAME "_%d", dd->unit);
 			err_info = "general";
 			me->type = IRQ_GENERAL;
@@ -13110,14 +13128,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 			sde = &dd->per_sdma[idx];
 			handler = sdma_interrupt;
 			arg = sde;
-			snprintf(me->name, sizeof(me->name),
+			snprintf(name, sizeof(name),
 				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
 			err_info = "sdma";
 			remap_sdma_interrupts(dd, idx, i);
 			me->type = IRQ_SDMA;
 		} else if (first_rx <= i && i < last_rx) {
 			idx = i - first_rx;
-			rcd = hfi1_rcd_get_by_index(dd, idx);
+			rcd = hfi1_rcd_get_by_index_safe(dd, idx);
 			if (rcd) {
 				/*
 				 * Set the interrupt register and mask for this
@@ -13129,7 +13147,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 				handler = receive_context_interrupt;
 				thread = receive_context_thread;
 				arg = rcd;
-				snprintf(me->name, sizeof(me->name),
+				snprintf(name, sizeof(name),
 					 DRIVER_NAME "_%d kctxt%d",
 					 dd->unit, idx);
 				err_info = "receive context";
@@ -13150,18 +13168,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 		if (!arg)
 			continue;
 		/* make sure the name is terminated */
-		me->name[sizeof(me->name) - 1] = 0;
+		name[sizeof(name) - 1] = 0;
 		me->irq = pci_irq_vector(dd->pcidev, i);
-		/*
-		 * On err return me->irq.  Don't need to clear this
-		 * because 'arg' has not been set, and cleanup will
-		 * do the right thing.
-		 */
-		if (me->irq < 0)
-			return me->irq;
-
-		ret = request_threaded_irq(me->irq, handler, thread, 0,
-					   me->name, arg);
+		ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
+				      name);
 		if (ret) {
 			dd_dev_err(dd,
 				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
@@ -13169,7 +13179,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 			return ret;
 		}
 		/*
-		 * assign arg after request_irq call, so it will be
+		 * assign arg after pci_request_irq call, so it will be
 		 * cleaned up
 		 */
 		me->arg = arg;
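
The interrupt plumbing throughout this file moves from raw request_threaded_irq()/free_irq() to the PCI-managed pair, which resolves the vector-to-irq mapping internally (so the pci_irq_vector() error check disappears) and builds the irq name from a printf-style tail, letting the per-entry name buffer become a short-lived local. The pair, as used above:

	ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
			      DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
	/* teardown addresses the same vector index, not a Linux irq number */
	pci_free_irq(dd->pcidev, i, arg);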
@@ -13187,7 +13197,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
 	int i;
 
 	if (!dd->num_msix_entries) {
-		synchronize_irq(dd->pcidev->irq);
+		synchronize_irq(pci_irq_vector(dd->pcidev, 0));
 		return;
 	}
 
@@ -13208,7 +13218,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 		return;
 
 	hfi1_put_irq_affinity(dd, me);
-	free_irq(me->irq, me->arg);
+	pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
 
 	me->arg = NULL;
 }
@@ -13231,28 +13241,21 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
 	rcd->imask = ((u64)1) <<
 		  ((IS_RCVAVAIL_START + idx) % 64);
-
-	snprintf(me->name, sizeof(me->name),
-		 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
-	me->name[sizeof(me->name) - 1] = 0;
 	me->type = IRQ_RCVCTXT;
 	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
-	if (me->irq < 0) {
-		dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
-			   idx, me->irq);
-		return;
-	}
 	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
 
-	ret = request_threaded_irq(me->irq, receive_context_interrupt,
-				   receive_context_thread, 0, me->name, arg);
+	ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
+			      receive_context_interrupt,
+			      receive_context_thread, arg,
+			      DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
 	if (ret) {
 		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
 			   me->irq, idx, ret);
 		return;
 	}
 	/*
-	 * assign arg after request_irq call, so it will be
+	 * assign arg after pci_request_irq call, so it will be
 	 * cleaned up
 	 */
 	me->arg = arg;
@@ -13261,7 +13264,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 	if (ret) {
 		dd_dev_err(dd,
 			   "unable to pin IRQ %d\n", ret);
-		free_irq(me->irq, me->arg);
+		pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
 	}
 }
 
@@ -13294,8 +13297,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
 	 *		slow source, SDMACleanupDone)
 	 *	N interrupts - one per used SDMA engine
 	 *	M interrupt - one per kernel receive context
+	 *	V interrupt - one for each VNIC context
 	 */
-	total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
 
 	/* ask for MSI-X interrupts */
 	request = request_msix(dd, total);
@@ -13356,10 +13360,12 @@ fail:
  *                             in array of contexts
  *	freectxts  - number of free user contexts
  *	num_send_contexts - number of PIO send contexts being used
+ *	num_vnic_contexts - number of contexts reserved for VNIC
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
 	unsigned long num_kernel_contexts;
+	u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
@@ -13393,6 +13399,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 			   num_kernel_contexts);
 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
 	}
+
+	/* Accommodate VNIC contexts if possible */
+	if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
+		dd_dev_err(dd, "No receive contexts available for VNIC\n");
+		num_vnic_contexts = 0;
+	}
+	total_contexts = num_kernel_contexts + num_vnic_contexts;
+
 	/*
 	 * User contexts:
 	 *	- default to 1 user context per real (non-HT) CPU core if
@@ -13402,19 +13416,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 		num_user_contexts =
 			cpumask_weight(&node_affinity.real_cpu_mask);
 
-	total_contexts = num_kernel_contexts + num_user_contexts;
-
 	/*
 	 * Adjust the counts given a global max.
 	 */
-	if (total_contexts > dd->chip_rcv_contexts) {
+	if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
 		dd_dev_err(dd,
 			   "Reducing # user receive contexts to: %d, from %d\n",
-			   (int)(dd->chip_rcv_contexts - num_kernel_contexts),
+			   (int)(dd->chip_rcv_contexts - total_contexts),
 			   (int)num_user_contexts);
-		num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
 		/* recalculate */
-		total_contexts = num_kernel_contexts + num_user_contexts;
+		num_user_contexts = dd->chip_rcv_contexts - total_contexts;
 	}
 
 	/* each user context requires an entry in the RMT */
@@ -13427,25 +13438,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 			   user_rmt_reduced);
 		/* recalculate */
 		num_user_contexts = user_rmt_reduced;
-		total_contexts = num_kernel_contexts + num_user_contexts;
 	}
 
-	/* Accommodate VNIC contexts */
-	if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
-		total_contexts += HFI1_NUM_VNIC_CTXT;
+	total_contexts += num_user_contexts;
 
 	/* the first N are kernel contexts, the rest are user/vnic contexts */
 	dd->num_rcv_contexts = total_contexts;
 	dd->n_krcv_queues = num_kernel_contexts;
 	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
+	dd->num_vnic_contexts = num_vnic_contexts;
 	dd->num_user_contexts = num_user_contexts;
 	dd->freectxts = num_user_contexts;
 	dd_dev_info(dd,
-		    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
+		    "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
 		    (int)dd->chip_rcv_contexts,
 		    (int)dd->num_rcv_contexts,
 		    (int)dd->n_krcv_queues,
-		    (int)dd->num_rcv_contexts - dd->n_krcv_queues);
+		    dd->num_vnic_contexts,
+		    dd->num_user_contexts);
 
 	/*
 	 * Receive array allocation:
@@ -14962,8 +14972,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
 		init_vl_arb_caches(ppd);
 	}
 
-	dd->link_default = HLS_DN_POLL;
-
 	/*
 	 * Do remaining PCIe setup and save PCIe values in dd.
 	 * Any error printing is already done by the init code.

+ 4 - 0
drivers/infiniband/hw/hfi1/chip.h

@@ -583,6 +583,9 @@ enum {
 #define LOOPBACK_LCB	2
 #define LOOPBACK_CABLE	3	/* external cable */
 
+/* set up serdes bit in MISC_CONFIG_BITS */
+#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+
 /* read and write hardware registers */
 u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
@@ -710,6 +713,7 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
 		      u8 *ver_patch);
 int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
 void read_guid(struct hfi1_devdata *dd);
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
 int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
 			  u8 neigh_reason, u8 rem_reason);

+ 1 - 0
drivers/infiniband/hw/hfi1/common.h

@@ -328,6 +328,7 @@ struct diag_pkt {
 #define SC15_PACKET 0xF
 #define SIZE_OF_CRC 1
 #define SIZE_OF_LT 1
+#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
 
 #define LIM_MGMT_P_KEY       0x7FFF
 #define FULL_MGMT_P_KEY      0xFFFF

+ 2 - 2
drivers/infiniband/hw/hfi1/debugfs.c

@@ -243,7 +243,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
 	spos = v;
 	i = *spos;
 
-	rcd = hfi1_rcd_get_by_index(dd, i);
+	rcd = hfi1_rcd_get_by_index_safe(dd, i);
 	if (!rcd)
 		return SEQ_SKIP;
 
@@ -402,7 +402,7 @@ static int _rcds_seq_show(struct seq_file *s, void *v)
 	loff_t *spos = v;
 	loff_t i = *spos;
 
-	rcd = hfi1_rcd_get_by_index(dd, i);
+	rcd = hfi1_rcd_get_by_index_safe(dd, i);
 	if (rcd)
 		seqfile_dump_rcd(s, rcd);
 	hfi1_rcd_put(rcd);
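
hfi1_rcd_get_by_index_safe() replaces hfi1_rcd_get_by_index() wherever the index comes from an open-ended iterator (the seq_file positions here, interrupt and context loops elsewhere in this commit). Its definition is not in this view; presumably it only adds a bounds check before the raw lookup:

	/* sketch, assuming the _safe variant just validates the index */
	struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
							 u16 ctxt)
	{
		if (ctxt < dd->num_rcv_contexts)
			return hfi1_rcd_get_by_index(dd, ctxt);
		return NULL;
	}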

+ 8 - 2
drivers/infiniband/hw/hfi1/driver.c

@@ -433,6 +433,12 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
 	packet->numpkt = 0;
 }
 
+/* We support only two types - 9B and 16B for now */
+static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+	[HFI1_PKT_TYPE_9B] = &return_cnp,
+	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
+};
+
 void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
 			       bool do_cnp)
 {
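
The new hfi1_handle_cnp_tbl turns the 9B-versus-16B congestion-notification choice into a table dispatch rather than an if/else in the hot path:

	/*
	 * Dispatch sketch; the hfi1_handle_cnp signature and the exact
	 * index derivation are not visible in this view:
	 *
	 *	hfi1_handle_cnp fn = hfi1_handle_cnp_tbl[pkt_type];
	 *	fn(...);   // pkt_type is HFI1_PKT_TYPE_9B or HFI1_PKT_TYPE_16B
	 */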
@@ -866,7 +872,7 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
 	 * interrupt handler for all statically allocated kernel contexts.
 	 */
 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index(dd, ctxt);
+		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 		if (rcd) {
 			rcd->do_interrupt =
 				&handle_receive_interrupt_nodma_rtail;
@@ -895,7 +901,7 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
 	 * interrupt handler for all statically allocated kernel contexts.
 	 */
 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index(dd, ctxt);
+		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 		if (rcd) {
 			rcd->do_interrupt =
 				&handle_receive_interrupt_dma_rtail;

+ 280 - 206
drivers/infiniband/hw/hfi1/file_ops.c

@@ -78,16 +78,20 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
 
 static u64 kvirt_to_phys(void *addr);
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
 			  const struct hfi1_user_info *uinfo);
 static int init_user_ctxt(struct hfi1_filedata *fd,
 			  struct hfi1_ctxtdata *uctxt);
 static void user_init(struct hfi1_ctxtdata *uctxt);
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len);
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len);
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len);
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len);
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+				u32 len);
 static int setup_base_ctxt(struct hfi1_filedata *fd,
 			   struct hfi1_ctxtdata *uctxt);
 static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
@@ -101,10 +105,11 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
 static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
 static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-			  unsigned long events);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
+			  unsigned long arg);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-		       int start_stop);
+		       unsigned long arg);
 static int vma_fault(struct vm_fault *vmf);
 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 			    unsigned long arg);
@@ -221,13 +226,8 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 {
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct hfi1_user_info uinfo;
-	struct hfi1_tid_info tinfo;
 	int ret = 0;
-	unsigned long addr;
 	int uval = 0;
-	unsigned long ul_uval = 0;
-	u16 uval16 = 0;
 
 	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
 	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -237,171 +237,55 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
 	switch (cmd) {
 	case HFI1_IOCTL_ASSIGN_CTXT:
-		if (uctxt)
-			return -EINVAL;
-
-		if (copy_from_user(&uinfo,
-				   (struct hfi1_user_info __user *)arg,
-				   sizeof(uinfo)))
-			return -EFAULT;
-
-		ret = assign_ctxt(fd, &uinfo);
+		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
 		break;
+
 	case HFI1_IOCTL_CTXT_INFO:
-		ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
-				    sizeof(struct hfi1_ctxt_info));
+		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
 		break;
+
 	case HFI1_IOCTL_USER_INFO:
-		ret = get_base_info(fd, (void __user *)(unsigned long)arg,
-				    sizeof(struct hfi1_base_info));
+		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
 		break;
+
 	case HFI1_IOCTL_CREDIT_UPD:
 		if (uctxt)
 			sc_return_credits(uctxt->sc);
 		break;
 
 	case HFI1_IOCTL_TID_UPDATE:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
-		if (!ret) {
-			/*
-			 * Copy the number of tidlist entries we used
-			 * and the length of the buffer we registered.
-			 */
-			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-					 sizeof(tinfo.tidcnt)))
-				return -EFAULT;
-
-			addr = arg + offsetof(struct hfi1_tid_info, length);
-			if (copy_to_user((void __user *)addr, &tinfo.length,
-					 sizeof(tinfo.length)))
-				ret = -EFAULT;
-		}
+		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_TID_FREE:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
-		if (ret)
-			break;
-		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-				 sizeof(tinfo.tidcnt)))
-			ret = -EFAULT;
+		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_TID_INVAL_READ:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
-		if (ret)
-			break;
-		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-				 sizeof(tinfo.tidcnt)))
-			ret = -EFAULT;
+		ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_RECV_CTRL:
-		ret = get_user(uval, (int __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		ret = manage_rcvq(uctxt, fd->subctxt, uval);
+		ret = manage_rcvq(uctxt, fd->subctxt, arg);
 		break;
 
 	case HFI1_IOCTL_POLL_TYPE:
-		ret = get_user(uval, (int __user *)arg);
-		if (ret != 0)
+		if (get_user(uval, (int __user *)arg))
 			return -EFAULT;
 		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
 		break;
 
 	case HFI1_IOCTL_ACK_EVENT:
-		ret = get_user(ul_uval, (unsigned long __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
+		ret = user_event_ack(uctxt, fd->subctxt, arg);
 		break;
 
 	case HFI1_IOCTL_SET_PKEY:
-		ret = get_user(uval16, (u16 __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		if (HFI1_CAP_IS_USET(PKEY_CHECK))
-			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
-		else
-			return -EPERM;
+		ret = set_ctxt_pkey(uctxt, arg);
 		break;
 
-	case HFI1_IOCTL_CTXT_RESET: {
-		struct send_context *sc;
-		struct hfi1_devdata *dd;
-
-		if (!uctxt || !uctxt->dd || !uctxt->sc)
-			return -EINVAL;
-
-		/*
-		 * There is no protection here. User level has to
-		 * guarantee that no one will be writing to the send
-		 * context while it is being re-initialized.
-		 * If user level breaks that guarantee, it will break
-		 * it's own context and no one else's.
-		 */
-		dd = uctxt->dd;
-		sc = uctxt->sc;
-		/*
-		 * Wait until the interrupt handler has marked the
-		 * context as halted or frozen. Report error if we time
-		 * out.
-		 */
-		wait_event_interruptible_timeout(
-			sc->halt_wait, (sc->flags & SCF_HALTED),
-			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-		if (!(sc->flags & SCF_HALTED))
-			return -ENOLCK;
-
-		/*
-		 * If the send context was halted due to a Freeze,
-		 * wait until the device has been "unfrozen" before
-		 * resetting the context.
-		 */
-		if (sc->flags & SCF_FROZEN) {
-			wait_event_interruptible_timeout(
-				dd->event_queue,
-				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
-				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-			if (dd->flags & HFI1_FROZEN)
-				return -ENOLCK;
-
-			if (dd->flags & HFI1_FORCED_FREEZE)
-				/*
-				 * Don't allow context reset if we are into
-				 * forced freeze
-				 */
-				return -ENODEV;
-
-			sc_disable(sc);
-			ret = sc_enable(sc);
-			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
-		} else {
-			ret = sc_restart(sc);
-		}
-		if (!ret)
-			sc_return_credits(sc);
+	case HFI1_IOCTL_CTXT_RESET:
+		ret = ctxt_reset(uctxt);
 		break;
-	}
 
 	case HFI1_IOCTL_GET_VERS:
 		uval = HFI1_USER_SWVERSION;
@@ -595,9 +479,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 		 * Use the page where this context's flags are. User level
 		 * knows where it's own bitmap is within the page.
 		 */
-		memaddr = (unsigned long)(dd->events +
-				  ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-				   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
+		memaddr = (unsigned long)
+			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
 		memlen = PAGE_SIZE;
 		/*
 		 * v3.7 removes VM_RESERVED but the effect is kept by
@@ -779,8 +662,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 	 * Clear any left over, unhandled events so the next process that
 	 * gets this context doesn't get confused.
 	 */
-	ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
+	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
 	*ev = 0;
 
 	spin_lock_irqsave(&dd->uctxt_lock, flags);
@@ -891,21 +773,29 @@ static int complete_subctxt(struct hfi1_filedata *fd)
 	return ret;
 }
 
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	int ret;
-	unsigned int swmajor, swminor;
+	unsigned int swmajor;
 	struct hfi1_ctxtdata *uctxt = NULL;
+	struct hfi1_user_info uinfo;
+
+	if (fd->uctxt)
+		return -EINVAL;
+
+	if (sizeof(uinfo) != len)
+		return -EINVAL;
 
-	swmajor = uinfo->userversion >> 16;
+	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
+		return -EFAULT;
+
+	swmajor = uinfo.userversion >> 16;
 	if (swmajor != HFI1_USER_SWMAJOR)
 		return -ENODEV;
 
-	if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
 		return -EINVAL;
 
-	swminor = uinfo->userversion & 0xffff;
-
 	/*
 	 * Acquire the mutex to protect against multiple creations of what
 	 * could be a shared base context.
@@ -915,14 +805,14 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
 	 * Get a sub context if available  (fd->uctxt will be set).
 	 * ret < 0 error, 0 no context, 1 sub-context found
 	 */
-	ret = find_sub_ctxt(fd, uinfo);
+	ret = find_sub_ctxt(fd, &uinfo);
 
 	/*
 	 * Allocate a base context if context sharing is not required or a
 	 * sub context wasn't found.
 	 */
 	if (!ret)
-		ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
+		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
 
 	mutex_unlock(&hfi1_mutex);
 
@@ -1230,12 +1120,13 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
 	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
 }
 
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len)
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	struct hfi1_ctxt_info cinfo;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	int ret = 0;
+
+	if (sizeof(cinfo) != len)
+		return -EINVAL;
 
 	memset(&cinfo, 0, sizeof(cinfo));
 	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
@@ -1265,10 +1156,10 @@ static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
 	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
 
 	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
-	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
-		ret = -EFAULT;
+	if (copy_to_user((void __user *)arg, &cinfo, len))
+		return -EFAULT;
 
-	return ret;
+	return 0;
 }
 
 static int init_user_ctxt(struct hfi1_filedata *fd,
@@ -1344,18 +1235,18 @@ done:
 	return ret;
 }
 
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len)
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	struct hfi1_base_info binfo;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
-	ssize_t sz;
 	unsigned offset;
-	int ret = 0;
 
 	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
 
+	if (sizeof(binfo) != len)
+		return -EINVAL;
+
 	memset(&binfo, 0, sizeof(binfo));
 	binfo.hw_version = dd->revision;
 	binfo.sw_version = HFI1_KERN_SWVERSION;
@@ -1385,39 +1276,152 @@ static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
 					       fd->subctxt,
 					       uctxt->egrbufs.rcvtids[0].dma);
 	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
-						 fd->subctxt, 0);
+						  fd->subctxt, 0);
 	/*
 	 * user regs are at
 	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
 	 */
 	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
-					    fd->subctxt, 0);
-	offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-		    HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
-		  sizeof(*dd->events));
+					     fd->subctxt, 0);
+	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
+				sizeof(*dd->events));
 	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
-					      fd->subctxt,
-					      offset);
+					       fd->subctxt,
+					       offset);
 	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
-					      fd->subctxt,
-					      dd->status);
+					       fd->subctxt,
+					       dd->status);
 	if (HFI1_CAP_IS_USET(DMA_RTAIL))
 		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
-						       fd->subctxt, 0);
+							fd->subctxt, 0);
 	if (uctxt->subctxt_cnt) {
 		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
-							uctxt->ctxt,
-							fd->subctxt, 0);
-		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
 							 uctxt->ctxt,
 							 fd->subctxt, 0);
+		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
+							  uctxt->ctxt,
+							  fd->subctxt, 0);
 		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
-							 uctxt->ctxt,
-							 fd->subctxt, 0);
+							  uctxt->ctxt,
+							  fd->subctxt, 0);
 	}
-	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
-	if (copy_to_user(ubase, &binfo, sz))
+
+	if (copy_to_user((void __user *)arg, &binfo, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * user_exp_rcv_setup - Set up the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_setup.
+ *
+ */
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
+	if (!ret) {
+		/*
+		 * Copy the number of tidlist entries we used
+		 * and the length of the buffer we registered.
+		 */
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+				 sizeof(tinfo.tidcnt)))
+			return -EFAULT;
+
+		addr = arg + offsetof(struct hfi1_tid_info, length);
+		if (copy_to_user((void __user *)addr, &tinfo.length,
+				 sizeof(tinfo.length)))
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/**
+ * user_exp_rcv_clear - Clear the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * hfi1_user_exp_rcv_clear() can be called from the error path.  Because of
+ * this, we need this wrapper to copy the user space information before
+ * doing the clear.
+ */
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
+	if (!ret) {
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+				 sizeof(tinfo.tidcnt)))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/**
+ * user_exp_rcv_invalid - Invalidate the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_invalid.
+ *
+ */
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+				u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (!fd->invalid_tids)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
+	if (ret)
+		return ret;
+
+	addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+	if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+			 sizeof(tinfo.tidcnt)))
 		ret = -EFAULT;
+
 	return ret;
 }
 
@@ -1485,14 +1489,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
 	     ctxt++) {
 		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
 		if (uctxt) {
-			unsigned long *evs = dd->events +
-				(uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-				HFI1_MAX_SHARED_CTXTS;
+			unsigned long *evs;
 			int i;
 			/*
 			 * subctxt_cnt is 0 if not shared, so do base
 			 * separately, first, then remaining subctxt, if any
 			 */
+			evs = dd->events + uctxt_offset(uctxt);
 			set_bit(evtbit, evs);
 			for (i = 1; i < uctxt->subctxt_cnt; i++)
 				set_bit(evtbit, evs + i);
@@ -1514,13 +1517,18 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
  * re-init the software copy of the head register
  */
 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-		       int start_stop)
+		       unsigned long arg)
 {
 	struct hfi1_devdata *dd = uctxt->dd;
 	unsigned int rcvctrl_op;
+	int start_stop;
 
 	if (subctxt)
-		goto bail;
+		return 0;
+
+	if (get_user(start_stop, (int __user *)arg))
+		return -EFAULT;
+
 	/* atomically clear receive enable ctxt. */
 	if (start_stop) {
 		/*
@@ -1539,7 +1547,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
 	}
 	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
 	/* always; new head should be equal to new tail; see above */
-bail:
+
 	return 0;
 }
 
@@ -1549,17 +1557,20 @@ bail:
  * set, if desired, and checks again in future.
  */
 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-			  unsigned long events)
+			  unsigned long arg)
 {
 	int i;
 	struct hfi1_devdata *dd = uctxt->dd;
 	unsigned long *evs;
+	unsigned long events;
 
 	if (!dd->events)
 		return 0;
 
-	evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-			    HFI1_MAX_SHARED_CTXTS) + subctxt;
+	if (get_user(events, (unsigned long __user *)arg))
+		return -EFAULT;
+
+	evs = dd->events + uctxt_offset(uctxt) + subctxt;
 
 	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
 		if (!test_bit(i, &events))
@@ -1569,26 +1580,89 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
 	return 0;
 }
 
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
 {
-	int ret = -ENOENT, i, intable = 0;
+	int i;
 	struct hfi1_pportdata *ppd = uctxt->ppd;
 	struct hfi1_devdata *dd = uctxt->dd;
+	u16 pkey;
 
-	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
-		ret = -EINVAL;
-		goto done;
-	}
+	if (!HFI1_CAP_IS_USET(PKEY_CHECK))
+		return -EPERM;
+
+	if (get_user(pkey, (u16 __user *)arg))
+		return -EFAULT;
+
+	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+		return -EINVAL;
 
 	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
-		if (pkey == ppd->pkeys[i]) {
-			intable = 1;
-			break;
-		}
+		if (pkey == ppd->pkeys[i])
+			return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
+
+	return -ENOENT;
+}
+
+/**
+ * ctxt_reset - Reset the user context
+ * @uctxt: valid user context
+ */
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
+{
+	struct send_context *sc;
+	struct hfi1_devdata *dd;
+	int ret = 0;
+
+	if (!uctxt || !uctxt->dd || !uctxt->sc)
+		return -EINVAL;
+
+	/*
+	 * There is no protection here. User level has to guarantee that
+	 * no one will be writing to the send context while it is being
+	 * re-initialized.  If user level breaks that guarantee, it will
+	 * break its own context and no one else's.
+	 */
+	dd = uctxt->dd;
+	sc = uctxt->sc;
+
+	/*
+	 * Wait until the interrupt handler has marked the context as
+	 * halted or frozen. Report error if we time out.
+	 */
+	wait_event_interruptible_timeout(
+		sc->halt_wait, (sc->flags & SCF_HALTED),
+		msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+	if (!(sc->flags & SCF_HALTED))
+		return -ENOLCK;
+
+	/*
+	 * If the send context was halted due to a Freeze, wait until the
+	 * device has been "unfrozen" before resetting the context.
+	 */
+	if (sc->flags & SCF_FROZEN) {
+		wait_event_interruptible_timeout(
+			dd->event_queue,
+			!(READ_ONCE(dd->flags) & HFI1_FROZEN),
+			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+		if (dd->flags & HFI1_FROZEN)
+			return -ENOLCK;
+
+		if (dd->flags & HFI1_FORCED_FREEZE)
+			/*
+			 * Don't allow context reset if we are into
+			 * forced freeze
+			 */
+			return -ENODEV;
+
+		sc_disable(sc);
+		ret = sc_enable(sc);
+		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
+	} else {
+		ret = sc_restart(sc);
+	}
+	if (!ret)
+		sc_return_credits(sc);
 
-	if (intable)
-		ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
-done:
 	return ret;
 }
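
Taken together, the file_ops.c hunks converge on one pattern: every ioctl
handler now receives the raw arg plus _IOC_SIZE(cmd) and rejects a caller
whose structure size disagrees with the kernel's before any user memory is
touched. Below is a minimal userspace-C sketch of that pattern; the command
number, struct layout, and names are illustrative stand-ins, not the hfi1 ABI:

#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tid_info {			/* stand-in for struct hfi1_tid_info */
	uint64_t vaddr;
	uint32_t length;
	uint32_t tidcnt;
};

/* hypothetical command word; the payload size is encoded into it */
#define DEMO_IOCTL_TID_UPDATE _IOWR('H', 0xE0, struct demo_tid_info)

static int demo_setup(unsigned long arg, uint32_t len)
{
	if (len != sizeof(struct demo_tid_info))
		return -1;	/* the kernel wrapper returns -EINVAL here */
	(void)arg;		/* copy_from_user(&tinfo, ..., len) would follow */
	return 0;
}

int main(void)
{
	/* the dispatcher passes _IOC_SIZE(cmd), never a caller-supplied size,
	 * so a user/kernel ABI mismatch is caught as a size mismatch */
	printf("size=%u rc=%d\n", (unsigned)_IOC_SIZE(DEMO_IOCTL_TID_UPDATE),
	       demo_setup(0, _IOC_SIZE(DEMO_IOCTL_TID_UPDATE)));
	return 0;
}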
 

+ 92 - 21
drivers/infiniband/hw/hfi1/firmware.c

@@ -70,6 +70,11 @@
 #define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
 #define HOST_INTERFACE_VERSION 1
 
+MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
+MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
+
 static uint fw_8051_load = 1;
 static uint fw_fabric_serdes_load = 1;
 static uint fw_pcie_serdes_load = 1;
@@ -113,6 +118,12 @@ struct css_header {
 #define MU_SIZE		8
 #define EXPONENT_SIZE	4
 
+/* size of platform configuration partition */
+#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
+
+/* size of a platform configuration file encoded in format version 4 */
+#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
+
 /* the file itself */
 struct firmware_file {
 	struct css_header css_header;
@@ -964,6 +975,46 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
 	}
 }
 
+/*
+ * Clear all reset bits, releasing the 8051.
+ * Wait for firmware to be ready to accept host requests.
+ * Then, set host version bit.
+ *
+ * This function executes even if the 8051 is in reset mode when
+ * dd->dc_shutdown == 1.
+ *
+ * Expects dd->dc8051_lock to be held.
+ */
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
+{
+	int ret;
+
+	lockdep_assert_held(&dd->dc8051_lock);
+	/* clear all reset bits, releasing the 8051 */
+	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
+	/*
+	 * Wait for firmware to be ready to accept host
+	 * requests.
+	 */
+	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+	if (ret) {
+		dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
+			   get_firmware_state(dd));
+		return ret;
+	}
+
+	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+	if (ret != HCMD_SUCCESS) {
+		dd_dev_err(dd,
+			   "Failed to set host interface version, return 0x%x\n",
+			   ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 /*
  * Load the 8051 firmware.
  */
@@ -1029,31 +1080,22 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
 	if (ret)
 		return ret;
 
-	/* clear all reset bits, releasing the 8051 */
-	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
 	/*
+	 * Clear all reset bits, releasing the 8051.
 	 * DC reset step 5. Wait for firmware to be ready to accept host
 	 * requests.
+	 * Then, set host version bit.
 	 */
-	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
-	if (ret) { /* timed out */
-		dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
-			   get_firmware_state(dd));
-		return -ETIMEDOUT;
-	}
+	mutex_lock(&dd->dc8051_lock);
+	ret = release_and_wait_ready_8051_firmware(dd);
+	mutex_unlock(&dd->dc8051_lock);
+	if (ret)
+		return ret;
 
 	read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
 	dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
 		    (int)ver_major, (int)ver_minor, (int)ver_patch);
 	dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
-	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
-	if (ret != HCMD_SUCCESS) {
-		dd_dev_err(dd,
-			   "Failed to set host interface version, return 0x%x\n",
-			   ret);
-		return -EIO;
-	}
 
 	return 0;
 }
@@ -1387,7 +1429,14 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
 	unsigned long timeout;
 	int try = 0;
 	u8 mask = 1 << dd->hfi1_id;
-	u8 user;
+	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+	if (user == mask) {
+		dd_dev_info(dd,
+			    "Hardware mutex already acquired, mutex mask %u\n",
+			    (u32)mask);
+		return 0;
+	}
 
 retry:
 	timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
@@ -1418,7 +1467,15 @@ retry:
 
 void release_hw_mutex(struct hfi1_devdata *dd)
 {
-	write_csr(dd, ASIC_CFG_MUTEX, 0);
+	u8 mask = 1 << dd->hfi1_id;
+	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+	if (user != mask)
+		dd_dev_warn(dd,
+			    "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
+			    (u32)user, (u32)mask);
+	else
+		write_csr(dd, ASIC_CFG_MUTEX, 0);
 }
 
 /* return the given resource bit(s) as a mask for the given HFI */
@@ -1733,7 +1790,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
 	ver_start /= 8;
 	meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
 
-	if (meta_ver < 5) {
+	if (meta_ver < 4) {
 		dd_dev_info(
 			dd, "%s:Please update platform config\n", __func__);
 		return -EINVAL;
@@ -1774,7 +1831,20 @@ int parse_platform_config(struct hfi1_devdata *dd)
 
 	/* Field is file size in DWORDs */
 	file_length = (*ptr) * 4;
-	ptr++;
+
+	/*
+	 * Length can't be larger than the partition size. Assume platform
+	 * config format version 4 is being used and treat the first word as
+	 * part of the header rather than as a file size, i.e. leave the
+	 * pointer where it is.
+	 */
+	if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
+		dd_dev_info(dd,
+			    "%s:File length out of bounds, using alternative format\n",
+			    __func__);
+		file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
+	} else {
+		ptr++;
+	}
 
 	if (file_length > dd->platform_config.size) {
 		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
@@ -1789,7 +1859,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
 
 	/*
 	 * In both cases where we proceed, using the self-reported file length
-	 * is the safer option
+	 * is the safer option. In the case of the old format, a predefined
+	 * value is used.
 	 */
 	while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
 		header1 = *ptr;
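
The acquire/release hunks above make the hardware mutex defensive in both
directions: each path reads the ASIC_CFG_MUTEX owner byte first, so a repeat
acquire by the same HFI returns early and a release by a non-owner only
warns. A freestanding sketch of the release side, with read_csr()/write_csr()
modeled as plain accessors over one byte of state:

#include <stdint.h>
#include <stdio.h>

static uint8_t asic_cfg_mutex;			/* models the ASIC_CFG_MUTEX CSR */

static uint8_t read_csr(void)       { return asic_cfg_mutex; }
static void    write_csr(uint8_t v) { asic_cfg_mutex = v; }

static void release_hw_mutex(uint8_t my_mask)
{
	uint8_t user = read_csr();

	if (user != my_mask)		/* not the owner: warn, leave CSR alone */
		fprintf(stderr, "mutex mask %u, my mask %u\n", user, my_mask);
	else
		write_csr(0);		/* only the owner may clear it */
}

int main(void)
{
	write_csr(1 << 0);		/* HFI 0 holds the mutex */
	release_hw_mutex(1 << 1);	/* HFI 1: refused, CSR untouched */
	release_hw_mutex(1 << 0);	/* owner: CSR cleared */
	return read_csr();		/* exits 0 */
}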

+ 14 - 12
drivers/infiniband/hw/hfi1/hfi.h

@@ -390,6 +390,7 @@ struct hfi1_packet {
 /*
  * OPA 16B L2/L4 Encodings
  */
+#define OPA_16B_L4_9B		0x00
 #define OPA_16B_L2_TYPE		0x02
 #define OPA_16B_L4_IB_LOCAL	0x09
 #define OPA_16B_L4_IB_GLOBAL	0x0A
@@ -535,6 +536,8 @@ struct rvt_sge_state;
 #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
 #define HLS_DOWN ~(HLS_UP)
 
+#define HLS_DEFAULT HLS_DN_POLL
+
 /* use this MTU size if none other is given */
 #define HFI1_DEFAULT_ACTIVE_MTU 10240
 /* use this MTU size as the default maximum */
@@ -616,7 +619,6 @@ struct hfi1_msix_entry {
 	enum irq_type type;
 	int irq;
 	void *arg;
-	char name[MAX_NAME_SIZE];
 	cpumask_t mask;
 	struct irq_affinity_notify notify;
 };
@@ -1047,6 +1049,8 @@ struct hfi1_devdata {
 	u64 z_send_schedule;
 
 	u64 __percpu *send_schedule;
+	/* number of reserved contexts for VNIC usage */
+	u16 num_vnic_contexts;
 	/* number of receive contexts in use by the driver */
 	u32 num_rcv_contexts;
 	/* number of pio send contexts in use by the driver */
@@ -1109,8 +1113,7 @@ struct hfi1_devdata {
 	u16 rcvegrbufsize_shift;
 	/* both sides of the PCIe link are gen3 capable */
 	u8 link_gen3_capable;
-	/* default link down value (poll/sleep) */
-	u8 link_default;
+	u8 dc_shutdown;
 	/* localbus width (1, 2,4,8,16,32) from config space  */
 	u32 lbus_width;
 	/* localbus speed in MHz */
@@ -1183,7 +1186,6 @@ struct hfi1_devdata {
 
 	/* INTx information */
 	u32 requested_intx_irq;		/* did we request one? */
-	char intx_name[MAX_NAME_SIZE];	/* INTx name */
 
 	/* general interrupt: mask of handled interrupts */
 	u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1295,7 +1297,6 @@ struct hfi1_devdata {
 	u8 oui1;
 	u8 oui2;
 	u8 oui3;
-	u8 dc_shutdown;
 
 	/* Timer and counter used to detect RcvBufOvflCnt changes */
 	struct timer_list rcverr_timer;
@@ -1373,8 +1374,12 @@ struct hfi1_filedata {
 extern struct list_head hfi1_dev_list;
 extern spinlock_t hfi1_devs_lock;
 struct hfi1_devdata *hfi1_lookup(int unit);
-extern u32 hfi1_cpulist_count;
-extern unsigned long *hfi1_cpulist;
+
+static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
+{
+	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
+		HFI1_MAX_SHARED_CTXTS;
+}
 
 int hfi1_init(struct hfi1_devdata *dd, int reinit);
 int hfi1_count_active_units(void);
@@ -1396,6 +1401,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
 void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+						 u16 ctxt);
 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
@@ -1531,11 +1538,6 @@ typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
 				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
 				u8 sc5, const struct ib_grh *old_grh);
 
-/* We support only two types - 9B and 16B for now */
-static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
-	[HFI1_PKT_TYPE_9B] = &return_cnp,
-	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
-};
 #define PKEY_CHECK_INVALID -1
 int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
 		      u8 sc5, int8_t s_pkey_index);
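
uctxt_offset() centralizes the event-array indexing that file_ops.c and
user_exp_rcv.c previously open-coded as (ctxt - first_dyn_alloc_ctxt) *
HFI1_MAX_SHARED_CTXTS. A standalone sketch of the arithmetic; the constant 8
mirrors HFI1_MAX_SHARED_CTXTS in this driver generation, and the context
numbers are invented:

#include <stdio.h>

#define MAX_SHARED_CTXTS 8	/* event slots reserved per user context */

static unsigned long uctxt_offset(unsigned ctxt, unsigned first_dyn_alloc_ctxt)
{
	return (unsigned long)(ctxt - first_dyn_alloc_ctxt) * MAX_SHARED_CTXTS;
}

int main(void)
{
	/* user context 5, kernel owns contexts 0-2, subcontext 2 */
	unsigned long idx = uctxt_offset(5, 3) + 2;

	printf("dd->events[%lu]\n", idx);	/* (5 - 3) * 8 + 2 = 18 */
	return 0;
}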

+ 21 - 22
drivers/infiniband/hw/hfi1/init.c

@@ -123,8 +123,6 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
 static inline u64 encode_rcv_header_entry_size(u16 size);
 
 static struct idr hfi1_unit_table;
-u32 hfi1_cpulist_count;
-unsigned long *hfi1_cpulist;
 
 static int hfi1_create_kctxt(struct hfi1_devdata *dd,
 			     struct hfi1_pportdata *ppd)
@@ -285,6 +283,27 @@ static int allocate_rcd_index(struct hfi1_devdata *dd,
 	return 0;
 }
 
+/**
+ * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
+ * array
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * This is a wrapper for hfi1_rcd_get_by_index() that first checks that the
+ * given ctxt index is valid.
+ *
+ * The caller is responsible for calling the matching _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+						 u16 ctxt)
+{
+	if (ctxt < dd->num_rcv_contexts)
+		return hfi1_rcd_get_by_index(dd, ctxt);
+
+	return NULL;
+}
+
 /**
  * hfi1_rcd_get_by_index
  * @dd: pointer to a valid devdata structure
@@ -1272,39 +1291,21 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
 	dd->int_counter = alloc_percpu(u64);
 	if (!dd->int_counter) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu int_counter\n");
 		goto bail;
 	}
 
 	dd->rcv_limit = alloc_percpu(u64);
 	if (!dd->rcv_limit) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu rcv_limit\n");
 		goto bail;
 	}
 
 	dd->send_schedule = alloc_percpu(u64);
 	if (!dd->send_schedule) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu int_counter\n");
 		goto bail;
 	}
 
-	if (!hfi1_cpulist_count) {
-		u32 count = num_online_cpus();
-
-		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
-				       GFP_KERNEL);
-		if (hfi1_cpulist)
-			hfi1_cpulist_count = count;
-		else
-			hfi1_early_err(
-			&pdev->dev,
-			"Could not alloc cpulist info, cpu affinity might be wrong\n");
-	}
 	kobject_init(&dd->kobj, &hfi1_devdata_type);
 	return dd;
 
@@ -1477,8 +1478,6 @@ static void __exit hfi1_mod_cleanup(void)
 	node_affinity_destroy();
 	hfi1_wss_exit();
 	hfi1_dbg_exit();
-	hfi1_cpulist_count = 0;
-	kfree(hfi1_cpulist);
 
 	idr_destroy(&hfi1_unit_table);
 	dispose_firmware();	/* asymmetric with obtain_firmware() */
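
hfi1_rcd_get_by_index_safe() keeps the reference-taking getter trusting while
giving untrusted callers (the ioctl paths) a bounds check in front of it. A
toy model of the split; the refcount array stands in for dd->rcd[]:

#include <stdio.h>

#define NUM_RCV_CONTEXTS 16

static int rcd_refcount[NUM_RCV_CONTEXTS];

static int *rcd_get_by_index(unsigned ctxt)	/* trusts its caller */
{
	rcd_refcount[ctxt]++;			/* models the _get() reference */
	return &rcd_refcount[ctxt];
}

static int *rcd_get_by_index_safe(unsigned ctxt)
{
	if (ctxt < NUM_RCV_CONTEXTS)
		return rcd_get_by_index(ctxt);
	return NULL;				/* out of range: no reference taken */
}

int main(void)
{
	printf("%d %d\n", rcd_get_by_index_safe(3) != NULL,
	       rcd_get_by_index_safe(99) != NULL);	/* prints: 1 0 */
	return 0;
}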

+ 6 - 4
drivers/infiniband/hw/hfi1/mad.c

@@ -711,6 +711,7 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
 			/* Bad mkey not a violation below level 2 */
 			if (ibp->rvp.mkeyprot < 2)
 				break;
+			/* fall through */
 		case IB_MGMT_METHOD_SET:
 		case IB_MGMT_METHOD_TRAP_REPRESS:
 			if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -2888,7 +2889,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	struct _vls_dctrs *vlinfo;
 	size_t response_data_size;
 	u32 num_ports;
-	u8 num_pslm;
 	u8 lq, num_vls;
 	u8 res_lli, res_ler;
 	u64 port_mask;
@@ -2898,7 +2898,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	int vfi;
 
 	num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
-	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
 	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
 	res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
@@ -4293,7 +4292,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
 			       const struct ib_wc *in_wc)
 {
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	u16 slid = ib_lid_cpu16(in_wc->slid);
 	u16 pkey;
 
 	if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
@@ -4320,7 +4318,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
 	 */
 	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
 		return 0;
-	ingress_pkey_table_fail(ppd, pkey, slid);
+	/*
+	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
+	 * information is obtained elsewhere. Mask off the upper 16 bits.
+	 */
+	ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
 	return 1;
 }
 

+ 1 - 2
drivers/infiniband/hw/hfi1/rc.c

@@ -276,7 +276,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	if (IS_ERR(ps->s_txreq))
 		goto bail_no_tx;
 
-	ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
 	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
 		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 		hwords = 5;
@@ -2175,7 +2174,7 @@ send_middle:
 			goto no_immediate_data;
 		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
 			goto send_last_inv;
-		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
 	case OP(SEND_LAST_WITH_IMMEDIATE):
 send_last_imm:
 		wc.ex.imm_data = ohdr->u.imm_data;

+ 0 - 9
drivers/infiniband/hw/hfi1/ruc.c

@@ -825,11 +825,9 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct hfi1_ibport *ibp = ps->ibp;
-	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 	u32 bth1 = 0;
 	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
 	u16 lrh0 = HFI1_LRH_BTH;
-	u16 slid;
 	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
 	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
 					 extra_bytes) >> 2);
@@ -866,13 +864,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
 	}
 	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
-
-	if (!ppd->lid)
-		slid = be16_to_cpu(IB_LID_PERMISSIVE);
-	else
-		slid = ppd->lid |
-			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
-			((1 << ppd->lmc) - 1));
 	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
 			 lrh0,
 			 qp->s_hdrwords + nwords,

+ 9 - 10
drivers/infiniband/hw/hfi1/sdma.c

@@ -1392,6 +1392,13 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		return ret;
 
 	idle_cnt = ns_to_cclock(dd, idle_cnt);
+	if (idle_cnt)
+		dd->default_desc1 =
+			SDMA_DESC1_HEAD_TO_HOST_FLAG;
+	else
+		dd->default_desc1 =
+			SDMA_DESC1_INT_REQ_FLAG;
+
 	if (!sdma_desct_intr)
 		sdma_desct_intr = SDMA_DESC_INTR;
 
@@ -1436,13 +1443,6 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		sde->tail_csr =
 			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
 
-		if (idle_cnt)
-			dd->default_desc1 =
-				SDMA_DESC1_HEAD_TO_HOST_FLAG;
-		else
-			dd->default_desc1 =
-				SDMA_DESC1_INT_REQ_FLAG;
-
 		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
 			     (unsigned long)sde);
 
@@ -2144,7 +2144,6 @@ void sdma_dumpstate(struct sdma_engine *sde)
 
 static void dump_sdma_state(struct sdma_engine *sde)
 {
-	struct hw_sdma_desc *descq;
 	struct hw_sdma_desc *descqp;
 	u64 desc[2];
 	u64 addr;
@@ -2155,7 +2154,6 @@ static void dump_sdma_state(struct sdma_engine *sde)
 	head = sde->descq_head & sde->sdma_mask;
 	tail = sde->descq_tail & sde->sdma_mask;
 	cnt = sdma_descq_freecnt(sde);
-	descq = sde->descq;
 
 	dd_dev_err(sde->dd,
 		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
@@ -2593,7 +2591,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 			 * 7220, e.g.
 			 */
 			ss->go_s99_running = 1;
-			/* fall through and start dma engine */
+			/* fall through -- and start dma engine */
 		case sdma_event_e10_go_hw_start:
 			/* This reference means the state machine is started */
 			sdma_get(&sde->state);
@@ -3016,6 +3014,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 		case sdma_event_e60_hw_halted:
 			need_progress = 1;
 			sdma_err_progress_check_schedule(sde);
+			/* fall through */
 		case sdma_event_e90_sw_halted:
 			/*
 			* SW initiated halt does not perform engines

+ 1 - 1
drivers/infiniband/hw/hfi1/sysfs.c

@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
 	 * give a more accurate picture of total contexts available.
 	 */
 	return scnprintf(buf, PAGE_SIZE, "%u\n",
-			 min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
+			 min(dd->num_user_contexts,
 			     (u32)dd->sc_sizes[SC_USER].count));
 }
 

+ 12 - 15
drivers/infiniband/hw/hfi1/trace.c

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -91,12 +91,17 @@ u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
 		return __get_16b_hdr_len(&opa_hdr->opah);
 }
 
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
+const char *hfi1_trace_get_packet_l4_str(u8 l4)
 {
-	if (packet->etype != RHF_RCV_TYPE_BYPASS)
-		return "IB";
+	if (l4)
+		return "16B";
+	else
+		return "9B";
+}
 
-	switch (hfi1_16B_get_l2(packet->hdr)) {
+const char *hfi1_trace_get_packet_l2_str(u8 l2)
+{
+	switch (l2) {
 	case 0:
 		return "0";
 	case 1:
@@ -109,14 +114,6 @@ const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
 	return "";
 }
 
-const char *hfi1_trace_get_packet_type_str(u8 l4)
-{
-	if (l4)
-		return "16B";
-	else
-		return "9B";
-}
-
 #define IMM_PRN  "imm:%d"
 #define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
 #define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
@@ -154,7 +151,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
 	*opcode = ib_bth_get_opcode(ohdr);
 	*tver = ib_bth_get_tver(ohdr);
 	*pkey = ib_bth_get_pkey(ohdr);
-	*psn = ib_bth_get_psn(ohdr);
+	*psn = mask_psn(ib_bth_get_psn(ohdr));
 	*qpn = ib_bth_get_qpn(ohdr);
 }
 
@@ -169,7 +166,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
 	*pad = ib_bth_get_pad(ohdr);
 	*se = ib_bth_get_se(ohdr);
 	*tver = ib_bth_get_tver(ohdr);
-	*psn = ib_bth_get_psn(ohdr);
+	*psn = mask_psn(ib_bth_get_psn(ohdr));
 	*qpn = ib_bth_get_qpn(ohdr);
 }
 

+ 10 - 0
drivers/infiniband/hw/hfi1/trace.h

@@ -44,6 +44,16 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype)                  \
+__print_symbolic(etype,                         \
+	packettype_name(EXPECTED),              \
+	packettype_name(EAGER),                 \
+	packettype_name(IB),                    \
+	packettype_name(ERROR),                 \
+	packettype_name(BYPASS))
+
 #include "trace_dbg.h"
 #include "trace_misc.h"
 #include "trace_ctxts.h"

+ 26 - 23
drivers/infiniband/hw/hfi1/trace_ibhdrs.h

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -99,8 +99,7 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr);
 const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
 u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
 u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
-const char *hfi1_trace_get_packet_type_str(u8 l4);
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_l4_str(u8 l4);
 void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
 			     u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
 			     u8 *se, u8 *pad, u8 *opcode, u8 *tver,
@@ -129,6 +128,8 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
 			       u8 se, u8 pad, u8 opcode, const char *opname,
 			       u8 tver, u16 pkey, u32 psn, u32 qpn);
 
+const char *hfi1_trace_get_packet_l2_str(u8 l2);
+
 #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
 
 #define lrh_name(lrh) { HFI1_##lrh, #lrh }
@@ -136,8 +137,6 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
 __print_symbolic(lrh,                    \
 	lrh_name(LRH_BTH),               \
 	lrh_name(LRH_GRH))
-#define PKT_ENTRY(pkt)	__string(ptype,  hfi1_trace_get_packet_str(packet))
-#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
 
 DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 		    TP_PROTO(struct hfi1_devdata *dd,
@@ -146,12 +145,12 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 		    TP_ARGS(dd, packet, sc5),
 		    TP_STRUCT__entry(
 			DD_DEV_ENTRY(dd)
-			PKT_ENTRY(packet)
-			__field(bool, bypass)
+			__field(u8, etype)
 			__field(u8, ack)
 			__field(u8, age)
 			__field(u8, becn)
 			__field(u8, fecn)
+			__field(u8, l2)
 			__field(u8, l4)
 			__field(u8, lnh)
 			__field(u8, lver)
@@ -176,10 +175,10 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 			),
 		    TP_fast_assign(
 			DD_DEV_ASSIGN(dd);
-			PKT_ASSIGN(packet);
 
-			if (packet->etype == RHF_RCV_TYPE_BYPASS) {
-				__entry->bypass = true;
+			__entry->etype = packet->etype;
+			__entry->l2 = hfi1_16B_get_l2(packet->hdr);
+			if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
 				hfi1_trace_parse_16b_hdr(packet->hdr,
 							 &__entry->age,
 							 &__entry->becn,
@@ -203,7 +202,6 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 							   &__entry->psn,
 							   &__entry->qpn);
 			} else {
-				__entry->bypass = false;
 				hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
 							&__entry->lnh,
 							&__entry->lver,
@@ -233,9 +231,13 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 			 ),
 		    TP_printk("[%s] (%s) %s %s hlen:%d %s",
 			      __get_str(dev),
-			      __get_str(ptype),
+			      __entry->etype != RHF_RCV_TYPE_BYPASS ?
+					show_packettype(__entry->etype) :
+					hfi1_trace_get_packet_l2_str(
+						__entry->l2),
 			      hfi1_trace_fmt_lrh(p,
-						 __entry->bypass,
+						 __entry->etype ==
+							RHF_RCV_TYPE_BYPASS,
 						 __entry->age,
 						 __entry->becn,
 						 __entry->fecn,
@@ -252,7 +254,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
 						 __entry->dlid,
 						 __entry->slid),
 			      hfi1_trace_fmt_bth(p,
-						 __entry->bypass,
+						 __entry->etype ==
+							RHF_RCV_TYPE_BYPASS,
 						 __entry->ack,
 						 __entry->becn,
 						 __entry->fecn,
@@ -284,7 +287,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 		    TP_ARGS(dd, opah, sc5),
 		    TP_STRUCT__entry(
 			DD_DEV_ENTRY(dd)
-			__field(bool, bypass)
+			__field(u8, hdr_type)
 			__field(u8, ack)
 			__field(u8, age)
 			__field(u8, becn)
@@ -316,8 +319,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 
 			DD_DEV_ASSIGN(dd);
 
-			if (opah->hdr_type)  {
-				__entry->bypass = true;
+			__entry->hdr_type = opah->hdr_type;
+			if (__entry->hdr_type)  {
 				hfi1_trace_parse_16b_hdr(&opah->opah,
 							 &__entry->age,
 							 &__entry->becn,
@@ -331,7 +334,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 							 &__entry->dlid,
 							 &__entry->slid);
 
-				if (entry->l4 == OPA_16B_L4_IB_LOCAL)
+				if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
 					ohdr = &opah->opah.u.oth;
 				else
 					ohdr = &opah->opah.u.l.oth;
@@ -345,7 +348,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 							 &__entry->psn,
 							 &__entry->qpn);
 			} else {
-				__entry->bypass = false;
+				__entry->l4 = OPA_16B_L4_9B;
 				hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
 							&__entry->lnh,
 							&__entry->lver,
@@ -354,7 +357,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 							&__entry->len,
 							&__entry->dlid,
 							&__entry->slid);
-				if (entry->lnh == HFI1_LRH_BTH)
+				if (__entry->lnh == HFI1_LRH_BTH)
 					ohdr = &opah->ibh.u.oth;
 				else
 					ohdr = &opah->ibh.u.l.oth;
@@ -378,9 +381,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 		    ),
 		    TP_printk("[%s] (%s) %s %s hlen:%d %s",
 			      __get_str(dev),
-			      hfi1_trace_get_packet_type_str(__entry->l4),
+			      hfi1_trace_get_packet_l4_str(__entry->l4),
 			      hfi1_trace_fmt_lrh(p,
-						 __entry->bypass,
+						 !!__entry->hdr_type,
 						 __entry->age,
 						 __entry->becn,
 						 __entry->fecn,
@@ -397,7 +400,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 						 __entry->dlid,
 						 __entry->slid),
 			      hfi1_trace_fmt_bth(p,
-						 __entry->bypass,
+						 !!__entry->hdr_type,
 						 __entry->ack,
 						 __entry->becn,
 						 __entry->fecn,

+ 1 - 10
drivers/infiniband/hw/hfi1/trace_rx.h

@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -62,15 +62,6 @@ __print_symbolic(type,                       \
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM hfi1_rx
 
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype)                  \
-__print_symbolic(etype,                         \
-	packettype_name(EXPECTED),              \
-	packettype_name(EAGER),                 \
-	packettype_name(IB),                    \
-	packettype_name(ERROR),                 \
-	packettype_name(BYPASS))
-
 TRACE_EVENT(hfi1_rcvhdr,
 	    TP_PROTO(struct hfi1_devdata *dd,
 		     u32 ctxt,

+ 0 - 1
drivers/infiniband/hw/hfi1/uc.c

@@ -93,7 +93,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 		goto done_free_tx;
 	}
 
-	ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
 	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
 		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 		hwords = 5;

+ 0 - 2
drivers/infiniband/hw/hfi1/ud.c

@@ -854,7 +854,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 	int mgmt_pkey_idx = -1;
 	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct ib_header *hdr = packet->hdr;
 	void *data = packet->payload;
 	u32 tlen = packet->tlen;
 	struct rvt_qp *qp = packet->qp;
@@ -880,7 +879,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 		dlid_is_permissive = (dlid == permissive_lid);
 		slid_is_permissive = (slid == permissive_lid);
 	} else {
-		hdr = packet->hdr;
 		pkey = ib_bth_get_pkey(ohdr);
 		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
 		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));

+ 2 - 7
drivers/infiniband/hw/hfi1/user_exp_rcv.c

@@ -542,14 +542,10 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	unsigned long *ev = uctxt->dd->events +
-		(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
-		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
+		(uctxt_offset(uctxt) + fd->subctxt);
 	u32 *array;
 	int ret = 0;
 
-	if (!fd->invalid_tids)
-		return -EINVAL;
-
 	/*
 	 * copy_to_user() can sleep, which will leave the invalid_lock
 	 * locked and cause the MMU notifier to be blocked on the lock
@@ -942,8 +938,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
 			 * process in question.
 			 */
 			ev = uctxt->dd->events +
-			  (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
-			    HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
+				(uctxt_offset(uctxt) + fdata->subctxt);
 			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
 		}
 		fdata->invalid_tid_idx++;

+ 40 - 22
drivers/infiniband/hw/hfi1/user_sdma.c

@@ -956,10 +956,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
 
 	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages) {
-		SDMA_DBG(req, "Failed page array alloc");
+	if (!pages)
 		return -ENOMEM;
-	}
 	memcpy(pages, node->pages, node->npages * sizeof(*pages));
 
 	npages -= node->npages;
@@ -1254,20 +1252,25 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 				struct user_sdma_txreq *tx, u32 datalen)
 {
 	u32 ahg[AHG_KDETH_ARRAY_SIZE];
-	int diff = 0;
+	int idx = 0;
 	u8 omfactor; /* KDETH.OM */
 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
 	struct hfi1_pkt_header *hdr = &req->hdr;
 	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
 	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+	size_t array_size = ARRAY_SIZE(ahg);
 
 	if (PBC2LRH(pbclen) != lrhlen) {
 		/* PBC.PbcLengthDWs */
-		AHG_HEADER_SET(ahg, diff, 0, 0, 12,
-			       cpu_to_le16(LRH2PBC(lrhlen)));
+		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
+				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
+		if (idx < 0)
+			return idx;
 		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
-		AHG_HEADER_SET(ahg, diff, 3, 0, 16,
-			       cpu_to_be16(lrhlen >> 2));
+		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
+				     (__force u16)cpu_to_be16(lrhlen >> 2));
+		if (idx < 0)
+			return idx;
 	}
 
 	/*
@@ -1278,12 +1281,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
 		val32 |= 1UL << 31;
-	AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
-	AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
+			     (__force u16)cpu_to_be16(val32 >> 16));
+	if (idx < 0)
+		return idx;
+	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
+			     (__force u16)cpu_to_be16(val32 & 0xffff));
+	if (idx < 0)
+		return idx;
 	/* KDETH.Offset */
-	AHG_HEADER_SET(ahg, diff, 15, 0, 16,
-		       cpu_to_le16(req->koffset & 0xffff));
-	AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
+	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
+			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
+	if (idx < 0)
+		return idx;
+	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
+			     (__force u16)cpu_to_le16(req->koffset >> 16));
+	if (idx < 0)
+		return idx;
 	if (req_opcode(req->info.ctrl) == EXPECTED) {
 		__le16 val;
 
@@ -1310,10 +1324,13 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
 				 KDETH_OM_SMALL_SHIFT;
 		/* KDETH.OM and KDETH.OFFSET (TID) */
-		AHG_HEADER_SET(ahg, diff, 7, 0, 16,
-			       ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+		idx = ahg_header_set(
+				ahg, idx, array_size, 7, 0, 16,
+				((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
 				((req->tidoffset >> omfactor)
-				 & 0x7fff)));
+				& 0x7fff)));
+		if (idx < 0)
+			return idx;
 		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
 		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
 				   (EXP_TID_GET(tidval, IDX) & 0x3ff));
@@ -1330,21 +1347,22 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 					     AHG_KDETH_INTR_SHIFT));
 		}
 
-		AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
+		idx = ahg_header_set(ahg, idx, array_size,
+				     7, 16, 14, (__force u16)val);
+		if (idx < 0)
+			return idx;
 	}
-	if (diff < 0)
-		return diff;
 
 	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
 					req->info.comp_idx, req->sde->this_idx,
-					req->ahg_idx, ahg, diff, tidval);
+					req->ahg_idx, ahg, idx, tidval);
 	sdma_txinit_ahg(&tx->txreq,
 			SDMA_TXREQ_F_USE_AHG,
-			datalen, req->ahg_idx, diff,
+			datalen, req->ahg_idx, idx,
 			ahg, sizeof(req->hdr),
 			user_sdma_txreq_cb);
 
-	return diff;
+	return idx;
 }
 
 /*

+ 20 - 9
drivers/infiniband/hw/hfi1/user_sdma.h

@@ -80,15 +80,26 @@
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value)			\
-	do {								\
-		if ((idx) < ARRAY_SIZE((arr)))				\
-			(arr)[(idx++)] = sdma_build_ahg_descriptor(	\
-				(__force u16)(value), (dw), (bit),	\
-							(width));	\
-		else							\
-			return -ERANGE;					\
-	} while (0)
+/**
+ * Build an SDMA AHG header update descriptor and save it to an array.
+ * @arr        - Array to save the descriptor to.
+ * @idx        - Index of the array at which the descriptor will be saved.
+ * @array_size - Size of the array arr.
+ * @dw         - Update index into the header in DWs.
+ * @bit        - Start bit.
+ * @width      - Field width.
+ * @value      - 16 bits of immediate data to write into the field.
+ * Returns -ERANGE if idx is invalid. If successful, returns the next index
+ * (idx + 1) of the array to be used for the next descriptor.
+ */
+static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
+				 u8 dw, u8 bit, u8 width, u16 value)
+{
+	if ((size_t)idx >= array_size)
+		return -ERANGE;
+	arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
+	return idx;
+}
 
 /* Tx request flag bits */
 #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
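
Compared with the old AHG_HEADER_SET macro, the inline ahg_header_set() makes
the bounds failure visible at the call site rather than hiding a return
inside a do/while, which is why the user_sdma.c hunks check idx after every
call. A self-contained sketch of the chaining; the descriptor packing is
invented here, the real layout comes from sdma_build_ahg_descriptor():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t build_ahg_descriptor(uint16_t val, uint8_t dw, uint8_t bit,
				     uint8_t width)
{
	/* illustrative packing: dw[31:27] bit[26:22] width[21:16] val[15:0] */
	return ((uint32_t)dw << 27) | ((uint32_t)bit << 22) |
	       ((uint32_t)width << 16) | val;
}

static int ahg_header_set(uint32_t *arr, int idx, size_t array_size,
			  uint8_t dw, uint8_t bit, uint8_t width, uint16_t val)
{
	if ((size_t)idx >= array_size)
		return -ERANGE;
	arr[idx++] = build_ahg_descriptor(val, dw, bit, width);
	return idx;				/* next free slot */
}

int main(void)
{
	uint32_t ahg[2];
	int idx = 0;

	idx = ahg_header_set(ahg, idx, 2, 0, 0, 12, 0x123);
	if (idx < 0)
		return 1;
	idx = ahg_header_set(ahg, idx, 2, 3, 0, 16, 0x456);
	if (idx < 0)
		return 1;
	/* a third call would return -ERANGE instead of writing past ahg[] */
	printf("descriptors used: %d\n", idx);	/* 2 */
	return 0;
}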

+ 8 - 24
drivers/infiniband/hw/hfi1/verbs.c

@@ -146,6 +146,9 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
+/* 16B trailing buffer */
+static const u8 trail_buf[MAX_16B_PADDING];
+
 static uint wss_threshold;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -812,9 +815,7 @@ static int build_verbs_tx_desc(
 	int ret = 0;
 	struct hfi1_sdma_header *phdr = &tx->phdr;
 	u16 hdrbytes = tx->hdr_dwords << 2;
-	u32 *hdr;
 	u8 extra_bytes = 0;
-	static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
 
 	if (tx->phdr.hdr.hdr_type) {
 		/*
@@ -823,9 +824,6 @@ static int build_verbs_tx_desc(
 		 */
 		extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
 			      (SIZE_OF_CRC << 2) + SIZE_OF_LT;
-		hdr = (u32 *)&phdr->hdr.opah;
-	} else {
-		hdr = (u32 *)&phdr->hdr.ibh;
 	}
 	if (!ahg_info->ahgcount) {
 		ret = sdma_txinit_ahg(
@@ -869,9 +867,9 @@ static int build_verbs_tx_desc(
 	}
 
 	/* add icrc, lt byte, and padding to flit */
-	if (extra_bytes != 0)
+	if (extra_bytes)
 		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-					trail_buf, extra_bytes);
+					(void *)trail_buf, extra_bytes);
 
 bail_txadd:
 	return ret;
@@ -891,14 +889,12 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	u8 sc5 = priv->s_sc;
 	int ret;
 	u32 dwords;
-	bool bypass = false;
 
 	if (ps->s_txreq->phdr.hdr.hdr_type) {
 		u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
 
 		dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
 			  SIZE_OF_LT) >> 2;
-		bypass = true;
 	} else {
 		dwords = (len + 3) >> 2;
 	}
@@ -1033,8 +1029,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	int wc_status = IB_WC_SUCCESS;
 	int ret = 0;
 	pio_release_cb cb = NULL;
-	u32 lrh0_16b;
-	bool bypass = false;
 	u8 extra_bytes = 0;
 
 	if (ps->s_txreq->phdr.hdr.hdr_type) {
@@ -1043,8 +1037,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 		extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
 		dwords = (len + extra_bytes) >> 2;
 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
-		lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
-		bypass = true;
 	} else {
 		dwords = (len + 3) >> 2;
 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
@@ -1128,18 +1120,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 				len -= slen;
 			}
 		}
-		/*
-		 * Bypass packet will need to copy additional
-		 * bytes to accommodate for CRC and LT bytes
-		 */
-		if (extra_bytes) {
-			u8 *empty_buf;
+		/* add icrc, lt byte, and padding to flit */
+		if (extra_bytes)
+			seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
 
-			empty_buf = kcalloc(extra_bytes, sizeof(u8),
-					    GFP_KERNEL);
-			seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
-			kfree(empty_buf);
-		}
 		seg_pio_copy_end(pbuf);
 	}
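
The trail-buffer change removes a per-packet kcalloc()/kfree() pair from the
PIO send path: one static const buffer of zeroes, sized for the worst-case
CRC + LT + padding, now serves every packet in both the SDMA and PIO paths.
A sketch under the assumption that MAX_16B_PADDING is 12 bytes, matching the
old comment's CRC = 4, LT = 1, pad up to 7:

#include <stdio.h>
#include <string.h>

#define MAX_16B_PADDING 12	/* assumed: CRC(4) + LT(1) + pad(<=7) */

static const unsigned char trail_buf[MAX_16B_PADDING];	/* zero-filled, .bss */

static void copy_trailer(unsigned char *flit, unsigned extra_bytes)
{
	/* previously: kcalloc(extra_bytes, 1, GFP_KERNEL) ... kfree() */
	memcpy(flit, trail_buf, extra_bytes);
}

int main(void)
{
	unsigned char flit[MAX_16B_PADDING];

	memset(flit, 0xff, sizeof(flit));
	copy_trailer(flit, 5);
	printf("%u\n", flit[0]);	/* 0: zero padding from the shared buffer */
	return 0;
}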
 

+ 2 - 0
drivers/infiniband/hw/hfi1/verbs_txreq.h

@@ -92,6 +92,8 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	tx->psc = priv->s_sendcontext;
 	/* so that we can test if the sdma decriptors are there */
 	tx->txreq.num_desc = 0;
+	/* Set the header type */
+	tx->phdr.hdr.hdr_type = priv->hdr_type;
 	return tx;
 }
 

+ 5 - 2
drivers/infiniband/hw/hfi1/vnic_main.c

@@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
 	struct rdma_netdev *rn;
 	int i, size, rc;
 
+	if (!dd->num_vnic_contexts)
+		return ERR_PTR(-ENOMEM);
+
 	if (!port_num || (port_num > dd->num_pports))
 		return ERR_PTR(-EINVAL);
 
@@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
 
 	size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
 	netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
-				  dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
+				  dd->chip_sdma_engines, dd->num_vnic_contexts);
 	if (!netdev)
 		return ERR_PTR(-ENOMEM);
 
@@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
 	vinfo = opa_vnic_dev_priv(netdev);
 	vinfo->dd = dd;
 	vinfo->num_tx_q = dd->chip_sdma_engines;
-	vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
+	vinfo->num_rx_q = dd->num_vnic_contexts;
 	vinfo->netdev = netdev;
 	rn->free_rdma_netdev = hfi1_vnic_free_rn;
 	rn->set_id = hfi1_vnic_set_vesw_id;

+ 23 - 2
drivers/infiniband/hw/hns/Kconfig

@@ -1,10 +1,31 @@
 config INFINIBAND_HNS
 	tristate "HNS RoCE Driver"
 	depends on NET_VENDOR_HISILICON
-	depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
+	depends on ARM64 || (COMPILE_TEST && 64BIT)
 	---help---
 	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
-	  is used in Hisilicon Hi1610 and more further ICT SoC.
+	  is used in the Hisilicon Hip06 and later ICT SoCs, attached as a
+	  platform device.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called hns-roce.
+
+config INFINIBAND_HNS_HIP06
+	tristate "Hisilicon Hip06 Family RoCE support"
+	depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+	---help---
+	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
+	  Hip07 SoC. These RoCE engines are platform devices.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hns-roce-hw-v1.
+
+config INFINIBAND_HNS_HIP08
+	tristate "Hisilicon Hip08 Family RoCE support"
+	depends on INFINIBAND_HNS && PCI && HNS3
+	---help---
+	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+	  The RoCE engine is a PCI device.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hns-roce-hw-v2.

+ 7 - 1
drivers/infiniband/hw/hns/Makefile

@@ -2,7 +2,13 @@
 # Makefile for the Hisilicon RoCE drivers.
 #
 
+ccflags-y :=  -Idrivers/net/ethernet/hisilicon/hns3
+
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
 hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
-	hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
+	hns_roce_cq.o hns_roce_alloc.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+hns-roce-hw-v1-objs := hns_roce_hw_v1.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o

+ 1 - 1
drivers/infiniband/hw/hns/hns_roce_ah.c

@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct ib_gid_attr gid_attr;
 	struct hns_roce_ah *ah;
 	u16 vlan_tag = 0xffff;

+ 5 - 3
drivers/infiniband/hw/hns/hns_roce_alloc.c

@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
 {
 	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
 }
+EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
 
 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 				int align, unsigned long *obj)
@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		       struct hns_roce_buf *buf)
 {
 	int i;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;
 
 	if (buf->nbufs == 1) {
@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
-				dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+				dma_free_coherent(dev, PAGE_SIZE,
 						  buf->page_list[i].buf,
 						  buf->page_list[i].map);
 		kfree(buf->page_list);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_buf_free);
 
 int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		       struct hns_roce_buf *buf)
@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 	int i = 0;
 	dma_addr_t t;
 	struct page **pages;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;
 
 	/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
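
The recurring &hr_dev->pdev->dev to hr_dev->dev substitution in this file (and
in cmd.c below) prepares for hip08, where the RoCE engine is a PCI device
rather than a platform device: common code keeps one cached struct device
pointer and stops assuming a platform parent. A toy model; the struct layouts
are stand-ins, not <linux/device.h>:

#include <stdio.h>

struct device { const char *init_name; };
struct platform_device { struct device dev; };	/* hip06 attaches this way */
struct pci_dev { struct device dev; };		/* hip08 attaches this way */

struct hr_dev_model {
	struct device *dev;	/* cached once at probe time, bus-agnostic */
};

static void common_path(const struct hr_dev_model *hr)
{
	/* shared code says hr->dev, never &hr->pdev->dev */
	printf("%s\n", hr->dev->init_name);
}

int main(void)
{
	struct platform_device pdev = { { "hns-roce-v1" } };
	struct pci_dev pcid = { { "hns-roce-v2" } };
	struct hr_dev_model a = { &pdev.dev }, b = { &pcid.dev };

	common_path(&a);
	common_path(&b);
	return 0;
}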

+ 12 - 95
drivers/infiniband/hw/hns/hns_roce_cmd.c

@@ -38,69 +38,7 @@
 
 #define CMD_POLL_TOKEN		0xffff
 #define CMD_MAX_NUM		32
-#define STATUS_MASK		0xff
 #define CMD_TOKEN_MASK		0x1f
-#define GO_BIT_TIMEOUT_MSECS	10000
-
-enum {
-	HCR_TOKEN_OFFSET	= 0x14,
-	HCR_STATUS_OFFSET	= 0x18,
-	HCR_GO_BIT		= 15,
-};
-
-static int cmd_pending(struct hns_roce_dev *hr_dev)
-{
-	u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
-
-	return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-/* this function should be serialized with "hcr_mutex" */
-static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
-				       u64 in_param, u64 out_param,
-				       u32 in_modifier, u8 op_modifier, u16 op,
-				       u16 token, int event)
-{
-	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
-	u32 __iomem *hcr = (u32 *)cmd->hcr;
-	int ret = -EAGAIN;
-	unsigned long end;
-	u32 val = 0;
-
-	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
-	while (cmd_pending(hr_dev)) {
-		if (time_after(jiffies, end)) {
-			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
-				(int)end);
-			goto out;
-		}
-		cond_resched();
-	}
-
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
-		       op);
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
-		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
-	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
-	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
-	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
-		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
-	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
-	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
-	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
-	/* Memory barrier */
-	wmb();
-
-	__raw_writel(cpu_to_le32(val), hcr + 5);
-
-	mmiowb();
-	ret = 0;
-
-out:
-	return ret;
-}
 
 static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
 				     u64 out_param, u32 in_modifier,
@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
 				     int event)
 {
 	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	int ret = -EAGAIN;
+	int ret;
 
 	mutex_lock(&cmd->hcr_mutex);
-	ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
-					  in_modifier, op_modifier, op, token,
-					  event);
+	ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+				    op_modifier, op, token, event);
 	mutex_unlock(&cmd->hcr_mutex);
 
 	return ret;
@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 				    u8 op_modifier, u16 op,
 				    unsigned long timeout)
 {
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 __iomem *hcr = hr_dev->cmd.hcr;
-	unsigned long end = 0;
-	u32 status = 0;
+	struct device *dev = hr_dev->dev;
 	int ret;
 
 	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 					CMD_POLL_TOKEN, 0);
 	if (ret) {
 		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
-		goto out;
-	}
-
-	end = msecs_to_jiffies(timeout) + jiffies;
-	while (cmd_pending(hr_dev) && time_before(jiffies, end))
-		cond_resched();
-
-	if (cmd_pending(hr_dev)) {
-		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
-		ret = -ETIMEDOUT;
-		goto out;
+		return ret;
 	}
 
-	status = le32_to_cpu((__force __be32)
-			      __raw_readl(hcr + HCR_STATUS_OFFSET));
-	if ((status & STATUS_MASK) != 0x1) {
-		dev_err(dev, "mailbox status 0x%x!\n", status);
-		ret = -EBUSY;
-		goto out;
-	}
-
-out:
-	return ret;
+	return hr_dev->hw->chk_mbox(hr_dev, timeout);
 }
 
 static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 				    unsigned long timeout)
 {
 	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_cmd_context *context;
-	int ret = 0;
+	struct device *dev = hr_dev->dev;
+	int ret;
 
 	spin_lock(&cmd->context_lock);
 	WARN_ON(cmd->free_head < 0);
@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 					      in_modifier, op_modifier, op,
 					      timeout);
 }
+EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
 
 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	mutex_init(&hr_dev->cmd.hcr_mutex);
 	sema_init(&hr_dev->cmd.poll_sem, 1);
 	hr_dev->cmd.use_events = 0;
 	hr_dev->cmd.toggle = 1;
 	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
-	hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
 	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
 					   HNS_ROCE_MAILBOX_SIZE,
 					   HNS_ROCE_MAILBOX_SIZE, 0);
@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
 
 	return mailbox;
 }
+EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
 
 void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_cmd_mailbox *mailbox)
@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
 	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
 	kfree(mailbox);
 }
+EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);

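Note: the command path is now hardware-agnostic: posting a mailbox and checking its completion go through hr_dev->hw->post_mbox() and hr_dev->hw->chk_mbox(), so the register-based v1 HCR code removed above and the v2 command queue can live in separate modules. A standalone sketch of that dispatch shape (all names hypothetical):

    #include <stdio.h>

    struct hw_ops {
        int (*post_mbox)(unsigned long in_param, unsigned short op);
        int (*chk_mbox)(unsigned long timeout_ms);
    };

    /* one backend; a v2 backend would supply its own pair */
    static int v1_post_mbox(unsigned long in_param, unsigned short op)
    {
        printf("v1: write HCR registers, op=0x%x\n", op);
        return 0;
    }

    static int v1_chk_mbox(unsigned long timeout_ms)
    {
        printf("v1: poll HCR status for up to %lu ms\n", timeout_ms);
        return 0;
    }

    static const struct hw_ops v1_ops = {
        .post_mbox = v1_post_mbox,
        .chk_mbox  = v1_chk_mbox,
    };

    /* mirrors __hns_roce_cmd_mbox_poll(): post, then generation-specific wait */
    static int cmd_mbox_poll(const struct hw_ops *hw, unsigned long in_param,
                             unsigned short op, unsigned long timeout_ms)
    {
        int ret = hw->post_mbox(in_param, op);

        if (ret)
            return ret;
        return hw->chk_mbox(timeout_ms);
    }

    int main(void)
    {
        return cmd_mbox_poll(&v1_ops, 0, 0x41, 10000);
    }
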
+ 50 - 0
drivers/infiniband/hw/hns/hns_roce_cmd.h

@@ -36,6 +36,56 @@
 #define HNS_ROCE_MAILBOX_SIZE		4096
 #define HNS_ROCE_CMD_TIMEOUT_MSECS	10000
 
+enum {
+	/* QPC BT commands */
+	HNS_ROCE_CMD_WRITE_QPC_BT0	= 0x0,
+	HNS_ROCE_CMD_WRITE_QPC_BT1	= 0x1,
+	HNS_ROCE_CMD_WRITE_QPC_BT2	= 0x2,
+	HNS_ROCE_CMD_READ_QPC_BT0	= 0x4,
+	HNS_ROCE_CMD_READ_QPC_BT1	= 0x5,
+	HNS_ROCE_CMD_READ_QPC_BT2	= 0x6,
+	HNS_ROCE_CMD_DESTROY_QPC_BT0	= 0x8,
+	HNS_ROCE_CMD_DESTROY_QPC_BT1	= 0x9,
+	HNS_ROCE_CMD_DESTROY_QPC_BT2	= 0xa,
+
+	/* QPC operation */
+	HNS_ROCE_CMD_MODIFY_QPC		= 0x41,
+	HNS_ROCE_CMD_QUERY_QPC		= 0x42,
+
+	/* CQC BT commands */
+	HNS_ROCE_CMD_WRITE_CQC_BT0	= 0x10,
+	HNS_ROCE_CMD_WRITE_CQC_BT1	= 0x11,
+	HNS_ROCE_CMD_WRITE_CQC_BT2	= 0x12,
+	HNS_ROCE_CMD_READ_CQC_BT0	= 0x14,
+	HNS_ROCE_CMD_READ_CQC_BT1	= 0x15,
+	HNS_ROCE_CMD_READ_CQC_BT2	= 0x1b,
+	HNS_ROCE_CMD_DESTROY_CQC_BT0	= 0x18,
+	HNS_ROCE_CMD_DESTROY_CQC_BT1	= 0x19,
+	HNS_ROCE_CMD_DESTROY_CQC_BT2	= 0x1a,
+
+	/* MPT BT commands */
+	HNS_ROCE_CMD_WRITE_MPT_BT0	= 0x20,
+	HNS_ROCE_CMD_WRITE_MPT_BT1	= 0x21,
+	HNS_ROCE_CMD_WRITE_MPT_BT2	= 0x22,
+	HNS_ROCE_CMD_READ_MPT_BT0	= 0x24,
+	HNS_ROCE_CMD_READ_MPT_BT1	= 0x25,
+	HNS_ROCE_CMD_READ_MPT_BT2	= 0x26,
+	HNS_ROCE_CMD_DESTROY_MPT_BT0	= 0x28,
+	HNS_ROCE_CMD_DESTROY_MPT_BT1	= 0x29,
+	HNS_ROCE_CMD_DESTROY_MPT_BT2	= 0x2a,
+
+	/* SRQC BT commands */
+	HNS_ROCE_CMD_WRITE_SRQC_BT0	= 0x30,
+	HNS_ROCE_CMD_WRITE_SRQC_BT1	= 0x31,
+	HNS_ROCE_CMD_WRITE_SRQC_BT2	= 0x32,
+	HNS_ROCE_CMD_READ_SRQC_BT0	= 0x34,
+	HNS_ROCE_CMD_READ_SRQC_BT1	= 0x35,
+	HNS_ROCE_CMD_READ_SRQC_BT2	= 0x36,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT0	= 0x38,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT1	= 0x39,
+	HNS_ROCE_CMD_DESTROY_SRQC_BT2	= 0x3a,
+};
+
 enum {
 	/* TPT commands */
 	HNS_ROCE_CMD_SW2HW_MPT		= 0xd,

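Note: the new BT opcodes sit on a regular grid: each context type owns a base (QPC 0x0, CQC 0x10, MPT 0x20, SRQC 0x30), and WRITE/READ/DESTROY are at base + 0x0/0x4/0x8 plus the BT level 0..2. The one outlier in the table above is HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b rather than 0x16. A quick standalone check of the pattern:

    #include <assert.h>

    enum bt_verb { BT_WRITE = 0x0, BT_READ = 0x4, BT_DESTROY = 0x8 };

    static unsigned int bt_opcode(unsigned int base, enum bt_verb verb,
                                  unsigned int level)   /* level: 0..2 */
    {
        return base + verb + level;
    }

    int main(void)
    {
        assert(bt_opcode(0x00, BT_WRITE, 1)   == 0x01); /* WRITE_QPC_BT1 */
        assert(bt_opcode(0x20, BT_READ, 2)    == 0x26); /* READ_MPT_BT2 */
        assert(bt_opcode(0x30, BT_DESTROY, 0) == 0x38); /* DESTROY_SRQC_BT0 */
        return 0;
    }
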
+ 23 - 0
drivers/infiniband/hw/hns/hns_roce_common.h

@@ -341,6 +341,7 @@
 #define ROCEE_BT_CMD_L_REG			0x200
 
 #define ROCEE_MB1_REG				0x210
+#define ROCEE_MB6_REG				0x224
 #define ROCEE_DB_SQ_L_0_REG			0x230
 #define ROCEE_DB_OTHERS_L_0_REG			0x238
 #define ROCEE_QP1C_CFG0_0_REG			0x270
@@ -362,4 +363,26 @@
 #define ROCEE_ECC_UCERR_ALM0_REG		0xB34
 #define ROCEE_ECC_CERR_ALM0_REG			0xB40
 
+/* V2 ROCEE REG */
+#define ROCEE_TX_CMQ_BASEADDR_L_REG		0x07000
+#define ROCEE_TX_CMQ_BASEADDR_H_REG		0x07004
+#define ROCEE_TX_CMQ_DEPTH_REG			0x07008
+#define ROCEE_TX_CMQ_TAIL_REG			0x07010
+#define ROCEE_TX_CMQ_HEAD_REG			0x07014
+
+#define ROCEE_RX_CMQ_BASEADDR_L_REG		0x07018
+#define ROCEE_RX_CMQ_BASEADDR_H_REG		0x0701c
+#define ROCEE_RX_CMQ_DEPTH_REG			0x07020
+#define ROCEE_RX_CMQ_TAIL_REG			0x07024
+#define ROCEE_RX_CMQ_HEAD_REG			0x07028
+
+#define ROCEE_VF_SMAC_CFG0_REG			0x12000
+#define ROCEE_VF_SMAC_CFG1_REG			0x12004
+
+#define ROCEE_VF_SGID_CFG0_REG			0x10000
+#define ROCEE_VF_SGID_CFG1_REG			0x10004
+#define ROCEE_VF_SGID_CFG2_REG			0x10008
+#define ROCEE_VF_SGID_CFG3_REG			0x1000c
+#define ROCEE_VF_SGID_CFG4_REG			0x10010
+
 #endif /* _HNS_ROCE_COMMON_H */

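Note: the V2 register block describes two command-queue rings (TX and RX), each with a 64-bit base address split across _L/_H registers, a depth, and head/tail producer and consumer indices. The code that programs them lives in hns_roce_hw_v2.c and is not part of this hunk, so the following init sequence is only a hedged guess at how the TX ring might be set up:

    /* assumption: ring_ba is the DMA address of the descriptor ring */
    static void example_cmq_tx_init(void __iomem *base, dma_addr_t ring_ba,
                                    u32 depth)
    {
        writel(lower_32_bits(ring_ba), base + ROCEE_TX_CMQ_BASEADDR_L_REG);
        writel(upper_32_bits(ring_ba), base + ROCEE_TX_CMQ_BASEADDR_H_REG);
        writel(depth, base + ROCEE_TX_CMQ_DEPTH_REG);
        writel(0, base + ROCEE_TX_CMQ_HEAD_REG);    /* producer index */
        writel(0, base + ROCEE_TX_CMQ_TAIL_REG);    /* consumer index */
    }
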
+ 45 - 27
drivers/infiniband/hw/hns/hns_roce_cq.c

@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(&hr_dev->pdev->dev,
+		dev_err(hr_dev->dev,
 			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
 			event_type, hr_cq->cqn);
 		return;
@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 			     struct hns_roce_uar *hr_uar,
 			     struct hns_roce_cq *hr_cq, int vector)
 {
-	struct hns_roce_cmd_mailbox *mailbox = NULL;
-	struct hns_roce_cq_table *cq_table = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_cmd_mailbox *mailbox;
+	struct hns_roce_hem_table *mtt_table;
+	struct hns_roce_cq_table *cq_table;
+	struct device *dev = hr_dev->dev;
 	dma_addr_t dma_handle;
-	u64 *mtts = NULL;
-	int ret = 0;
+	u64 *mtts;
+	int ret;
 
 	cq_table = &hr_dev->cq_table;
 
 	/* Get the physical address of cq buf */
-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+	else
+		mtt_table = &hr_dev->mr_table.mtt_table;
+
+	mtts = hns_roce_table_find(hr_dev, mtt_table,
 				   hr_mtt->first_seg, &dma_handle);
 	if (!mtts) {
 		dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
 void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret;
 
 	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
 	if (ret)
 		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
-
-	/* Waiting interrupt process procedure carried out */
-	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
-	/* wait for all interrupt processed */
-	if (atomic_dec_and_test(&hr_cq->refcount))
-		complete(&hr_cq->free);
-	wait_for_completion(&hr_cq->free);
+	if (hr_dev->eq_table.eq) {
+		/* Wait for any in-flight interrupt handler to finish */
+		synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+		/* wait until all interrupt processing has completed */
+		if (atomic_dec_and_test(&hr_cq->refcount))
+			complete(&hr_cq->free);
+		wait_for_completion(&hr_cq->free);
+	}
 
 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
+EXPORT_SYMBOL_GPL(hns_roce_free_cq);
 
 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 				   struct ib_ucontext *context,
@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
 	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
 				(*umem)->page_shift, &buf->hr_mtt);
 	if (ret)
@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	if (ret)
 		goto out;
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
 	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
 				buf->hr_buf.page_shift, &buf->hr_mtt);
 	if (ret)
@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_cq ucmd;
 	struct hns_roce_cq *hr_cq = NULL;
 	struct hns_roce_uar *uar = NULL;
 	int vector = attr->comp_vector;
 	int cq_entries = attr->cqe;
-	int ret = 0;
+	int ret;
 
 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
 		dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
 	if (!hr_cq)
 		return ERR_PTR(-ENOMEM);
 
-	/* In v1 engine, parameter verification */
-	if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
-		cq_entries = HNS_ROCE_MIN_CQE_NUM;
+	if (hr_dev->caps.min_cqes)
+		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
 
 	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
 	hr_cq->ib_cq.cqe = cq_entries - 1;
@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		}
 
 		uar = &hr_dev->priv_uar;
-		hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
-				 0x1000 * uar->index;
+		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+				DB_REG_OFFSET * uar->index;
 	}
 
 	/* Allocate cq index, fill cq_context */
@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 	 * problems if tptr is set to zero here, so we initialize it in user
 	 * space.
 	 */
-	if (!context)
+	if (!context && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;
 
 	/* Get created cq handler and carry out event */
@@ -385,6 +401,7 @@ err_cq:
 	kfree(hr_cq);
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
 
 int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 {
@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;
 
 	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;
 
 	cq = radix_tree_lookup(&cq_table->tree,

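Note: CQ sizing is now capability-driven: when caps.min_cqes is set, the requested depth is clamped up to it, then rounded to the next power of two, and the user-visible ib_cq.cqe is one less than the allocated depth. A standalone check of that arithmetic:

    #include <assert.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int requested = 100, min_cqes = 64;    /* sample values */
        unsigned int entries = requested;

        if (min_cqes)
            entries = entries > min_cqes ? entries : min_cqes; /* max() */
        entries = roundup_pow_of_two(entries);  /* 100 -> 128 */
        assert(entries == 128);
        assert(entries - 1 == 127);     /* reported as ib_cq.cqe */
        return 0;
    }
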
+ 89 - 12
drivers/infiniband/hw/hns/hns_roce_device.h

@@ -78,6 +78,8 @@
 #define HNS_ROCE_MAX_GID_NUM			16
 #define HNS_ROCE_GID_SIZE			16
 
+#define HNS_ROCE_HOP_NUM_0			0xff
+
 #define BITMAP_NO_RR				0
 #define BITMAP_RR				1
 
@@ -168,6 +170,11 @@ enum {
 	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
 };
 
+enum hns_roce_mtt_type {
+	MTT_TYPE_WQE,
+	MTT_TYPE_CQE,
+};
+
 #define HNS_ROCE_CMD_SUCCESS			1
 
 #define HNS_ROCE_PORT_DOWN			0
@@ -232,12 +239,17 @@ struct hns_roce_hem_table {
 	int		lowmem;
 	struct mutex	mutex;
 	struct hns_roce_hem **hem;
+	u64		**bt_l1;
+	dma_addr_t	*bt_l1_dma_addr;
+	u64		**bt_l0;
+	dma_addr_t	*bt_l0_dma_addr;
 };
 
 struct hns_roce_mtt {
-	unsigned long	first_seg;
-	int		order;
-	int		page_shift;
+	unsigned long		first_seg;
+	int			order;
+	int			page_shift;
+	enum hns_roce_mtt_type	mtt_type;
 };
 
 /* Only support 4K page size for mr register */
@@ -255,6 +267,19 @@ struct hns_roce_mr {
 	int			type;	/* MR's register type */
 	u64			*pbl_buf;/* MR's PBL space */
 	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
+	u32			pbl_size;/* PA number in the PBL */
+	u64			pbl_ba;/* page table address */
+	u32			l0_chunk_last_num;/* L0 last number */
+	u32			l1_chunk_last_num;/* L1 last number */
+	u64			**pbl_bt_l2;/* PBL BT L2 */
+	u64			**pbl_bt_l1;/* PBL BT L1 */
+	u64			*pbl_bt_l0;/* PBL BT L0 */
+	dma_addr_t		*pbl_l2_dma_addr;/* PBL BT L2 dma addr */
+	dma_addr_t		*pbl_l1_dma_addr;/* PBL BT L1 dma addr */
+	dma_addr_t		pbl_l0_dma_addr;/* PBL BT L0 dma addr */
+	u32			pbl_ba_pg_sz;/* BT chunk page size */
+	u32			pbl_buf_pg_sz;/* buf chunk page size */
+	u32			pbl_hop_num;/* multi-hop number */
 };
 
 struct hns_roce_mr_table {
@@ -262,6 +287,8 @@ struct hns_roce_mr_table {
 	struct hns_roce_buddy		mtt_buddy;
 	struct hns_roce_hem_table	mtt_table;
 	struct hns_roce_hem_table	mtpt_table;
+	struct hns_roce_buddy		mtt_cqe_buddy;
+	struct hns_roce_hem_table	mtt_cqe_table;
 };
 
 struct hns_roce_wq {
@@ -277,6 +304,12 @@ struct hns_roce_wq {
 	void __iomem	*db_reg_l;
 };
 
+struct hns_roce_sge {
+	int		sge_cnt;  /* SGE num */
+	int		offset;
+	int		sge_shift;/* SGE size */
+};
+
 struct hns_roce_buf_list {
 	void		*buf;
 	dma_addr_t	map;
@@ -367,7 +400,6 @@ struct hns_roce_cmd_context {
 
 struct hns_roce_cmdq {
 	struct dma_pool		*pool;
-	u8 __iomem		*hcr;
 	struct mutex		hcr_mutex;
 	struct semaphore	poll_sem;
 	/*
@@ -429,6 +461,9 @@ struct hns_roce_qp {
 
 	atomic_t		refcount;
 	struct completion	free;
+
+	struct hns_roce_sge	sge;
+	u32			next_sge;
 };
 
 struct hns_roce_sqp {
@@ -439,7 +474,6 @@ struct hns_roce_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
 	struct notifier_block	nb;
-	struct notifier_block	nb_inet;
 	u8			phy_port[HNS_ROCE_MAX_PORTS];
 };
 
@@ -477,16 +511,20 @@ struct hns_roce_caps {
 	u32		max_wqes;	/* 16k */
 	u32		max_sq_desc_sz;	/* 64 */
 	u32		max_rq_desc_sz;	/* 64 */
+	u32		max_srq_desc_sz;
 	int		max_qp_init_rdma;
 	int		max_qp_dest_rdma;
 	int		num_cqs;
 	int		max_cqes;
+	int		min_cqes;
+	u32		min_wqes;
 	int		reserved_cqs;
 	int		num_aeq_vectors;	/* 1 */
 	int		num_comp_vectors;	/* 32 ceq */
 	int		num_other_vectors;
 	int		num_mtpts;
 	u32		num_mtt_segs;
+	u32		num_cqe_segs;
 	int		reserved_mrws;
 	int		reserved_uars;
 	int		num_pds;
@@ -499,19 +537,50 @@ struct hns_roce_caps {
 	int		qpc_entry_sz;
 	int		irrl_entry_sz;
 	int		cqc_entry_sz;
+	u32		pbl_ba_pg_sz;
+	u32		pbl_buf_pg_sz;
+	u32		pbl_hop_num;
 	int		aeqe_depth;
 	int		ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
 	enum ib_mtu	max_mtu;
+	u32		qpc_bt_num;
+	u32		srqc_bt_num;
+	u32		cqc_bt_num;
+	u32		mpt_bt_num;
+	u32		qpc_ba_pg_sz;
+	u32		qpc_buf_pg_sz;
+	u32		qpc_hop_num;
+	u32		srqc_ba_pg_sz;
+	u32		srqc_buf_pg_sz;
+	u32		srqc_hop_num;
+	u32		cqc_ba_pg_sz;
+	u32		cqc_buf_pg_sz;
+	u32		cqc_hop_num;
+	u32		mpt_ba_pg_sz;
+	u32		mpt_buf_pg_sz;
+	u32		mpt_hop_num;
+	u32		mtt_ba_pg_sz;
+	u32		mtt_buf_pg_sz;
+	u32		mtt_hop_num;
+	u32		cqe_ba_pg_sz;
+	u32		cqe_buf_pg_sz;
+	u32		cqe_hop_num;
 };
 
 struct hns_roce_hw {
 	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
-	void (*hw_profile)(struct hns_roce_dev *hr_dev);
+	int (*cmq_init)(struct hns_roce_dev *hr_dev);
+	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
+	int (*hw_profile)(struct hns_roce_dev *hr_dev);
 	int (*hw_init)(struct hns_roce_dev *hr_dev);
 	void (*hw_exit)(struct hns_roce_dev *hr_dev);
+	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
+			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+			 u16 token, int event);
+	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
 	void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
 			union ib_gid *gid);
-	void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
 	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
 			enum ib_mtu mtu);
 	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
@@ -519,8 +588,11 @@ struct hns_roce_hw {
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
 			  dma_addr_t dma_handle, int nent, u32 vector);
+	int (*set_hem)(struct hns_roce_dev *hr_dev,
+		       struct hns_roce_hem_table *table, int obj, int step_idx);
 	int (*clear_hem)(struct hns_roce_dev *hr_dev,
-			 struct hns_roce_hem_table *table, int obj);
+			 struct hns_roce_hem_table *table, int obj,
+			 int step_idx);
 	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -535,12 +607,13 @@ struct hns_roce_hw {
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
 	int (*destroy_cq)(struct ib_cq *ibcq);
-	void	*priv;
 };
 
 struct hns_roce_dev {
 	struct ib_device	ib_dev;
 	struct platform_device  *pdev;
+	struct pci_dev		*pci_dev;
+	struct device		*dev;
 	struct hns_roce_uar     priv_uar;
 	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
 	spinlock_t		sm_lock;
@@ -569,9 +642,12 @@ struct hns_roce_dev {
 
 	int			cmd_mod;
 	int			loop_idc;
+	u32			sdb_offset;
+	u32			odb_offset;
 	dma_addr_t		tptr_dma_addr; /*only for hw v1*/
 	u32			tptr_size; /*only for hw v1*/
-	struct hns_roce_hw	*hw;
+	const struct hns_roce_hw *hw;
+	void			*priv;
 };
 
 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -723,6 +799,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq);
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
@@ -749,7 +826,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
-
-extern struct hns_roce_hw hns_roce_hw_v1;
+int hns_roce_init(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev);
 
 #endif /* _HNS_ROCE_DEVICE_H */

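Note: struct hns_roce_hw is now const and no longer carries priv; per-device private state moves to hns_roce_dev::priv. A single read-only ops table can then serve every device instance while each instance keeps generation-specific state, which is exactly the substitution the hw_v1.c hunks below make (hr_dev->hw->priv becomes hr_dev->priv). A standalone sketch of the pattern, all names invented:

    #include <stdlib.h>

    struct example_dev;

    struct example_hw {                 /* read-only, shared by all devices */
        int (*hw_init)(struct example_dev *dev);
    };

    struct example_dev {
        const struct example_hw *hw;    /* shared const ops table */
        void *priv;                     /* per-device private state */
    };

    struct v1_priv { int db_mode; };

    static int v1_hw_init(struct example_dev *dev)
    {
        struct v1_priv *priv = dev->priv;   /* was dev->hw->priv */

        priv->db_mode = 1;
        return 0;
    }

    static const struct example_hw v1_hw = { .hw_init = v1_hw_init };

    int main(void)
    {
        struct example_dev dev = { .hw = &v1_hw };
        int ret;

        dev.priv = calloc(1, sizeof(struct v1_priv));
        ret = dev.priv ? dev.hw->hw_init(&dev) : 1;
        free(dev.priv);
        return ret;
    }
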
+ 3 - 3
drivers/infiniband/hw/hns/hns_roce_eq.c

@@ -558,7 +558,7 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
 	writel(eqshift_val, eqc);
 
 	/* Configure eq extended address 12~44bit */
-	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
+	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
 
 	/*
 	 * Configure eq extended address 45~49 bit.
@@ -572,13 +572,13 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
 	roce_set_field(eqcuridx_val,
 		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
 		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
-	writel(eqcuridx_val, (u8 *)eqc + 8);
+	writel(eqcuridx_val, eqc + 8);
 
 	/* Configure eq consumer index */
 	roce_set_field(eqconsindx_val,
 		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
 		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
-	writel(eqconsindx_val, (u8 *)eqc + 0xc);
+	writel(eqconsindx_val, eqc + 0xc);
 
 	return 0;
 

+ 672 - 27
drivers/infiniband/hw/hns/hns_roce_hem.c

@@ -42,8 +42,162 @@
 #define DMA_ADDR_T_SHIFT		12
 #define BT_BA_SHIFT			32
 
-struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
-					gfp_t gfp_mask)
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+{
+	if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
+	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
+
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
+			    u32 bt_chunk_num)
+{
+	int i;
+
+	for (i = 0; i < bt_chunk_num; i++)
+		if (hem[start_idx + i])
+			return false;
+
+	return true;
+}
+
+static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
+{
+	int i;
+
+	for (i = 0; i < bt_chunk_num; i++)
+		if (bt[start_idx + i])
+			return false;
+
+	return true;
+}
+
+static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
+{
+	if (check_whether_bt_num_3(table_type, hop_num))
+		return 3;
+	else if (check_whether_bt_num_2(table_type, hop_num))
+		return 2;
+	else if (check_whether_bt_num_1(table_type, hop_num))
+		return 1;
+	else
+		return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_hem_table *table, unsigned long *obj,
+			   struct hns_roce_hem_mhop *mhop)
+{
+	struct device *dev = hr_dev->dev;
+	u32 chunk_ba_num;
+	u32 table_idx;
+	u32 bt_num;
+	u32 chunk_size;
+
+	switch (table->type) {
+	case HEM_TYPE_QPC:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+					     + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
+		mhop->hop_num = hr_dev->caps.qpc_hop_num;
+		break;
+	case HEM_TYPE_MTPT:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+					     + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
+		mhop->hop_num = hr_dev->caps.mpt_hop_num;
+		break;
+	case HEM_TYPE_CQC:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+					    + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+		mhop->hop_num = hr_dev->caps.cqc_hop_num;
+		break;
+	case HEM_TYPE_SRQC:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+					     + PAGE_SHIFT);
+		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
+		mhop->hop_num = hr_dev->caps.srqc_hop_num;
+		break;
+	case HEM_TYPE_MTT:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+					     + PAGE_SHIFT);
+		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->hop_num = hr_dev->caps.mtt_hop_num;
+		break;
+	case HEM_TYPE_CQE:
+		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
+					     + PAGE_SHIFT);
+		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+					     + PAGE_SHIFT);
+		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->hop_num = hr_dev->caps.cqe_hop_num;
+		break;
+	default:
+		dev_err(dev, "Table %d not support multi-hop addressing!\n",
+			 table->type);
+		return -EINVAL;
+	}
+
+	if (!obj)
+		return 0;
+
+	/*
+	 * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
+	 * MTT/CQE alloc hem for bt pages.
+	 */
+	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+	chunk_ba_num = mhop->bt_chunk_size / 8;
+	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
+			      mhop->bt_chunk_size;
+	table_idx = (*obj & (table->num_obj - 1)) /
+		     (chunk_size / table->obj_size);
+	switch (bt_num) {
+	case 3:
+		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
+		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
+		mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
+		break;
+	case 2:
+		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
+		mhop->l0_idx = table_idx / chunk_ba_num;
+		break;
+	case 1:
+		mhop->l0_idx = table_idx;
+		break;
+	default:
+		dev_err(dev, "Table %d not support hop_num = %d!\n",
+			     table->type, mhop->hop_num);
+		return -EINVAL;
+	}
+	if (mhop->l0_idx >= mhop->ba_l0_num)
+		mhop->l0_idx %= mhop->ba_l0_num;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);
+
+static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+					       int npages,
+					       unsigned long hem_alloc_size,
+					       gfp_t gfp_mask)
 {
 	struct hns_roce_hem_chunk *chunk = NULL;
 	struct hns_roce_hem *hem;
@@ -61,7 +215,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
 	hem->refcount = 0;
 	INIT_LIST_HEAD(&hem->chunk_list);
 
-	order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
+	order = get_order(hem_alloc_size);
 
 	while (npages > 0) {
 		if (!chunk) {
@@ -84,7 +238,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
 		 * memory, directly return fail.
 		 */
 		mem = &chunk->mem[chunk->npages];
-		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
 				&sg_dma_address(mem), gfp_mask);
 		if (!buf)
 			goto fail;
@@ -115,7 +269,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 
 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i)
-			dma_free_coherent(&hr_dev->pdev->dev,
+			dma_free_coherent(hr_dev->dev,
 				   chunk->mem[i].length,
 				   lowmem_page_address(sg_page(&chunk->mem[i])),
 				   sg_dma_address(&chunk->mem[i]));
@@ -128,8 +282,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
 	spinlock_t *lock = &hr_dev->bt_cmd_lock;
+	struct device *dev = hr_dev->dev;
 	unsigned long end = 0;
 	unsigned long flags;
 	struct hns_roce_hem_iter iter;
@@ -209,13 +363,184 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_hem_table *table,
+				   unsigned long obj)
+{
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_hem_mhop mhop;
+	struct hns_roce_hem_iter iter;
+	u32 buf_chunk_size;
+	u32 bt_chunk_size;
+	u32 chunk_ba_num;
+	u32 hop_num;
+	u32 size;
+	u32 bt_num;
+	u64 hem_idx;
+	u64 bt_l1_idx = 0;
+	u64 bt_l0_idx = 0;
+	u64 bt_ba;
+	unsigned long mhop_obj = obj;
+	int bt_l1_allocated = 0;
+	int bt_l0_allocated = 0;
+	int step_idx;
+	int ret;
+
+	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+	if (ret)
+		return ret;
+
+	buf_chunk_size = mhop.buf_chunk_size;
+	bt_chunk_size = mhop.bt_chunk_size;
+	hop_num = mhop.hop_num;
+	chunk_ba_num = bt_chunk_size / 8;
+
+	bt_num = hns_roce_get_bt_num(table->type, hop_num);
+	switch (bt_num) {
+	case 3:
+		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+		bt_l0_idx = mhop.l0_idx;
+		break;
+	case 2:
+		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+		bt_l0_idx = mhop.l0_idx;
+		break;
+	case 1:
+		hem_idx = mhop.l0_idx;
+		break;
+	default:
+		dev_err(dev, "Table %d not support hop_num = %d!\n",
+			     table->type, hop_num);
+		return -EINVAL;
+	}
+
+	mutex_lock(&table->mutex);
+
+	if (table->hem[hem_idx]) {
+		++table->hem[hem_idx]->refcount;
+		goto out;
+	}
+
+	/* alloc L1 BA's chunk */
+	if ((check_whether_bt_num_3(table->type, hop_num) ||
+		check_whether_bt_num_2(table->type, hop_num)) &&
+		!table->bt_l0[bt_l0_idx]) {
+		table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+					    &(table->bt_l0_dma_addr[bt_l0_idx]),
+					    GFP_KERNEL);
+		if (!table->bt_l0[bt_l0_idx]) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		bt_l0_allocated = 1;
+
+		/* set base address to hardware */
+		if (table->type < HEM_TYPE_MTT) {
+			step_idx = 0;
+			if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+				ret = -ENODEV;
+				dev_err(dev, "set HEM base address to HW failed!\n");
+				goto err_dma_alloc_l1;
+			}
+		}
+	}
+
+	/* alloc L2 BA's chunk */
+	if (check_whether_bt_num_3(table->type, hop_num) &&
+	    !table->bt_l1[bt_l1_idx])  {
+		table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+					    &(table->bt_l1_dma_addr[bt_l1_idx]),
+					    GFP_KERNEL);
+		if (!table->bt_l1[bt_l1_idx]) {
+			ret = -ENOMEM;
+			goto err_dma_alloc_l1;
+		}
+		bt_l1_allocated = 1;
+		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
+					       table->bt_l1_dma_addr[bt_l1_idx];
+
+		/* set base address to hardware */
+		step_idx = 1;
+		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+			ret = -ENODEV;
+			dev_err(dev, "set HEM base address to HW failed!\n");
+			goto err_alloc_hem_buf;
+		}
+	}
+
+	/*
+	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
+	 * alloc bt space chunk for MTT/CQE.
+	 */
+	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+	table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
+						size >> PAGE_SHIFT,
+						size,
+						(table->lowmem ? GFP_KERNEL :
+						GFP_HIGHUSER) | __GFP_NOWARN);
+	if (!table->hem[hem_idx]) {
+		ret = -ENOMEM;
+		goto err_alloc_hem_buf;
+	}
+
+	hns_roce_hem_first(table->hem[hem_idx], &iter);
+	bt_ba = hns_roce_hem_addr(&iter);
+
+	if (table->type < HEM_TYPE_MTT) {
+		if (hop_num == 2) {
+			*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
+			step_idx = 2;
+		} else if (hop_num == 1) {
+			*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+			step_idx = 1;
+		} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+			step_idx = 0;
+		}
+
+		/* set HEM base address to hardware */
+		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+			ret = -ENODEV;
+			dev_err(dev, "set HEM base address to HW failed!\n");
+			goto err_alloc_hem_buf;
+		}
+	} else if (hop_num == 2) {
+		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+	}
+
+	++table->hem[hem_idx]->refcount;
+	goto out;
+
+err_alloc_hem_buf:
+	if (bt_l1_allocated) {
+		dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
+				  table->bt_l1_dma_addr[bt_l1_idx]);
+		table->bt_l1[bt_l1_idx] = NULL;
+	}
+
+err_dma_alloc_l1:
+	if (bt_l0_allocated) {
+		dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
+				  table->bt_l0_dma_addr[bt_l0_idx]);
+		table->bt_l0[bt_l0_idx] = NULL;
+	}
+
+out:
+	mutex_unlock(&table->mutex);
+	return ret;
+}
+
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret = 0;
 	unsigned long i;
 
+	if (hns_roce_check_whether_mhop(hr_dev, table->type))
+		return hns_roce_table_mhop_get(hr_dev, table, obj);
+
 	i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
 	     table->obj_size);
 
@@ -228,6 +553,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 
 	table->hem[i] = hns_roce_alloc_hem(hr_dev,
 				       HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+				       HNS_ROCE_HEM_ALLOC_SIZE,
 				       (table->lowmem ? GFP_KERNEL :
 					GFP_HIGHUSER) | __GFP_NOWARN);
 	if (!table->hem[i]) {
@@ -237,6 +563,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 
 	/* Set HEM base address(128K/page, pa) to Hardware */
 	if (hns_roce_set_hem(hr_dev, table, obj)) {
+		hns_roce_free_hem(hr_dev, table->hem[i]);
+		table->hem[i] = NULL;
 		ret = -ENODEV;
 		dev_err(dev, "set HEM base address to HW failed.\n");
 		goto out;
@@ -248,12 +576,131 @@ out:
 	return ret;
 }
 
+static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+				    struct hns_roce_hem_table *table,
+				    unsigned long obj,
+				    int check_refcount)
+{
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_hem_mhop mhop;
+	unsigned long mhop_obj = obj;
+	u32 bt_chunk_size;
+	u32 chunk_ba_num;
+	u32 hop_num;
+	u32 start_idx;
+	u32 bt_num;
+	u64 hem_idx;
+	u64 bt_l1_idx = 0;
+	int ret;
+
+	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+	if (ret)
+		return;
+
+	bt_chunk_size = mhop.bt_chunk_size;
+	hop_num = mhop.hop_num;
+	chunk_ba_num = bt_chunk_size / 8;
+
+	bt_num = hns_roce_get_bt_num(table->type, hop_num);
+	switch (bt_num) {
+	case 3:
+		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+		break;
+	case 2:
+		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+		break;
+	case 1:
+		hem_idx = mhop.l0_idx;
+		break;
+	default:
+		dev_err(dev, "Table %d not support hop_num = %d!\n",
+			     table->type, hop_num);
+		return;
+	}
+
+	mutex_lock(&table->mutex);
+
+	if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
+		mutex_unlock(&table->mutex);
+		return;
+	}
+
+	if (table->type < HEM_TYPE_MTT && hop_num == 1) {
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+			dev_warn(dev, "Clear HEM base address failed.\n");
+	} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
+			dev_warn(dev, "Clear HEM base address failed.\n");
+	} else if (table->type < HEM_TYPE_MTT &&
+		   hop_num == HNS_ROCE_HOP_NUM_0) {
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+			dev_warn(dev, "Clear HEM base address failed.\n");
+	}
+
+	/*
+	 * free buffer space chunk for QPC/MTPT/CQC/SRQC.
+	 * free bt space chunk for MTT/CQE.
+	 */
+	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+	table->hem[hem_idx] = NULL;
+
+	if (check_whether_bt_num_2(table->type, hop_num)) {
+		start_idx = mhop.l0_idx * chunk_ba_num;
+		if (hns_roce_check_hem_null(table->hem, start_idx,
+					    chunk_ba_num)) {
+			if (table->type < HEM_TYPE_MTT &&
+			    hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+				dev_warn(dev, "Clear HEM base address failed.\n");
+
+			dma_free_coherent(dev, bt_chunk_size,
+					  table->bt_l0[mhop.l0_idx],
+					  table->bt_l0_dma_addr[mhop.l0_idx]);
+			table->bt_l0[mhop.l0_idx] = NULL;
+		}
+	} else if (check_whether_bt_num_3(table->type, hop_num)) {
+		start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+			    mhop.l1_idx * chunk_ba_num;
+		if (hns_roce_check_hem_null(table->hem, start_idx,
+					    chunk_ba_num)) {
+			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+				dev_warn(dev, "Clear HEM base address failed.\n");
+
+			dma_free_coherent(dev, bt_chunk_size,
+					  table->bt_l1[bt_l1_idx],
+					  table->bt_l1_dma_addr[bt_l1_idx]);
+			table->bt_l1[bt_l1_idx] = NULL;
+
+			start_idx = mhop.l0_idx * chunk_ba_num;
+			if (hns_roce_check_bt_null(table->bt_l1, start_idx,
+						   chunk_ba_num)) {
+				if (hr_dev->hw->clear_hem(hr_dev, table, obj,
+							  0))
+					dev_warn(dev, "Clear HEM base address failed.\n");
+
+				dma_free_coherent(dev, bt_chunk_size,
+					    table->bt_l0[mhop.l0_idx],
+					    table->bt_l0_dma_addr[mhop.l0_idx]);
+				table->bt_l0[mhop.l0_idx] = NULL;
+			}
+		}
+	}
+
+	mutex_unlock(&table->mutex);
+}
+
 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 			struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
+	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
+		return;
+	}
+
 	i = (obj & (table->num_obj - 1)) /
 	    (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
 
@@ -261,7 +708,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 
 	if (--table->hem[i]->refcount == 0) {
 		/* Clear HEM base address */
-		if (hr_dev->hw->clear_hem(hr_dev, table, obj))
+		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
 			dev_warn(dev, "Clear HEM base address failed.\n");
 
 		hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -271,23 +718,46 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 	mutex_unlock(&table->mutex);
 }
 
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
-			  dma_addr_t *dma_handle)
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table,
+			  unsigned long obj, dma_addr_t *dma_handle)
 {
 	struct hns_roce_hem_chunk *chunk;
-	unsigned long idx;
-	int i;
-	int offset, dma_offset;
+	struct hns_roce_hem_mhop mhop;
 	struct hns_roce_hem *hem;
 	struct page *page = NULL;
+	unsigned long mhop_obj = obj;
+	unsigned long idx;
+	int offset, dma_offset;
+	int i, j;
+	u32 hem_idx = 0;
 
 	if (!table->lowmem)
 		return NULL;
 
 	mutex_lock(&table->mutex);
-	idx = (obj & (table->num_obj - 1)) * table->obj_size;
-	hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
-	dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+
+	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
+		idx = (obj & (table->num_obj - 1)) * table->obj_size;
+		hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
+		dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+	} else {
+		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+		/* mtt mhop */
+		i = mhop.l0_idx;
+		j = mhop.l1_idx;
+		if (mhop.hop_num == 2)
+			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
+		else if (mhop.hop_num == 1 ||
+			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
+			hem_idx = i;
+
+		hem = table->hem[hem_idx];
+		dma_offset = offset = (obj & (table->num_obj - 1)) *
+				       table->obj_size % mhop.bt_chunk_size;
+		if (mhop.hop_num == 2)
+			dma_offset = offset = 0;
+	}
 
 	if (!hem)
 		goto out;
@@ -314,14 +784,21 @@ out:
 	mutex_unlock(&table->mutex);
 	return page ? lowmem_page_address(page) + offset : NULL;
 }
+EXPORT_SYMBOL_GPL(hns_roce_table_find);
 
 int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_hem_table *table,
 			     unsigned long start, unsigned long end)
 {
+	struct hns_roce_hem_mhop mhop;
 	unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
-	unsigned long i = 0;
-	int ret = 0;
+	unsigned long i;
+	int ret;
+
+	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+		inc = mhop.bt_chunk_size / table->obj_size;
+	}
 
 	/* Allocate MTT entry memory according to chunk(128K) */
 	for (i = start; i <= end; i += inc) {
@@ -344,10 +821,17 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_hem_table *table,
 			      unsigned long start, unsigned long end)
 {
+	struct hns_roce_hem_mhop mhop;
+	unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
 	unsigned long i;
 
+	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+		inc = mhop.bt_chunk_size / table->obj_size;
+	}
+
 	for (i = start; i <= end;
-		i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
+		i += inc)
 		hns_roce_table_put(hr_dev, table, i);
 }
 
@@ -356,15 +840,119 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 			    unsigned long obj_size, unsigned long nobj,
 			    int use_lowmem)
 {
+	struct device *dev = hr_dev->dev;
 	unsigned long obj_per_chunk;
 	unsigned long num_hem;
 
-	obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
-	num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
+		obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
+		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+		if (!table->hem)
+			return -ENOMEM;
+	} else {
+		unsigned long buf_chunk_size;
+		unsigned long bt_chunk_size;
+		unsigned long bt_chunk_num;
+		unsigned long num_bt_l0 = 0;
+		u32 hop_num;
+
+		switch (type) {
+		case HEM_TYPE_QPC:
+			buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+					+ PAGE_SHIFT);
+			num_bt_l0 = hr_dev->caps.qpc_bt_num;
+			hop_num = hr_dev->caps.qpc_hop_num;
+			break;
+		case HEM_TYPE_MTPT:
+			buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+					+ PAGE_SHIFT);
+			num_bt_l0 = hr_dev->caps.mpt_bt_num;
+			hop_num = hr_dev->caps.mpt_hop_num;
+			break;
+		case HEM_TYPE_CQC:
+			buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+					+ PAGE_SHIFT);
+			num_bt_l0 = hr_dev->caps.cqc_bt_num;
+			hop_num = hr_dev->caps.cqc_hop_num;
+			break;
+		case HEM_TYPE_SRQC:
+			buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+					+ PAGE_SHIFT);
+			num_bt_l0 = hr_dev->caps.srqc_bt_num;
+			hop_num = hr_dev->caps.srqc_hop_num;
+			break;
+		case HEM_TYPE_MTT:
+			buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = buf_chunk_size;
+			hop_num = hr_dev->caps.mtt_hop_num;
+			break;
+		case HEM_TYPE_CQE:
+			buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+					+ PAGE_SHIFT);
+			bt_chunk_size = buf_chunk_size;
+			hop_num = hr_dev->caps.cqe_hop_num;
+			break;
+		default:
+			dev_err(dev,
+			  "Table %d not support to init hem table here!\n",
+			  type);
+			return -EINVAL;
+		}
+		obj_per_chunk = buf_chunk_size / obj_size;
+		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+		bt_chunk_num = bt_chunk_size / 8;
+		if (table->type >= HEM_TYPE_MTT)
+			num_bt_l0 = bt_chunk_num;
+
+		table->hem = kcalloc(num_hem, sizeof(*table->hem),
+					 GFP_KERNEL);
+		if (!table->hem)
+			goto err_kcalloc_hem_buf;
+
+		if (check_whether_bt_num_3(table->type, hop_num)) {
+			unsigned long num_bt_l1;
+
+			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
+					     bt_chunk_num;
+			table->bt_l1 = kcalloc(num_bt_l1,
+					       sizeof(*table->bt_l1),
+					       GFP_KERNEL);
+			if (!table->bt_l1)
+				goto err_kcalloc_bt_l1;
+
+			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
+						 sizeof(*table->bt_l1_dma_addr),
+						 GFP_KERNEL);
+
+			if (!table->bt_l1_dma_addr)
+				goto err_kcalloc_l1_dma;
+		}
 
-	table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
-	if (!table->hem)
-		return -ENOMEM;
+		if (check_whether_bt_num_2(table->type, hop_num) ||
+			check_whether_bt_num_3(table->type, hop_num)) {
+			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+					       GFP_KERNEL);
+			if (!table->bt_l0)
+				goto err_kcalloc_bt_l0;
+
+			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
+						 sizeof(*table->bt_l0_dma_addr),
+						 GFP_KERNEL);
+			if (!table->bt_l0_dma_addr)
+				goto err_kcalloc_l0_dma;
+		}
+	}
 
 	table->type = type;
 	table->num_hem = num_hem;
@@ -374,18 +962,72 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 	mutex_init(&table->mutex);
 
 	return 0;
+
+err_kcalloc_l0_dma:
+	kfree(table->bt_l0);
+	table->bt_l0 = NULL;
+
+err_kcalloc_bt_l0:
+	kfree(table->bt_l1_dma_addr);
+	table->bt_l1_dma_addr = NULL;
+
+err_kcalloc_l1_dma:
+	kfree(table->bt_l1);
+	table->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+	kfree(table->hem);
+	table->hem = NULL;
+
+err_kcalloc_hem_buf:
+	return -ENOMEM;
+}
+
+static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
+					    struct hns_roce_hem_table *table)
+{
+	struct hns_roce_hem_mhop mhop;
+	u32 buf_chunk_size;
+	int i;
+	u64 obj;
+
+	hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
+					mhop.bt_chunk_size;
+
+	for (i = 0; i < table->num_hem; ++i) {
+		obj = i * buf_chunk_size / table->obj_size;
+		if (table->hem[i])
+			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+	}
+
+	kfree(table->hem);
+	table->hem = NULL;
+	kfree(table->bt_l1);
+	table->bt_l1 = NULL;
+	kfree(table->bt_l1_dma_addr);
+	table->bt_l1_dma_addr = NULL;
+	kfree(table->bt_l0);
+	table->bt_l0 = NULL;
+	kfree(table->bt_l0_dma_addr);
+	table->bt_l0_dma_addr = NULL;
 }
 
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
+	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+		return;
+	}
+
 	for (i = 0; i < table->num_hem; ++i)
 		if (table->hem[i]) {
 			if (hr_dev->hw->clear_hem(hr_dev, table,
-			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
+			    i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size, 0))
 				dev_err(dev, "Clear HEM base address failed.\n");
 
 			hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -401,4 +1043,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
 }

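Note: multi-hop lookup treats a table index as mixed-radix digits in base chunk_ba_num, where chunk_ba_num = bt_chunk_size / 8 because each BT entry is an 8-byte DMA address. A default 4 KB BT chunk gives 512 entries per level, so three hops address 512^3 chunks. A standalone check of the l0/l1/l2 split used by hns_roce_calc_hem_mhop():

    #include <assert.h>

    int main(void)
    {
        unsigned int bt_chunk_size = 4096;              /* 4 KB BT chunk */
        unsigned int chunk_ba_num = bt_chunk_size / 8;  /* 512 entries */
        unsigned long table_idx = 1000000;
        unsigned long l2 = table_idx & (chunk_ba_num - 1);
        unsigned long l1 = table_idx / chunk_ba_num & (chunk_ba_num - 1);
        unsigned long l0 = table_idx / chunk_ba_num / chunk_ba_num;

        /* 1000000 = 3 * 512^2 + 417 * 512 + 64 */
        assert(l0 == 3 && l1 == 417 && l2 == 64);
        /* recombining as in hns_roce_table_mhop_get() gives the index back */
        assert((l0 * chunk_ba_num + l1) * chunk_ba_num + l2 == table_idx);
        return 0;
    }
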
+ 30 - 2
drivers/infiniband/hw/hns/hns_roce_hem.h

@@ -47,6 +47,7 @@ enum {
 
 	 /* UNMAP HEM */
 	HEM_TYPE_MTT,
+	HEM_TYPE_CQE,
 	HEM_TYPE_IRRL,
 };
 
@@ -54,6 +55,18 @@ enum {
 	 ((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	 \
 	 (sizeof(struct scatterlist)))
 
+#define check_whether_bt_num_3(type, hop_num) \
+	(type < HEM_TYPE_MTT && hop_num == 2)
+
+#define check_whether_bt_num_2(type, hop_num) \
+	((type < HEM_TYPE_MTT && hop_num == 1) || \
+	(type >= HEM_TYPE_MTT && hop_num == 2))
+
+#define check_whether_bt_num_1(type, hop_num) \
+	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+	(type >= HEM_TYPE_MTT && hop_num == 1) || \
+	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+
 enum {
 	 HNS_ROCE_HEM_PAGE_SHIFT = 12,
 	 HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
@@ -77,12 +90,23 @@ struct hns_roce_hem_iter {
 	int				 page_idx;
 };
 
+struct hns_roce_hem_mhop {
+	u32	hop_num;
+	u32	buf_chunk_size;
+	u32	bt_chunk_size;
+	u32	ba_l0_num;
+	u32	l0_idx;/* level 0 base address table index */
+	u32	l1_idx;/* level 1 base address table index */
+	u32	l2_idx;/* level 2 base address table index */
+};
+
 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj);
 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 			struct hns_roce_hem_table *table, unsigned long obj);
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table, unsigned long obj,
 			  dma_addr_t *dma_handle);
 int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_hem_table *table,
@@ -97,6 +121,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table);
 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_hem_table *table, unsigned long *obj,
+			   struct hns_roce_hem_mhop *mhop);
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
 
 static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
 				      struct hns_roce_hem_iter *iter)
@@ -105,7 +133,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
 	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
 				 list_entry(hem->chunk_list.next,
 					    struct hns_roce_hem_chunk, list);
-	 iter->page_idx = 0;
+	iter->page_idx = 0;
 }
 
 static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)

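Note: the three check_whether_bt_num_* macros encode that context tables (type < HEM_TYPE_MTT) keep their payload one level below the base-address tables, while MTT/CQE chunks are themselves BA pages, so the same hop_num yields a different BT depth per table class; HNS_ROCE_HOP_NUM_0 (0xff) marks "no extra indirection". A standalone demo of the mapping (the two type constants are assumptions taken from the full enum, where QPC..SRQC precede MTT):

    #include <stdio.h>

    #define HEM_TYPE_QPC        0       /* a context-table type */
    #define HEM_TYPE_MTT        4       /* assumed enum position */
    #define HNS_ROCE_HOP_NUM_0  0xff

    static int bt_num(unsigned int type, unsigned int hop)
    {
        if (type < HEM_TYPE_MTT && hop == 2)
            return 3;                           /* ..._bt_num_3 */
        if ((type < HEM_TYPE_MTT && hop == 1) ||
            (type >= HEM_TYPE_MTT && hop == 2))
            return 2;                           /* ..._bt_num_2 */
        if ((type < HEM_TYPE_MTT && hop == HNS_ROCE_HOP_NUM_0) ||
            (type >= HEM_TYPE_MTT && hop == 1) ||
            (type >= HEM_TYPE_MTT && hop == HNS_ROCE_HOP_NUM_0))
            return 1;                           /* ..._bt_num_1 */
        return 0;
    }

    int main(void)
    {
        printf("QPC, hop 2 -> %d BT levels\n", bt_num(HEM_TYPE_QPC, 2)); /* 3 */
        printf("MTT, hop 2 -> %d BT levels\n", bt_num(HEM_TYPE_MTT, 2)); /* 2 */
        printf("MTT, hop 0xff -> %d BT level\n",
               bt_num(HEM_TYPE_MTT, HNS_ROCE_HOP_NUM_0));                /* 1 */
        return 0;
    }
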
+ 481 - 118
drivers/infiniband/hw/hns/hns_roce_hw_v1.c

@@ -34,6 +34,7 @@
 #include <linux/acpi.h>
 #include <linux/etherdevice.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
@@ -56,8 +57,8 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
 	rseg->len   = 0;
 }
 
-int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			  struct ib_send_wr **bad_wr)
+static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+				 struct ib_send_wr **bad_wr)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -316,8 +317,8 @@ out:
 	return ret;
 }
 
-int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			  struct ib_recv_wr **bad_wr)
+static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+				 struct ib_recv_wr **bad_wr)
 {
 	int ret = 0;
 	int nreq = 0;
@@ -472,7 +473,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
 	dma_addr_t sdb_dma_addr;
 	u32 val;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	db = &priv->db_table;
 
 	/* Configure extend SDB threshold */
@@ -511,7 +512,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
 	dma_addr_t odb_dma_addr;
 	u32 val;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	db = &priv->db_table;
 
 	/* Configure extend ODB threshold */
@@ -547,7 +548,7 @@ static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
 	dma_addr_t odb_dma_addr;
 	int ret = 0;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	db = &priv->db_table;
 
 	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
@@ -668,7 +669,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	u8 port = 0;
 	u8 sl;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	/* Reserved cq for loop qp */
@@ -816,7 +817,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 	int ret;
 	int i;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
@@ -850,7 +851,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 	u32 odb_evt_mod;
 	int ret = 0;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	db = &priv->db_table;
 
 	memset(db, 0, sizeof(*db));
@@ -876,7 +877,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 	return 0;
 }
 
-void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
 {
 	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
 	struct hns_roce_dev *hr_dev;
@@ -906,11 +907,13 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
 	unsigned long end =
 	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
 			     GFP_KERNEL);
+	if (!lp_qp_work)
+		return -ENOMEM;
 
 	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
 
@@ -982,7 +985,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 	hr_dev = to_hr_dev(mr_work->ib_dev);
 	dev = &hr_dev->pdev->dev;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 	mr_free_cq = free_mr->mr_free_cq;
 
@@ -1001,6 +1004,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		}
 	}
 
+	if (!ne) {
+		dev_err(dev, "Reserved loop qp is absent!\n");
+		goto free_work;
+	}
+
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
 		if (ret < 0) {
@@ -1025,7 +1033,8 @@ free_work:
 	kfree(mr_work);
 }
 
-int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
+				struct hns_roce_mr *mr)
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_mr_free_work *mr_work;
@@ -1038,7 +1047,7 @@ int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
 	int npages;
 	int ret = 0;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	if (mr->enabled) {
@@ -1103,7 +1112,7 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	struct hns_roce_db_table *db;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	db = &priv->db_table;
 
 	if (db->sdb_ext_mod) {
@@ -1133,7 +1142,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
 	struct hns_roce_raq_table *raq;
 	struct device *dev = &hr_dev->pdev->dev;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	raq = &priv->raq_table;
 
 	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
@@ -1210,7 +1219,7 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	struct hns_roce_raq_table *raq;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	raq = &priv->raq_table;
 
 	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
@@ -1244,7 +1253,7 @@ static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	int ret;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 
 	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
 		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
@@ -1286,7 +1295,7 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_v1_priv *priv;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 
 	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
 		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1304,7 +1313,7 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
 	struct hns_roce_buf_list *tptr_buf;
 	struct hns_roce_v1_priv *priv;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	tptr_buf = &priv->tptr_table.tptr_buf;
 
 	/*
@@ -1330,7 +1339,7 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
 	struct hns_roce_buf_list *tptr_buf;
 	struct hns_roce_v1_priv *priv;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	tptr_buf = &priv->tptr_table.tptr_buf;
 
 	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
@@ -1344,7 +1353,7 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	int ret = 0;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
@@ -1368,7 +1377,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
 	struct hns_roce_free_mr *free_mr;
 	struct hns_roce_v1_priv *priv;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	free_mr = &priv->free_mr;
 
 	flush_workqueue(free_mr->free_mr_wq);
@@ -1383,7 +1392,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
  * @dereset: true -- drop reset, false -- reset
  * Return: 0 on success, negative on failure
  */
-int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
+static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
 {
 	struct device_node *dsaf_node;
 	struct device *dev = &hr_dev->pdev->dev;
@@ -1432,7 +1441,7 @@ static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	struct hns_roce_des_qp *des_qp;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	des_qp = &priv->des_qp;
 
 	des_qp->requeue_flag = 1;
@@ -1450,7 +1459,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
 	struct hns_roce_v1_priv *priv;
 	struct hns_roce_des_qp *des_qp;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	des_qp = &priv->des_qp;
 
 	des_qp->requeue_flag = 0;
@@ -1458,7 +1467,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
 	destroy_workqueue(des_qp->qp_wq);
 }
 
-void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 {
 	int i = 0;
 	struct hns_roce_caps *caps = &hr_dev->caps;
@@ -1474,7 +1483,9 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 
 	caps->num_qps		= HNS_ROCE_V1_MAX_QP_NUM;
 	caps->max_wqes		= HNS_ROCE_V1_MAX_WQE_NUM;
+	caps->min_wqes		= HNS_ROCE_MIN_WQE_NUM;
 	caps->num_cqs		= HNS_ROCE_V1_MAX_CQ_NUM;
+	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
 	caps->max_cqes		= HNS_ROCE_V1_MAX_CQE_NUM;
 	caps->max_sq_sg		= HNS_ROCE_V1_SG_NUM;
 	caps->max_rq_sg		= HNS_ROCE_V1_SG_NUM;
@@ -1524,9 +1535,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
 							 ROCEE_ACK_DELAY_REG));
 	caps->max_mtu = IB_MTU_2048;
+
+	return 0;
 }
 
-int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
 	u32 val;
@@ -1605,7 +1618,7 @@ error_failed_raq_init:
 	return ret;
 }
 
-void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
+static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 {
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
 	hns_roce_free_mr_free(hr_dev);
@@ -1616,8 +1629,81 @@ void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 	hns_roce_db_free(hr_dev);
 }
 
-void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
-			 union ib_gid *gid)
+static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
+
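+	/*
+	 * The GO bit stays set while the hardware is still
+	 * processing the previous mailbox command.
+	 */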
+	return (!!(status & (1 << HCR_GO_BIT)));
+}
+
+static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+				 u64 out_param, u32 in_modifier, u8 op_modifier,
+				 u16 op, u16 token, int event)
+{
+	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
+	unsigned long end;
+	u32 val = 0;
+
+	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
+	while (hns_roce_v1_cmd_pending(hr_dev)) {
+		if (time_after(jiffies, end)) {
+			dev_err(hr_dev->dev, "jiffies=%lu end=%lu\n",
+				jiffies, end);
+			return -EAGAIN;
+		}
+		cond_resched();
+	}
+
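+	/* Pack command, modifier, event flag, run bit and token into MB6 */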
+	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+		       op);
+	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
+	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+
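+	/* Write the parameters first; the MB6 word written last is the doorbell */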
+	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
+	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
+	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+	/* Memory barrier */
+	wmb();
+
+	__raw_writel(cpu_to_le32(val), hcr + 5);
+
+	mmiowb();
+
+	return 0;
+}
+
+static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
+				unsigned long timeout)
+{
+	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
+	unsigned long end = 0;
+	u32 status = 0;
+
+	end = msecs_to_jiffies(timeout) + jiffies;
+	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
+		cond_resched();
+
+	if (hns_roce_v1_cmd_pending(hr_dev)) {
+		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+		return -ETIMEDOUT;
+	}
+
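+	/* A low status byte of 0x1 indicates the command completed successfully */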
+	status = le32_to_cpu((__force __be32)
+			      __raw_readl(hcr + HCR_STATUS_OFFSET));
+	if ((status & STATUS_MASK) != 0x1) {
+		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+				int gid_index, union ib_gid *gid)
 {
 	u32 *p = NULL;
 	u8 gid_idx = 0;
@@ -1641,7 +1727,8 @@ void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
 		       (HNS_ROCE_V1_GID_NUM * gid_idx));
 }
 
-void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
+static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+			       u8 *addr)
 {
 	u32 reg_smac_l;
 	u16 reg_smac_h;
@@ -1654,8 +1741,13 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
 	 * because smac is not equal to dmac.
 	 * We need to release and create the reserved qp again.
 	 */
-	if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
-		dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+	if (hr_dev->hw->dereg_mr) {
+		int ret;
+
+		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
+		if (ret && ret != -ETIMEDOUT)
+			return ret;
+	}
 
 	p = (u32 *)(&addr[0]);
 	reg_smac_l = *p;
@@ -1670,10 +1762,12 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
 		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
 	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
 		   val);
+
+	return 0;
 }
 
-void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
-			 enum ib_mtu mtu)
+static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
+				enum ib_mtu mtu)
 {
 	u32 val;
 
@@ -1685,8 +1779,8 @@ void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
 		   val);
 }
 
-int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
-			   unsigned long mtpt_idx)
+static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+				  unsigned long mtpt_idx)
 {
 	struct hns_roce_v1_mpt_entry *mpt_entry;
 	struct scatterlist *sg;
@@ -1858,7 +1952,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
 	return get_sw_cqe(hr_cq, hr_cq->cons_index);
 }
 
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
 	u32 doorbell[2];
 
@@ -1931,9 +2025,10 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 	spin_unlock_irq(&hr_cq->lock);
 }
 
-void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
-			   struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
-			   dma_addr_t dma_handle, int nent, u32 vector)
+static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
+				  struct hns_roce_cq *hr_cq, void *mb_buf,
+				  u64 *mtts, dma_addr_t dma_handle, int nent,
+				  u32 vector)
 {
 	struct hns_roce_cq_context *cq_context = NULL;
 	struct hns_roce_buf_list *tptr_buf;
@@ -1941,7 +2036,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 	dma_addr_t tptr_dma_addr;
 	int offset;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	tptr_buf = &priv->tptr_table.tptr_buf;
 
 	cq_context = mb_buf;
@@ -2018,7 +2113,8 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 	cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
 }
 
-int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
+				     enum ib_cq_notify_flags flags)
 {
 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
 	u32 notification_flag;
@@ -2279,8 +2375,9 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		return ret;
 }
 
-int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
-		struct hns_roce_hem_table *table, int obj)
+static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+				 struct hns_roce_hem_table *table, int obj,
+				 int step_idx)
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_v1_priv *priv;
@@ -2289,7 +2386,7 @@ int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
 	void __iomem *bt_cmd;
 	u64 bt_ba = 0;
 
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 
 	switch (table->type) {
 	case HEM_TYPE_QPC:
@@ -2441,14 +2538,14 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	int rq_pa_start;
 	u32 reg_val;
 	u64 *mtts;
-	u32 *addr;
+	u32 __iomem *addr;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
 
 	/* Search QP buf's MTTs */
-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
 				   hr_qp->mtt.first_seg, &dma_handle);
 	if (!mtts) {
 		dev_err(dev, "qp buf pa find failed\n");
@@ -2523,8 +2620,9 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
 
 		/* Copy context to QP1C register */
-		addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
-			hr_qp->phy_port * sizeof(*context));
+		addr = (u32 __iomem *)(hr_dev->reg_base +
+				       ROCEE_QP1C_CFG0_0_REG +
+				       hr_qp->phy_port * sizeof(*context));
 
 		writel(context->qp1c_bytes_4, addr);
 		writel(context->sq_rq_bt_l, addr + 1);
@@ -2595,7 +2693,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		return -ENOMEM;
 
 	/* Search qp buf's mtts */
-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
 				   hr_qp->mtt.first_seg, &dma_handle);
 	if (mtts == NULL) {
 		dev_err(dev, "qp buf pa find failed\n");
@@ -2603,8 +2701,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	}
 
 	/* Search IRRL's mtts */
-	mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
-				     &dma_handle_2);
+	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+				     hr_qp->qpn, &dma_handle_2);
 	if (mtts_2 == NULL) {
 		dev_err(dev, "qp irrl_table find failed\n");
 		goto out;
@@ -2800,10 +2898,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
 			       ilog2((unsigned int)attr->max_dest_rd_atomic));
 
-		roce_set_field(context->qpc_bytes_36,
-			       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
-			       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
-			       attr->dest_qp_num);
+		if (attr_mask & IB_QP_DEST_QPN)
+			roce_set_field(context->qpc_bytes_36,
+				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
+				       attr->dest_qp_num);
 
 		/* Configure GID index */
 		port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -3143,7 +3242,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 
 		if (ibqp->uobject) {
 			hr_qp->rq.db_reg_l = hr_dev->reg_base +
-				     ROCEE_DB_OTHERS_L_0_REG +
+				     hr_dev->odb_offset +
 				     DB_REG_OFFSET * hr_dev->priv_uar.index;
 		}
 
@@ -3177,9 +3276,10 @@ out:
 	return ret;
 }
 
-int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
-			  int attr_mask, enum ib_qp_state cur_state,
-			  enum ib_qp_state new_state)
+static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
+				 const struct ib_qp_attr *attr, int attr_mask,
+				 enum ib_qp_state cur_state,
+				 enum ib_qp_state new_state)
 {
 
 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
@@ -3270,6 +3370,7 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	qp_attr->path_mtu	= IB_MTU_256;
 	qp_attr->path_mig_state	= IB_MIG_ARMED;
 	qp_attr->qkey		= QKEY_VAL;
+	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
 	qp_attr->rq_psn		= 0;
 	qp_attr->sq_psn		= 0;
 	qp_attr->dest_qp_num	= 1;
@@ -3351,6 +3452,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 					       QP_CONTEXT_QPC_BYTES_48_MTU_M,
 					       QP_CONTEXT_QPC_BYTES_48_MTU_S);
 	qp_attr->path_mig_state = IB_MIG_ARMED;
+	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
 	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
 		qp_attr->qkey = QKEY_VAL;
 
@@ -3406,10 +3508,10 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
 	qp_attr->port_num = hr_qp->port + 1;
 	qp_attr->sq_draining = 0;
-	qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
+	qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
 				 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
 				 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
-	qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
+	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
 				 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
 				 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
 	qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
@@ -3444,8 +3546,9 @@ out:
 	return ret;
 }
 
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
-			 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+				int qp_attr_mask,
+				struct ib_qp_init_attr *qp_init_attr)
 {
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
@@ -3454,6 +3557,53 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
 }
 
+static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
+				      u32 *old_send, u32 *old_retry,
+				      u32 *tsp_st, u32 *success_flags)
+{
+	u32 sdb_retry_cnt;
+	u32 sdb_send_ptr;
+	u32 cur_cnt, old_cnt;
+	u32 send_ptr;
+
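+	/*
+	 * Compare the current send pointer (plus retry count) against the
+	 * recorded one to decide whether the send doorbell made progress.
+	 */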
+	sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+	sdb_retry_cnt =	roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
+	cur_cnt = roce_get_field(sdb_send_ptr,
+				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+		  roce_get_field(sdb_retry_cnt,
+				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+	if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+		old_cnt = roce_get_field(*old_send,
+					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+			  roce_get_field(*old_retry,
+					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+			*success_flags = 1;
+	} else {
+		old_cnt = roce_get_field(*old_send,
+					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
+			*success_flags = 1;
+		} else {
+			send_ptr = roce_get_field(*old_send,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+				   roce_get_field(sdb_retry_cnt,
+					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+			roce_set_field(*old_send,
+				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+				       send_ptr);
+		}
+	}
+}
+
 static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 				      struct hns_roce_qp *hr_qp,
 				      u32 sdb_issue_ptr,
@@ -3461,12 +3611,10 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 				      u32 *wait_stage)
 {
 	struct device *dev = &hr_dev->pdev->dev;
-	u32 sdb_retry_cnt, old_retry;
 	u32 sdb_send_ptr, old_send;
 	u32 success_flags = 0;
-	u32 cur_cnt, old_cnt;
 	unsigned long end;
-	u32 send_ptr;
+	u32 old_retry;
 	u32 inv_cnt;
 	u32 tsp_st;
 
@@ -3524,47 +3672,9 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 
 				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
 
-				sdb_send_ptr = roce_read(hr_dev,
-							ROCEE_SDB_SEND_PTR_REG);
-				sdb_retry_cnt =	roce_read(hr_dev,
-						       ROCEE_SDB_RETRY_CNT_REG);
-				cur_cnt = roce_get_field(sdb_send_ptr,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-					roce_get_field(sdb_retry_cnt,
-					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-				if (!roce_get_bit(tsp_st,
-					ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
-					old_cnt = roce_get_field(old_send,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-					roce_get_field(old_retry,
-					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
-						success_flags = 1;
-				} else {
-					old_cnt = roce_get_field(old_send,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-					if (cur_cnt - old_cnt >
-					    SDB_ST_CMP_VAL) {
-						success_flags = 1;
-					} else {
-						send_ptr =
-							roce_get_field(old_send,
-					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-					    roce_get_field(sdb_retry_cnt,
-					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-					    roce_set_field(old_send,
-					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
-						send_ptr);
-					}
-				}
+				hns_roce_check_sdb_status(hr_dev, &old_send,
+							  &old_retry, &tsp_st,
+							  &success_flags);
 			} while (!success_flags);
 		}
 
@@ -3664,7 +3774,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
 	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
 	dev = &hr_dev->pdev->dev;
-	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 	hr_qp = qp_work_entry->qp;
 	qpn = hr_qp->qpn;
 
@@ -3781,7 +3891,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 		qp_work->sdb_inv_cnt	= qp_work_entry.sdb_inv_cnt;
 		qp_work->sche_cnt	= qp_work_entry.sche_cnt;
 
-		priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+		priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 		queue_work(priv->des_qp.qp_wq, &qp_work->work);
 		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
 	}
@@ -3789,7 +3899,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 	return 0;
 }
 
-int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
@@ -3841,13 +3951,13 @@ int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
 	return ret;
 }
 
-struct hns_roce_v1_priv hr_v1_priv;
-
-struct hns_roce_hw hns_roce_hw_v1 = {
+static const struct hns_roce_hw hns_roce_hw_v1 = {
 	.reset = hns_roce_v1_reset,
 	.hw_profile = hns_roce_v1_profile,
 	.hw_init = hns_roce_v1_init,
 	.hw_exit = hns_roce_v1_exit,
+	.post_mbox = hns_roce_v1_post_mbox,
+	.chk_mbox = hns_roce_v1_chk_mbox,
 	.set_gid = hns_roce_v1_set_gid,
 	.set_mac = hns_roce_v1_set_mac,
 	.set_mtu = hns_roce_v1_set_mtu,
@@ -3863,5 +3973,258 @@ struct hns_roce_hw hns_roce_hw_v1 = {
 	.poll_cq = hns_roce_v1_poll_cq,
 	.dereg_mr = hns_roce_v1_dereg_mr,
 	.destroy_cq = hns_roce_v1_destroy_cq,
-	.priv = &hr_v1_priv,
 };
+
+static const struct of_device_id hns_roce_of_match[] = {
+	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, hns_roce_of_match);
+
+static const struct acpi_device_id hns_roce_acpi_match[] = {
+	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
+
+static int hns_roce_node_match(struct device *dev, void *fwnode)
+{
+	return dev->fwnode == fwnode;
+}
+
+static struct platform_device *
+hns_roce_find_pdev(struct fwnode_handle *fwnode)
+{
+	struct device *dev;
+
+	/* get the 'device' corresponding to the matching 'fwnode' */
+	dev = bus_find_device(&platform_bus_type, NULL,
+			      fwnode, hns_roce_node_match);
+	/* get the platform device */
+	return dev ? to_platform_device(dev) : NULL;
+}
+
+static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct platform_device *pdev = NULL;
+	struct net_device *netdev = NULL;
+	struct device_node *net_node;
+	struct resource *res;
+	int port_cnt = 0;
+	u8 phy_port;
+	int ret;
+	int i;
+
+	/* check if we are compatible with the underlying SoC */
+	if (dev_of_node(dev)) {
+		const struct of_device_id *of_id;
+
+		of_id = of_match_node(hns_roce_of_match, dev->of_node);
+		if (!of_id) {
+			dev_err(dev, "device is not compatible!\n");
+			return -ENXIO;
+		}
+		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
+		if (!hr_dev->hw) {
+			dev_err(dev, "couldn't get H/W specific DT data!\n");
+			return -ENXIO;
+		}
+	} else if (is_acpi_device_node(dev->fwnode)) {
+		const struct acpi_device_id *acpi_id;
+
+		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
+		if (!acpi_id) {
+			dev_err(dev, "device is not compatible!\n");
+			return -ENXIO;
+		}
+		hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
+		if (!hr_dev->hw) {
+			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
+			return -ENXIO;
+		}
+	} else {
+		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
+		return -ENXIO;
+	}
+
+	/* get the mapped register base address */
+	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "memory resource not found!\n");
+		return -EINVAL;
+	}
+	hr_dev->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hr_dev->reg_base))
+		return PTR_ERR(hr_dev->reg_base);
+
+	/* read the node_guid of IB device from the DT or ACPI */
+	ret = device_property_read_u8_array(dev, "node-guid",
+					    (u8 *)&hr_dev->ib_dev.node_guid,
+					    GUID_LEN);
+	if (ret) {
+		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+		return ret;
+	}
+
+	/* get the RoCE associated ethernet ports or netdevices */
+	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
+		if (dev_of_node(dev)) {
+			net_node = of_parse_phandle(dev->of_node, "eth-handle",
+						    i);
+			if (!net_node)
+				continue;
+			pdev = of_find_device_by_node(net_node);
+		} else if (is_acpi_device_node(dev->fwnode)) {
+			struct acpi_reference_args args;
+			struct fwnode_handle *fwnode;
+
+			ret = acpi_node_get_property_reference(dev->fwnode,
+							       "eth-handle",
+							       i, &args);
+			if (ret)
+				continue;
+			fwnode = acpi_fwnode_handle(args.adev);
+			pdev = hns_roce_find_pdev(fwnode);
+		} else {
+			dev_err(dev, "cannot read data from DT or ACPI\n");
+			return -ENXIO;
+		}
+
+		if (pdev) {
+			netdev = platform_get_drvdata(pdev);
+			phy_port = (u8)i;
+			if (netdev) {
+				hr_dev->iboe.netdevs[port_cnt] = netdev;
+				hr_dev->iboe.phy_port[port_cnt] = phy_port;
+			} else {
+				dev_err(dev, "no netdev found with pdev %s\n",
+					pdev->name);
+				return -ENODEV;
+			}
+			port_cnt++;
+		}
+	}
+
+	if (port_cnt == 0) {
+		dev_err(dev, "unable to get eth-handle for available ports!\n");
+		return -EINVAL;
+	}
+
+	hr_dev->caps.num_ports = port_cnt;
+
+	/* cmd issue mode: 0 is poll, 1 is event */
+	hr_dev->cmd_mod = 1;
+	hr_dev->loop_idc = 0;
+	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
+
+	/* read the interrupt names from the DT or ACPI */
+	ret = device_property_read_string_array(dev, "interrupt-names",
+						hr_dev->irq_names,
+						HNS_ROCE_MAX_IRQ_NUM);
+	if (ret < 0) {
+		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
+		return ret;
+	}
+
+	/* fetch the interrupt numbers */
+	for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
+		if (hr_dev->irq[i] <= 0) {
+			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * hns_roce_probe - RoCE driver entry point
+ * @pdev: pointer to platform device
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int hns_roce_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct hns_roce_dev *hr_dev;
+	struct device *dev = &pdev->dev;
+
+	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+	if (!hr_dev)
+		return -ENOMEM;
+
+	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
+	if (!hr_dev->priv) {
+		ret = -ENOMEM;
+		goto error_failed_kzalloc;
+	}
+
+	hr_dev->pdev = pdev;
+	hr_dev->dev = dev;
+	platform_set_drvdata(pdev, hr_dev);
+
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
+		dev_err(dev, "Not usable DMA addressing mode\n");
+		ret = -EIO;
+		goto error_failed_get_cfg;
+	}
+
+	ret = hns_roce_get_cfg(hr_dev);
+	if (ret) {
+		dev_err(dev, "Get Configuration failed!\n");
+		goto error_failed_get_cfg;
+	}
+
+	ret = hns_roce_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "RoCE engine init failed!\n");
+		goto error_failed_get_cfg;
+	}
+
+	return 0;
+
+error_failed_get_cfg:
+	kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+	ib_dealloc_device(&hr_dev->ib_dev);
+
+	return ret;
+}
+
+/**
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
+static int hns_roce_remove(struct platform_device *pdev)
+{
+	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
+
+	hns_roce_exit(hr_dev);
+	kfree(hr_dev->priv);
+	ib_dealloc_device(&hr_dev->ib_dev);
+
+	return 0;
+}
+
+static struct platform_driver hns_roce_driver = {
+	.probe = hns_roce_probe,
+	.remove = hns_roce_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = hns_roce_of_match,
+		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
+	},
+};
+
+module_platform_driver(hns_roce_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");

+ 5 - 0
drivers/infiniband/hw/hns/hns_roce_hw_v1.h

@@ -948,6 +948,11 @@ struct hns_roce_qp_context {
 #define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M   \
 	(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
 
+#define STATUS_MASK		0xff
+#define GO_BIT_TIMEOUT_MSECS	10000
+#define HCR_STATUS_OFFSET	0x18
+#define HCR_GO_BIT		15
+
 struct hns_roce_rq_db {
 	u32    u32_4;
 	u32    u32_8;

+ 3133 - 0
drivers/infiniband/hw/hns/hns_roce_hw_v2.c

@@ -0,0 +1,3133 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <rdma/ib_umem.h>
+
+#include "hnae3.h"
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
+
+static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+			    struct ib_sge *sg)
+{
+	dseg->lkey = cpu_to_le32(sg->lkey);
+	dseg->addr = cpu_to_le64(sg->addr);
+	dseg->len  = cpu_to_le32(sg->length);
+}
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+				 struct ib_send_wr **bad_wr)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
+	struct hns_roce_qp *qp = to_hr_qp(ibqp);
+	struct hns_roce_v2_wqe_data_seg *dseg;
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_v2_db sq_db;
+	unsigned int sge_ind = 0;
+	unsigned int wqe_sz = 0;
+	unsigned long flags;
+	unsigned int ind;
+	void *wqe = NULL;
+	int ret = 0;
+	int nreq;
+	int i;
+
+	if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+		*bad_wr = NULL;
+		return -EOPNOTSUPP;
+	}
+
+	if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qp->sq.lock, flags);
+	ind = qp->sq_next_wqe;
+	sge_ind = qp->next_sge;
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+			ret = -ENOMEM;
+			*bad_wr = wr;
+			goto out;
+		}
+
+		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
+				wr->num_sge, qp->sq.max_gs);
+			ret = -EINVAL;
+			*bad_wr = wr;
+			goto out;
+		}
+
+		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
+								      wr->wr_id;
+
+		rc_sq_wqe = wqe;
+		memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+		for (i = 0; i < wr->num_sge; i++)
+			rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+		rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+		switch (wr->opcode) {
+		case IB_WR_RDMA_READ:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_RDMA_READ);
+			rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+			rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+			break;
+		case IB_WR_RDMA_WRITE:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+			rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+			rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+			break;
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+			rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+			rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+			break;
+		case IB_WR_SEND:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_SEND);
+			break;
+		case IB_WR_SEND_WITH_INV:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+			break;
+		case IB_WR_SEND_WITH_IMM:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+			break;
+		case IB_WR_LOCAL_INV:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+			break;
+		case IB_WR_ATOMIC_CMP_AND_SWP:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+			break;
+		case IB_WR_ATOMIC_FETCH_AND_ADD:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+			break;
+		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+			roce_set_field(rc_sq_wqe->byte_4,
+				      V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				      V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+			break;
+		case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+			roce_set_field(rc_sq_wqe->byte_4,
+				     V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				     V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				     HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+			break;
+		default:
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+				       HNS_ROCE_V2_WQE_OP_MASK);
+			break;
+		}
+
+		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, 1);
+
+		wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+		dseg = wqe;
+		if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+			if (rc_sq_wqe->msg_len >
+				hr_dev->caps.max_sq_inline) {
+				ret = -EINVAL;
+				*bad_wr = wr;
+				dev_err(dev, "inline len(1-%d)=%d, illegal",
+					rc_sq_wqe->msg_len,
+					hr_dev->caps.max_sq_inline);
+				goto out;
+			}
+
+			for (i = 0; i < wr->num_sge; i++) {
+				memcpy(wqe, ((void *)wr->sg_list[i].addr),
+				       wr->sg_list[i].length);
+				wqe += wr->sg_list[i].length;
+				wqe_sz += wr->sg_list[i].length;
+			}
+
+			roce_set_bit(rc_sq_wqe->byte_4,
+				     V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+		} else {
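+			/*
+			 * The RC send WQE holds at most two data segs inline;
+			 * further SGEs spill into the extended sge area.
+			 */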
+			if (wr->num_sge <= 2) {
+				for (i = 0; i < wr->num_sge; i++)
+					set_data_seg_v2(dseg + i,
+							wr->sg_list + i);
+			} else {
+				roce_set_field(rc_sq_wqe->byte_20,
+				V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+				V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+				sge_ind & (qp->sge.sge_cnt - 1));
+
+				for (i = 0; i < 2; i++)
+					set_data_seg_v2(dseg + i,
+							wr->sg_list + i);
+
+				dseg = get_send_extend_sge(qp,
+					sge_ind & (qp->sge.sge_cnt - 1));
+
+				for (i = 0; i < wr->num_sge - 2; i++) {
+					set_data_seg_v2(dseg + i,
+							wr->sg_list + 2 + i);
+					sge_ind++;
+				}
+			}
+
+			roce_set_field(rc_sq_wqe->byte_16,
+				       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+				       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+				       wr->num_sge);
+			wqe_sz += wr->num_sge *
+				  sizeof(struct hns_roce_v2_wqe_data_seg);
+		}
+		ind++;
+	}
+
+out:
+	if (likely(nreq)) {
+		qp->sq.head += nreq;
+		/* Memory barrier */
+		wmb();
+
+		sq_db.byte_4 = 0;
+		sq_db.parameter = 0;
+
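+		/* Ring the SQ doorbell with the QPN and the new producer index */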
+		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
+		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+			       V2_DB_PARAMETER_CONS_IDX_S,
+			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+			       V2_DB_PARAMETER_SL_S, qp->sl);
+
+		hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
+
+		qp->sq_next_wqe = ind;
+		qp->next_sge = sge_ind;
+	}
+
+	spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+	return ret;
+}
+
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+				 struct ib_recv_wr **bad_wr)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct hns_roce_v2_wqe_data_seg *dseg;
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_v2_db rq_db;
+	unsigned long flags;
+	void *wqe = NULL;
+	int ret = 0;
+	int nreq;
+	int ind;
+	int i;
+
+	spin_lock_irqsave(&hr_qp->rq.lock, flags);
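+	/* rq.wqe_cnt is a power of two, so masking yields the ring index */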
+	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
+
+	if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
+		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
+			hr_qp->ibqp.recv_cq)) {
+			ret = -ENOMEM;
+			*bad_wr = wr;
+			goto out;
+		}
+
+		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
+				wr->num_sge, hr_qp->rq.max_gs);
+			ret = -EINVAL;
+			*bad_wr = wr;
+			goto out;
+		}
+
+		wqe = get_recv_wqe(hr_qp, ind);
+		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+		for (i = 0; i < wr->num_sge; i++) {
+			if (!wr->sg_list[i].length)
+				continue;
+			set_data_seg_v2(dseg, wr->sg_list + i);
+			dseg++;
+		}
+
+		if (i < hr_qp->rq.max_gs) {
+			/* dseg already points at the first unused slot */
+			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+			dseg->addr = 0;
+		}
+
+		hr_qp->rq.wrid[ind] = wr->wr_id;
+
+		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+	}
+
+out:
+	if (likely(nreq)) {
+		hr_qp->rq.head += nreq;
+		/* Memory barrier */
+		wmb();
+
+		rq_db.byte_4 = 0;
+		rq_db.parameter = 0;
+
+		roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+			       V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
+		roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
+		roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+			       V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
+
+		hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
+	}
+	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+	return ret;
+}
+
+static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+{
+	int ntu = ring->next_to_use;
+	int ntc = ring->next_to_clean;
+	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
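+	/* Keep one slot unused so head == tail unambiguously means empty */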
+	return ring->desc_num - used - 1;
+}
+
+static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_v2_cmq_ring *ring)
+{
+	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
+					     DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_v2_cmq_ring *ring)
+{
+	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
+			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+			 DMA_BIDIRECTIONAL);
+	kfree(ring->desc);
+}
+
+static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+					    &priv->cmq.csq : &priv->cmq.crq;
+
+	ring->flag = ring_type;
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	return hns_roce_alloc_cmq_desc(hr_dev, ring);
+}
+
+static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+					    &priv->cmq.csq : &priv->cmq.crq;
+	dma_addr_t dma = ring->desc_dma_addr;
+
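+	/* Program the ring base address and depth, then reset head and tail */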
+	if (ring_type == TYPE_CSQ) {
+		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
+		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
+			   upper_32_bits(dma));
+		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+			  (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+			   HNS_ROCE_CMQ_ENABLE);
+		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+	} else {
+		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
+		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
+			   upper_32_bits(dma));
+		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
+			  (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+			   HNS_ROCE_CMQ_ENABLE);
+		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
+		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
+	}
+}
+
+static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	int ret;
+
+	/* Setup the queue entries for command queue */
+	priv->cmq.csq.desc_num = 1024;
+	priv->cmq.crq.desc_num = 1024;
+
+	/* Setup the lock for command queue */
+	spin_lock_init(&priv->cmq.csq.lock);
+	spin_lock_init(&priv->cmq.crq.lock);
+
+	/* Setup Tx write back timeout */
+	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
+
+	/* Init CSQ */
+	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
+	if (ret) {
+		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+		return ret;
+	}
+
+	/* Init CRQ */
+	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
+	if (ret) {
+		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+		goto err_crq;
+	}
+
+	/* Init CSQ REG */
+	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
+
+	/* Init CRQ REG */
+	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
+
+	return 0;
+
+err_crq:
+	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+
+	return ret;
+}
+
+static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+
+	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
+}
+
+static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
+					  enum hns_roce_opcode_type opcode,
+					  bool is_read)
+{
+	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flag =
+		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+	if (is_read)
+		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
+	else
+		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+}
+
+static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+
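+	/* The CSQ is drained once the hardware head catches up with our tail */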
+	return head == priv->cmq.csq.next_to_use;
+}
+
+static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+	struct hns_roce_cmq_desc *desc;
+	u16 ntc = csq->next_to_clean;
+	u32 head;
+	int clean = 0;
+
+	desc = &csq->desc[ntc];
+	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+	while (head != ntc) {
+		memset(desc, 0, sizeof(*desc));
+		ntc++;
+		if (ntc == csq->desc_num)
+			ntc = 0;
+		desc = &csq->desc[ntc];
+		clean++;
+	}
+	csq->next_to_clean = ntc;
+
+	return clean;
+}
+
+static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+			     struct hns_roce_cmq_desc *desc, int num)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+	struct hns_roce_cmq_desc *desc_to_use;
+	bool complete = false;
+	u32 timeout = 0;
+	int handle = 0;
+	u16 desc_ret;
+	int ret = 0;
+	int ntc;
+
+	spin_lock_bh(&csq->lock);
+
+	if (num > hns_roce_cmq_space(csq)) {
+		spin_unlock_bh(&csq->lock);
+		return -EBUSY;
+	}
+
+	/*
+	 * Record the location of desc in the cmq for this submission,
+	 * which will be used when the hardware writes back
+	 */
+	ntc = csq->next_to_use;
+
+	while (handle < num) {
+		desc_to_use = &csq->desc[csq->next_to_use];
+		*desc_to_use = desc[handle];
+		dev_dbg(hr_dev->dev, "set cmq desc:\n");
+		csq->next_to_use++;
+		if (csq->next_to_use == csq->desc_num)
+			csq->next_to_use = 0;
+		handle++;
+	}
+
+	/* Write to hardware */
+	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+
+	/*
+	 * If the command is synchronous, wait for the firmware to write back;
+	 * if multiple descriptors are to be sent, use the first one to check.
+	 */
+	if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
+		do {
+			if (hns_roce_cmq_csq_done(hr_dev))
+				break;
+			udelay(1);
+			timeout++;
+		} while (timeout < priv->cmq.tx_timeout);
+	}
+
+	if (hns_roce_cmq_csq_done(hr_dev)) {
+		complete = true;
+		handle = 0;
+		while (handle < num) {
+			/* get the result of hardware write back */
+			desc_to_use = &csq->desc[ntc];
+			desc[handle] = *desc_to_use;
+			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+			desc_ret = desc[handle].retval;
+			if (desc_ret == CMD_EXEC_SUCCESS)
+				ret = 0;
+			else
+				ret = -EIO;
+			priv->cmq.last_status = desc_ret;
+			ntc++;
+			handle++;
+			if (ntc == csq->desc_num)
+				ntc = 0;
+		}
+	}
+
+	if (!complete)
+		ret = -EAGAIN;
+
+	/* clean the command send queue */
+	handle = hns_roce_cmq_csq_clean(hr_dev);
+	if (handle != num)
+		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
+			 handle, num);
+
+	spin_unlock_bh(&csq->lock);
+
+	return ret;
+}
+
+static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_query_version *resp;
+	struct hns_roce_cmq_desc desc;
+	int ret;
+
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
+	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+	if (ret)
+		return ret;
+
+	resp = (struct hns_roce_query_version *)desc.data;
+	hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
+	hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+
+	return 0;
+}
+
+static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cfg_global_param *req;
+	struct hns_roce_cmq_desc desc;
+
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
+				      false);
+
+	req = (struct hns_roce_cfg_global_param *)desc.data;
+	memset(req, 0, sizeof(*req));
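+	/*
+	 * 0x3e8 (1000) presumably sets the 1us time reference in clock cycles;
+	 * 0x12b7 (4791) is the IANA-assigned RoCEv2 UDP destination port.
+	 */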
+	roce_set_field(req->time_cfg_udp_port,
+		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
+		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
+	roce_set_field(req->time_cfg_udp_port,
+		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
+		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
+
+	return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cmq_desc desc[2];
+	struct hns_roce_pf_res *res;
+	int ret;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		hns_roce_cmq_setup_basic_desc(&desc[i],
+					      HNS_ROCE_OPC_QUERY_PF_RES, true);
+
+		if (i == 0)
+			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+		else
+			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+	}
+
+	ret = hns_roce_cmq_send(hr_dev, desc, 2);
+	if (ret)
+		return ret;
+
+	res = (struct hns_roce_pf_res *)desc[0].data;
+
+	hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
+						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
+						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
+	hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
+						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
+						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
+	hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
+						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
+						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
+	hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
+						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
+						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
+
+	return 0;
+}
+
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cmq_desc desc[2];
+	struct hns_roce_vf_res_a *req_a;
+	struct hns_roce_vf_res_b *req_b;
+	int i;
+
+	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
+	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+	memset(req_a, 0, sizeof(*req_a));
+	memset(req_b, 0, sizeof(*req_b));
+	for (i = 0; i < 2; i++) {
+		hns_roce_cmq_setup_basic_desc(&desc[i],
+					      HNS_ROCE_OPC_ALLOC_VF_RES, false);
+
+		if (i == 0)
+			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+		else
+			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+		if (i == 0) {
+			roce_set_field(req_a->vf_qpc_bt_idx_num,
+				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
+				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
+			roce_set_field(req_a->vf_qpc_bt_idx_num,
+				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
+				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
+				       HNS_ROCE_VF_QPC_BT_NUM);
+
+			roce_set_field(req_a->vf_srqc_bt_idx_num,
+				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
+				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
+			roce_set_field(req_a->vf_srqc_bt_idx_num,
+				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
+				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
+				       HNS_ROCE_VF_SRQC_BT_NUM);
+
+			roce_set_field(req_a->vf_cqc_bt_idx_num,
+				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
+				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
+			roce_set_field(req_a->vf_cqc_bt_idx_num,
+				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
+				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
+				       HNS_ROCE_VF_CQC_BT_NUM);
+
+			roce_set_field(req_a->vf_mpt_bt_idx_num,
+				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
+				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
+			roce_set_field(req_a->vf_mpt_bt_idx_num,
+				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
+				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
+				       HNS_ROCE_VF_MPT_BT_NUM);
+
+			roce_set_field(req_a->vf_eqc_bt_idx_num,
+				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
+				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
+			roce_set_field(req_a->vf_eqc_bt_idx_num,
+				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
+				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
+				       HNS_ROCE_VF_EQC_NUM);
+		} else {
+			roce_set_field(req_b->vf_smac_idx_num,
+				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
+				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
+			roce_set_field(req_b->vf_smac_idx_num,
+				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
+				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
+				       HNS_ROCE_VF_SMAC_NUM);
+
+			roce_set_field(req_b->vf_sgid_idx_num,
+				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
+				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
+			roce_set_field(req_b->vf_sgid_idx_num,
+				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
+				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
+				       HNS_ROCE_VF_SGID_NUM);
+
+			roce_set_field(req_b->vf_qid_idx_sl_num,
+				       VF_RES_B_DATA_3_VF_QID_IDX_M,
+				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
+			roce_set_field(req_b->vf_qid_idx_sl_num,
+				       VF_RES_B_DATA_3_VF_SL_NUM_M,
+				       VF_RES_B_DATA_3_VF_SL_NUM_S,
+				       HNS_ROCE_VF_SL_NUM);
+		}
+	}
+
+	return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
+{
+	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
+	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
+	struct hns_roce_cfg_bt_attr *req;
+	struct hns_roce_cmq_desc desc;
+
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
+	req = (struct hns_roce_cfg_bt_attr *)desc.data;
+	memset(req, 0, sizeof(*req));
+
+	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
+		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
+		       hr_dev->caps.qpc_ba_pg_sz);
+	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
+		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
+		       hr_dev->caps.qpc_buf_pg_sz);
+	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
+		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
+		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
+
+	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
+		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
+		       hr_dev->caps.srqc_ba_pg_sz);
+	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
+		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
+		       hr_dev->caps.srqc_buf_pg_sz);
+	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
+		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
+		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
+
+	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
+		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
+		       hr_dev->caps.cqc_ba_pg_sz);
+	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
+		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
+		       hr_dev->caps.cqc_buf_pg_sz);
+	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
+		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
+		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
+
+	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
+		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
+		       hr_dev->caps.mpt_ba_pg_sz);
+	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
+		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
+		       hr_dev->caps.mpt_buf_pg_sz);
+	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
+		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
+	return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_caps *caps = &hr_dev->caps;
+	int ret;
+
+	ret = hns_roce_cmq_query_hw_info(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	ret = hns_roce_config_global_param(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	/* Get pf resource owned by every pf */
+	ret = hns_roce_query_pf_resource(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	ret = hns_roce_alloc_vf_resource(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	hr_dev->vendor_part_id = 0;
+	hr_dev->sys_image_guid = 0;
+
+	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
+	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
+	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
+	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
+	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
+	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
+	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
+	caps->num_aeq_vectors	= 1;
+	caps->num_comp_vectors	= 63;
+	caps->num_other_vectors	= 0;
+	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
+	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
+	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
+	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
+	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
+	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
+	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
+	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
+	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
+	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
+	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
+	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
+	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
+	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
+	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
+	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
+	caps->reserved_lkey	= 0;
+	caps->reserved_pds	= 0;
+	caps->reserved_mrws	= 1;
+	caps->reserved_uars	= 0;
+	caps->reserved_cqs	= 0;
+
+	caps->qpc_ba_pg_sz	= 0;
+	caps->qpc_buf_pg_sz	= 0;
+	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->srqc_ba_pg_sz	= 0;
+	caps->srqc_buf_pg_sz	= 0;
+	caps->srqc_hop_num	= HNS_ROCE_HOP_NUM_0;
+	caps->cqc_ba_pg_sz	= 0;
+	caps->cqc_buf_pg_sz	= 0;
+	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->mpt_ba_pg_sz	= 0;
+	caps->mpt_buf_pg_sz	= 0;
+	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->pbl_ba_pg_sz	= 0;
+	caps->pbl_buf_pg_sz	= 0;
+	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
+	caps->mtt_ba_pg_sz	= 0;
+	caps->mtt_buf_pg_sz	= 0;
+	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
+	caps->cqe_ba_pg_sz	= 0;
+	caps->cqe_buf_pg_sz	= 0;
+	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
+
+	caps->pkey_table_len[0] = 1;
+	caps->gid_table_len[0] = 2;
+	caps->local_ca_ack_delay = 0;
+	caps->max_mtu = IB_MTU_4096;
+
+	ret = hns_roce_v2_set_bt(hr_dev);
+	if (ret)
+		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
+			ret);
+
+	return ret;
+}
+
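+/*
+ * Mailbox handshake helpers: the VF_MB_STATUS register carries a HW_RUN
+ * bit that stays set while hardware still owns the mailbox, plus a low
+ * status field with the result of the last command (0x1 on success,
+ * judging by the check in hns_roce_v2_chk_mbox() below).
+ */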
+static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
+	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
+}
+
+static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
+{
+	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
+	return status & HNS_ROCE_HW_MB_STATUS_MASK;
+}
+
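+/*
+ * The mailbox is six consecutive 32-bit words at ROCEE_VF_MB_CFG0_REG:
+ * a 64-bit in_param, a 64-bit out_param, then the tag/cmd word and the
+ * event/token word.  The last two writes hand the mailbox to hardware,
+ * so the wmb() orders them after the parameter writes.
+ */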
+static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+				 u64 out_param, u32 in_modifier, u8 op_modifier,
+				 u16 op, u16 token, int event)
+{
+	struct device *dev = hr_dev->dev;
+	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
+					   ROCEE_VF_MB_CFG0_REG);
+	unsigned long end;
+	u32 val0 = 0;
+	u32 val1 = 0;
+
+	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
+	while (hns_roce_v2_cmd_pending(hr_dev)) {
+		if (time_after(jiffies, end)) {
+			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
+				(int)end);
+			return -EAGAIN;
+		}
+		cond_resched();
+	}
+
+	roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
+		       HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
+	roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
+		       HNS_ROCE_VF_MB4_CMD_SHIFT, op);
+	roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
+		       HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
+	roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
+		       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
+
+	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
+	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
+
+	/* Ensure the parameters are visible before the command words below */
+	wmb();
+
+	__raw_writel(cpu_to_le32(val0), hcr + 4);
+	__raw_writel(cpu_to_le32(val1), hcr + 5);
+
+	mmiowb();
+
+	return 0;
+}
+
+static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
+				unsigned long timeout)
+{
+	struct device *dev = hr_dev->dev;
+	unsigned long end = 0;
+	u32 status;
+
+	end = msecs_to_jiffies(timeout) + jiffies;
+	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
+		cond_resched();
+
+	if (hns_roce_v2_cmd_pending(hr_dev)) {
+		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+		return -ETIMEDOUT;
+	}
+
+	status = hns_roce_v2_cmd_complete(hr_dev);
+	if (status != 0x1) {
+		dev_err(dev, "mailbox status 0x%x!\n", status);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+				int gid_index, union ib_gid *gid)
+{
+	u32 *p;
+	u32 val;
+
+	p = (u32 *)&gid->raw[0];
+	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
+		       0x20 * gid_index);
+
+	p = (u32 *)&gid->raw[4];
+	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
+		       0x20 * gid_index);
+
+	p = (u32 *)&gid->raw[8];
+	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
+		       0x20 * gid_index);
+
+	p = (u32 *)&gid->raw[0xc];
+	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
+		       0x20 * gid_index);
+
+	val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
+	roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
+		       ROCEE_VF_SGID_CFG4_SGID_TYPE_S, 0);
+
+	roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
+}
+
+static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+			       u8 *addr)
+{
+	u16 reg_smac_h;
+	u32 reg_smac_l;
+	u32 val;
+
+	reg_smac_l = *(u32 *)(&addr[0]);
+	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
+		       0x08 * phy_port);
+	val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
+
+	reg_smac_h  = *(u16 *)(&addr[4]);
+	roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
+		       ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
+	roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
+
+	return 0;
+}
+
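+/*
+ * Build the MPT (memory protection table) entry for a registered MR.
+ * roce_set_field()/roce_set_bit() operate on CPU-endian words, which is
+ * why each packed word is converted with cpu_to_le32() only afterwards.
+ */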
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+				  unsigned long mtpt_idx)
+{
+	struct hns_roce_v2_mpt_entry *mpt_entry;
+	struct scatterlist *sg;
+	u64 *pages;
+	int entry;
+	int i;
+
+	mpt_entry = mb_buf;
+	memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
+		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
+	roce_set_field(mpt_entry->byte_4_pd_hop_st,
+		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
+	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+		       V2_MPT_BYTE_4_PD_S, mr->pd);
+	mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
+
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
+		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
+		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
+		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+	mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
+
+	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
+		     mr->type == MR_TYPE_MR ? 0 : 1);
+	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
+
+	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
+	mpt_entry->lkey = cpu_to_le32(mr->key);
+	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+
+	if (mr->type == MR_TYPE_DMA)
+		return 0;
+
+	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+		       V2_MPT_BYTE_48_PBL_BA_H_S,
+		       upper_32_bits(mr->pbl_ba >> 3));
+	mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
+
+	pages = (u64 *)__get_free_page(GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
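+	/*
+	 * Collect the page DMA addresses.  They are stored shifted right by
+	 * 6 bits (64-byte units) here, while the PBL base above is stored
+	 * shifted by 3 (8-byte units).
+	 */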
+	i = 0;
+	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+		pages[i] = ((u64)sg_dma_address(sg)) >> 6;
+
+		/* Only the first two entries are recorded directly in the MPT */
+		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+			break;
+		i++;
+	}
+
+	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+		       V2_MPT_BYTE_56_PA0_H_S,
+		       upper_32_bits(pages[0]));
+	mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
+
+	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+
+	free_page((unsigned long)pages);
+
+	roce_set_field(mpt_entry->byte_64_buf_pa1,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
+	mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
+
+	return 0;
+}
+
+static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
+				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+}
+
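+/*
+ * CQE ownership alternates on each pass around the ring: the bit of the
+ * consumer index just above the ring mask flips on every wrap, so a CQE
+ * belongs to software exactly when its owner bit differs from that wrap
+ * bit.
+ */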
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
+
+	/* The CQE is valid for software when its owner bit differs from the
+	 * wrap bit (the MSB above the ring mask) of cons_idx.
+	 */
+	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
+		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+}
+
+static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+{
+	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
+}
+
+static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+{
+	struct hns_roce_v2_cq_db cq_db;
+
+	cq_db.byte_4 = 0;
+	cq_db.parameter = 0;
+
+	roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
+		       V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
+	roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
+		       V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
+
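+	/*
+	 * The consumer index is reported modulo twice the CQ depth,
+	 * presumably so hardware can tell a full ring from an empty one.
+	 */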
+	roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
+		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
+		       cons_index & ((hr_cq->cq_depth << 1) - 1));
+	roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
+		       V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
+
+	hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
+
+}
+
+static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+				   struct hns_roce_srq *srq)
+{
+	struct hns_roce_v2_cqe *cqe, *dest;
+	u32 prod_index;
+	int nfreed = 0;
+	u8 owner_bit;
+
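+	/*
+	 * First scan forward from the consumer index to find the software
+	 * view of the producer index, i.e. one past the last CQE that
+	 * hardware has delivered (capped at one full ring).
+	 */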
+	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
+	     ++prod_index) {
+		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+			break;
+	}
+
+	/*
+	 * Now backwards through the CQ, removing CQ entries
+	 * that match our QP by overwriting them with next entries.
+	 */
+	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+				    V2_CQE_BYTE_16_LCL_QPN_S) &
+				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
+			/* SRQ is not supported, so just count the CQE */
+			++nfreed;
+		} else if (nfreed) {
+			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
+					  hr_cq->ib_cq.cqe);
+			owner_bit = roce_get_bit(dest->byte_4,
+						 V2_CQE_BYTE_4_OWNER_S);
+			memcpy(dest, cqe, sizeof(*cqe));
+			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
+				     owner_bit);
+		}
+	}
+
+	if (nfreed) {
+		hr_cq->cons_index += nfreed;
+		/*
+		 * Make sure update of buffer contents is done before
+		 * updating consumer index.
+		 */
+		wmb();
+		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+	}
+}
+
+static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+				 struct hns_roce_srq *srq)
+{
+	spin_lock_irq(&hr_cq->lock);
+	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
+	spin_unlock_irq(&hr_cq->lock);
+}
+
+static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
+				  struct hns_roce_cq *hr_cq, void *mb_buf,
+				  u64 *mtts, dma_addr_t dma_handle, int nent,
+				  u32 vector)
+{
+	struct hns_roce_v2_cq_context *cq_context;
+
+	cq_context = mb_buf;
+	memset(cq_context, 0, sizeof(*cq_context));
+
+	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
+		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
+		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
+		       V2_CQC_BYTE_4_CEQN_S, vector);
+	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
+
+	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
+		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
+
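+	/*
+	 * mtts[0] and mtts[1] hold the first two CQE buffer blocks; the
+	 * remaining blocks are reached through the base address (cqe_ba)
+	 * and the configured hop number.
+	 */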
+	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+	cq_context->cqe_cur_blk_addr =
+				cpu_to_le32(cq_context->cqe_cur_blk_addr);
+
+	roce_set_field(cq_context->byte_16_hop_addr,
+		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
+		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
+		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
+	roce_set_field(cq_context->byte_16_hop_addr,
+		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
+		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
+		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
+
+	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
+	roce_set_field(cq_context->byte_24_pgsz_addr,
+		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
+		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
+		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
+	roce_set_field(cq_context->byte_24_pgsz_addr,
+		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
+		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
+		       hr_dev->caps.cqe_ba_pg_sz);
+	roce_set_field(cq_context->byte_24_pgsz_addr,
+		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
+		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
+		       hr_dev->caps.cqe_buf_pg_sz);
+
+	cq_context->cqe_ba = (u32)(dma_handle >> 3);
+
+	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
+		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+}
+
+static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+				     enum ib_cq_notify_flags flags)
+{
+	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+	u32 notification_flag;
+	u32 doorbell[2];
+
+	doorbell[0] = 0;
+	doorbell[1] = 0;
+
+	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+			     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+	/*
+	 * flags = 0, notification flag = 1: notify on the next completion
+	 * flags = 1, notification flag = 0: notify on solicited only
+	 */
+	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
+		       hr_cq->cqn);
+	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
+		       HNS_ROCE_V2_CQ_DB_NTR);
+	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
+		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
+		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
+		       V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
+	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+		     notification_flag);
+
+	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+
+	return 0;
+}
+
+static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
+				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+	struct hns_roce_dev *hr_dev;
+	struct hns_roce_v2_cqe *cqe;
+	struct hns_roce_qp *hr_qp;
+	struct hns_roce_wq *wq;
+	int is_send;
+	u16 wqe_ctr;
+	u32 opcode;
+	u32 status;
+	int qpn;
+
+	/* Find cqe according to consumer index */
+	cqe = next_cqe_sw_v2(hr_cq);
+	if (!cqe)
+		return -EAGAIN;
+
+	++hr_cq->cons_index;
+	/* Ensure the CQE contents are read only after the ownership check */
+	rmb();
+
+	/* 0->SQ, 1->RQ */
+	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
+
+	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+				V2_CQE_BYTE_16_LCL_QPN_S);
+
+	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
+		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+		if (unlikely(!hr_qp)) {
+			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
+				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
+			return -EINVAL;
+		}
+		*cur_qp = hr_qp;
+	}
+
+	wc->qp = &(*cur_qp)->ibqp;
+	wc->vendor_err = 0;
+
+	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+				V2_CQE_BYTE_4_STATUS_S);
+	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
+	case HNS_ROCE_CQE_V2_SUCCESS:
+		wc->status = IB_WC_SUCCESS;
+		break;
+	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
+		wc->status = IB_WC_LOC_LEN_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
+		wc->status = IB_WC_LOC_QP_OP_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
+		wc->status = IB_WC_LOC_PROT_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
+		wc->status = IB_WC_MW_BIND_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
+		wc->status = IB_WC_BAD_RESP_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
+		wc->status = IB_WC_LOC_ACCESS_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
+		wc->status = IB_WC_REM_INV_REQ_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
+		wc->status = IB_WC_REM_ACCESS_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
+		wc->status = IB_WC_REM_OP_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
+		wc->status = IB_WC_RETRY_EXC_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
+		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+		break;
+	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
+		wc->status = IB_WC_REM_ABORT_ERR;
+		break;
+	default:
+		wc->status = IB_WC_GENERAL_ERR;
+		break;
+	}
+
+	/* On a CQE status error, return without parsing the rest */
+	if (wc->status != IB_WC_SUCCESS)
+		return 0;
+
+	if (is_send) {
+		wc->wc_flags = 0;
+		/* This CQE corresponds to an SQ WQE */
+		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
+		case HNS_ROCE_SQ_OPCODE_SEND:
+			wc->opcode = IB_WC_SEND;
+			break;
+		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
+			wc->opcode = IB_WC_SEND;
+			break;
+		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
+			wc->opcode = IB_WC_SEND;
+			wc->wc_flags |= IB_WC_WITH_IMM;
+			break;
+		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
+			wc->opcode = IB_WC_RDMA_READ;
+			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+			break;
+		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
+			wc->opcode = IB_WC_RDMA_WRITE;
+			break;
+		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
+			wc->opcode = IB_WC_RDMA_WRITE;
+			wc->wc_flags |= IB_WC_WITH_IMM;
+			break;
+		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
+			wc->opcode = IB_WC_LOCAL_INV;
+			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+			break;
+		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
+			wc->opcode = IB_WC_COMP_SWAP;
+			wc->byte_len  = 8;
+			break;
+		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
+			wc->opcode = IB_WC_FETCH_ADD;
+			wc->byte_len  = 8;
+			break;
+		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
+			wc->opcode = IB_WC_MASKED_COMP_SWAP;
+			wc->byte_len  = 8;
+			break;
+		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
+			wc->opcode = IB_WC_MASKED_FETCH_ADD;
+			wc->byte_len  = 8;
+			break;
+		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
+			wc->opcode = IB_WC_REG_MR;
+			break;
+		case HNS_ROCE_SQ_OPCODE_BIND_MW:
+			wc->opcode = IB_WC_REG_MR;
+			break;
+		default:
+			wc->status = IB_WC_GENERAL_ERR;
+			break;
+		}
+
+		wq = &(*cur_qp)->sq;
+		if ((*cur_qp)->sq_signal_bits) {
+			/*
+			 * If sq_signal_bits is set, not every WQE produces
+			 * a CQE, so first advance the tail pointer to the
+			 * WQE that this CQE corresponds to.
+			 */
+			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
+						      V2_CQE_BYTE_4_WQE_INDX_M,
+						      V2_CQE_BYTE_4_WQE_INDX_S);
+			wq->tail += (wqe_ctr - (u16)wq->tail) &
+				    (wq->wqe_cnt - 1);
+		}
+
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		++wq->tail;
+	} else {
+		/* This CQE corresponds to an RQ WQE */
+		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+					V2_CQE_BYTE_4_OPCODE_S);
+		switch (opcode & 0x1f) {
+		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+			wc->wc_flags = IB_WC_WITH_IMM;
+			wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+			break;
+		case HNS_ROCE_V2_OPCODE_SEND:
+			wc->opcode = IB_WC_RECV;
+			wc->wc_flags = 0;
+			break;
+		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+			wc->opcode = IB_WC_RECV;
+			wc->wc_flags = IB_WC_WITH_IMM;
+			wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+			break;
+		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+			wc->opcode = IB_WC_RECV;
+			wc->wc_flags = IB_WC_WITH_INVALIDATE;
+			wc->ex.invalidate_rkey =
+				le32_to_cpu(cqe->rkey_immtdata);
+			break;
+		default:
+			wc->status = IB_WC_GENERAL_ERR;
+			break;
+		}
+
+		/* Update tail pointer, record wr_id */
+		wq = &(*cur_qp)->rq;
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		++wq->tail;
+
+		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+					    V2_CQE_BYTE_32_SL_S);
+		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
+						V2_CQE_BYTE_32_RMT_QPN_M,
+						V2_CQE_BYTE_32_RMT_QPN_S);
+		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
+					      V2_CQE_BYTE_32_GRH_S) ?
+					      IB_WC_GRH : 0);
+	}
+
+	return 0;
+}
+
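+/*
+ * Poll up to num_entries completions.  The QP resolved for the previous
+ * CQE is cached in cur_qp, so consecutive completions on the same QP
+ * avoid repeated QP table lookups.
+ */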
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+			       struct ib_wc *wc)
+{
+	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+	struct hns_roce_qp *cur_qp = NULL;
+	unsigned long flags;
+	int npolled;
+
+	spin_lock_irqsave(&hr_cq->lock, flags);
+
+	for (npolled = 0; npolled < num_entries; ++npolled) {
+		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
+			break;
+	}
+
+	if (npolled) {
+		/* Make sure the polled CQEs are consumed before updating
+		 * the consumer index.
+		 */
+		wmb();
+		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+	}
+
+	spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+	return npolled;
+}
+
+static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_hem_table *table, int obj,
+			       int step_idx)
+{
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_cmd_mailbox *mailbox;
+	struct hns_roce_hem_iter iter;
+	struct hns_roce_hem_mhop mhop;
+	struct hns_roce_hem *hem;
+	unsigned long mhop_obj = obj;
+	int i, j, k;
+	int ret = 0;
+	u64 hem_idx = 0;
+	u64 l1_idx = 0;
+	u64 bt_ba = 0;
+	u32 chunk_ba_num;
+	u32 hop_num;
+	u16 op = 0xff;
+
+	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+		return 0;
+
+	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+	i = mhop.l0_idx;
+	j = mhop.l1_idx;
+	k = mhop.l2_idx;
+	hop_num = mhop.hop_num;
+	chunk_ba_num = mhop.bt_chunk_size / 8;
+
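+	/*
+	 * (l0, l1, l2) index a tree whose fan-out at each level is
+	 * chunk_ba_num, so the flat HEM index is the mixed-radix value of
+	 * those digits, e.g. i * chunk_ba_num^2 + j * chunk_ba_num + k
+	 * for two hops.
+	 */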
+	if (hop_num == 2) {
+		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
+			  k;
+		l1_idx = i * chunk_ba_num + j;
+	} else if (hop_num == 1) {
+		hem_idx = i * chunk_ba_num + j;
+	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+		hem_idx = i;
+	}
+
+	switch (table->type) {
+	case HEM_TYPE_QPC:
+		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+		break;
+	case HEM_TYPE_MTPT:
+		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+		break;
+	case HEM_TYPE_CQC:
+		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+		break;
+	case HEM_TYPE_SRQC:
+		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+		break;
+	default:
+		dev_warn(dev, "Table %d not to be written by mailbox!\n",
+			 table->type);
+		return 0;
+	}
+	op += step_idx;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	if (check_whether_last_step(hop_num, step_idx)) {
+		hem = table->hem[hem_idx];
+		for (hns_roce_hem_first(hem, &iter);
+		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+			bt_ba = hns_roce_hem_addr(&iter);
+
+			/* configure the ba, tag, and op */
+			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
+						obj, 0, op,
+						HNS_ROCE_CMD_TIMEOUT_MSECS);
+		}
+	} else {
+		if (step_idx == 0)
+			bt_ba = table->bt_l0_dma_addr[i];
+		else if (step_idx == 1 && hop_num == 2)
+			bt_ba = table->bt_l1_dma_addr[l1_idx];
+
+		/* configure the ba, tag, and op */
+		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
+					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+	}
+
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+	return ret;
+}
+
+static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
+				 struct hns_roce_hem_table *table, int obj,
+				 int step_idx)
+{
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret = 0;
+	u16 op = 0xff;
+
+	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+		return 0;
+
+	switch (table->type) {
+	case HEM_TYPE_QPC:
+		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
+		break;
+	case HEM_TYPE_MTPT:
+		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
+		break;
+	case HEM_TYPE_CQC:
+		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+		break;
+	case HEM_TYPE_SRQC:
+		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+		break;
+	default:
+		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
+			 table->type);
+		return 0;
+	}
+	op += step_idx;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	/* configure the tag and op */
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+	return ret;
+}
+
+static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
+				 struct hns_roce_mtt *mtt,
+				 enum ib_qp_state cur_state,
+				 enum ib_qp_state new_state,
+				 struct hns_roce_v2_qp_context *context,
+				 struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
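+	/*
+	 * The caller lays the context mask out directly behind the context,
+	 * which is why two contexts' worth of data is copied into the
+	 * mailbox here.
+	 */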
+	memcpy(mailbox->buf, context, sizeof(*context) * 2);
+
+	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
+				HNS_ROCE_CMD_MODIFY_QPC,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
+static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+				    const struct ib_qp_attr *attr,
+				    struct hns_roce_v2_qp_context *context,
+				    struct hns_roce_v2_qp_context *qpc_mask)
+{
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For each field software
+	 * modifies in the context, all bits of that field in the context
+	 * mask must be cleared to 0 at the same time; all other mask bits
+	 * stay 0x1.
+	 */
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+		       V2_QPC_BYTE_4_TST_S, 0);
+
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+		       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+		       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+		       V2_QPC_BYTE_4_SQPN_S, 0);
+
+	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+		       V2_QPC_BYTE_16_PD_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+		       V2_QPC_BYTE_20_RQWS_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+	/* When no VLAN is in use, the VLAN index must be set to 0xFFF */
+	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+		       V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
+	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+		       V2_QPC_BYTE_24_VLAN_IDX_S, 0);
+
+	/*
+	 * These fields should be zero in the new context. Since the context
+	 * is already zero-initialized, there is no need to write the zeros
+	 * again; only the corresponding fields of the context mask need to
+	 * be cleared here.
+	 */
+	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
+	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
+	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
+	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
+
+	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
+		       V2_QPC_BYTE_60_MAPID_S, 0);
+
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid,
+		     V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+		     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+		     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
+	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+		     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+
+	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
+
+	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+		       V2_QPC_BYTE_80_RX_CQN_S, 0);
+	if (ibqp->srq) {
+		roce_set_field(context->byte_76_srqn_op_en,
+			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+			       to_hr_srq(ibqp->srq)->srqn);
+		roce_set_field(qpc_mask->byte_76_srqn_op_en,
+			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+		roce_set_bit(context->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
+	}
+
+	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
+		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);
+
+	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_104_rq_sge,
+		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
+		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
+
+	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
+
+	qpc_mask->rq_rnr_timer = 0;
+	qpc_mask->rx_msg_len = 0;
+	qpc_mask->rx_rkey_pkt_info = 0;
+	qpc_mask->rx_va = 0;
+
+	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
+		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
+	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
+		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
+
+	roce_set_field(qpc_mask->byte_144_raq,
+		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
+		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
+	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
+		     0);
+	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
+		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
+	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
+
+	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
+		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
+	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
+		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
+
+	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+	roce_set_field(qpc_mask->byte_152_raq,
+		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
+		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
+
+	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
+		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
+
+	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
+		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
+
+	roce_set_field(context->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+
+	roce_set_bit(qpc_mask->byte_168_irrl_idx,
+		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
+	roce_set_field(qpc_mask->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
+		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
+
+	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
+	roce_set_field(qpc_mask->byte_172_sq_psn,
+		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
+
+	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
+		     0);
+
+	roce_set_field(qpc_mask->byte_176_msg_pktn,
+		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
+		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
+	roce_set_field(qpc_mask->byte_176_msg_pktn,
+		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
+		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
+
+	roce_set_field(qpc_mask->byte_184_irrl_idx,
+		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
+		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
+
+	qpc_mask->cur_sge_offset = 0;
+
+	roce_set_field(qpc_mask->byte_192_ext_sge,
+		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
+		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
+	roce_set_field(qpc_mask->byte_192_ext_sge,
+		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
+		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
+
+	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
+		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
+	roce_set_field(qpc_mask->byte_200_sq_max,
+		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
+		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
+
+	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
+	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
+
+	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+	qpc_mask->sq_timer = 0;
+
+	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+	roce_set_field(qpc_mask->byte_232_irrl_sge,
+		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+	qpc_mask->irrl_cur_sge_offset = 0;
+
+	roce_set_field(qpc_mask->byte_240_irrl_tail,
+		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+	roce_set_field(qpc_mask->byte_240_irrl_tail,
+		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
+		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
+	roce_set_field(qpc_mask->byte_240_irrl_tail,
+		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
+		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
+		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
+		     0);
+	roce_set_field(qpc_mask->byte_248_ack_psn,
+		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
+		     0);
+	roce_set_bit(qpc_mask->byte_248_ack_psn,
+		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
+		     0);
+
+	hr_qp->access_flags = attr->qp_access_flags;
+	hr_qp->pkey_index = attr->pkey_index;
+	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+		       V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
+		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);
+
+	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
+		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
+	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
+		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
+}
+
+static void modify_qp_init_to_init(struct ib_qp *ibqp,
+				   const struct ib_qp_attr *attr, int attr_mask,
+				   struct hns_roce_v2_qp_context *context,
+				   struct hns_roce_v2_qp_context *qpc_mask)
+{
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For each field software
+	 * modifies in the context, all bits of that field in the context
+	 * mask must be cleared to 0 at the same time; all other mask bits
+	 * stay 0x1.
+	 */
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+		       V2_QPC_BYTE_4_TST_S, 0);
+
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+		       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+		       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+			     0);
+
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+			     !!(attr->qp_access_flags &
+			     IB_ACCESS_REMOTE_WRITE));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+			     0);
+
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+			     !!(attr->qp_access_flags &
+			     IB_ACCESS_REMOTE_ATOMIC));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+			     0);
+	} else {
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+			     0);
+
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+			     0);
+
+		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+			     0);
+	}
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+		       V2_QPC_BYTE_16_PD_S, 0);
+
+	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+		       V2_QPC_BYTE_80_RX_CQN_S, 0);
+
+	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+		       V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+	if (ibqp->srq) {
+		roce_set_bit(context->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
+		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
+		roce_set_field(context->byte_76_srqn_op_en,
+			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+			       to_hr_srq(ibqp->srq)->srqn);
+		roce_set_field(qpc_mask->byte_76_srqn_op_en,
+			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+	}
+
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		context->qkey_xrcd = attr->pkey_index;
+	else
+		context->qkey_xrcd = hr_qp->pkey_index;
+
+	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+		       V2_QPC_BYTE_4_SQPN_S, 0);
+
+	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+		       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+		       V2_QPC_BYTE_56_DQPN_S, 0);
+	roce_set_field(context->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+	roce_set_field(qpc_mask->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+}
+
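+/*
+ * INIT->RTR programs the receive side of the connection: WQE/SGE base
+ * addresses and hop numbers, the expected receive PSN, the peer QPN and
+ * the path attributes (GID index, DMAC, MTU, traffic class, flow label).
+ */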
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+				 const struct ib_qp_attr *attr, int attr_mask,
+				 struct hns_roce_v2_qp_context *context,
+				 struct hns_roce_v2_qp_context *qpc_mask)
+{
+	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct device *dev = hr_dev->dev;
+	dma_addr_t dma_handle_2;
+	dma_addr_t dma_handle;
+	u32 page_size;
+	u8 port_num;
+	u64 *mtts_2;
+	u64 *mtts;
+	u8 *dmac;
+	u8 *smac;
+	int port;
+
+	/* Search qp buf's mtts */
+	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+				   hr_qp->mtt.first_seg, &dma_handle);
+	if (!mtts) {
+		dev_err(dev, "qp buf pa find failed\n");
+		return -EINVAL;
+	}
+
+	/* Search IRRL's mtts */
+	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+				     hr_qp->qpn, &dma_handle_2);
+	if (!mtts_2) {
+		dev_err(dev, "qp irrl_table find failed\n");
+		return -EINVAL;
+	}
+
+	if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+	    (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
+		return -EINVAL;
+	}
+
+	dmac = (u8 *)attr->ah_attr.roce.dmac;
+	context->wqe_sge_ba = (u32)(dma_handle >> 3);
+	qpc_mask->wqe_sge_ba = 0;
+
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For each field software
+	 * modifies in the context, all bits of that field in the context
+	 * mask must be cleared to 0 at the same time; all other mask bits
+	 * stay 0x1.
+	 */
+	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+		       V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
+	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
+
+	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
+		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+		       0 : hr_dev->caps.mtt_hop_num);
+	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
+		       hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
+
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
+		       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+		       0 : hr_dev->caps.mtt_hop_num);
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
+
+	roce_set_field(context->byte_16_buf_ba_pg_sz,
+		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
+		       hr_dev->caps.mtt_ba_pg_sz);
+	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
+
+	roce_set_field(context->byte_16_buf_ba_pg_sz,
+		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
+		       hr_dev->caps.mtt_buf_pg_sz);
+	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+	roce_set_field(context->byte_80_rnr_rx_cqn,
+		       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+		       V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+		       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+		       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+
+	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+	context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+				    >> PAGE_ADDR_SHIFT);
+	qpc_mask->rq_cur_blk_addr = 0;
+
+	roce_set_field(context->byte_92_srq_info,
+		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
+		       mtts[hr_qp->rq.offset / page_size]
+		       >> (32 + PAGE_ADDR_SHIFT));
+	roce_set_field(qpc_mask->byte_92_srq_info,
+		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
+
+	context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
+				    >> PAGE_ADDR_SHIFT);
+	qpc_mask->rq_nxt_blk_addr = 0;
+
+	roce_set_field(context->byte_104_rq_sge,
+		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
+		       mtts[hr_qp->rq.offset / page_size + 1]
+		       >> (32 + PAGE_ADDR_SHIFT));
+	roce_set_field(qpc_mask->byte_104_rq_sge,
+		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+	roce_set_field(context->byte_108_rx_reqepsn,
+		       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+		       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+		       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+		       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+
+	context->irrl_ba = (u32)dma_handle_2;
+	qpc_mask->irrl_ba = 0;
+	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+		       V2_QPC_BYTE_208_IRRL_BA_S,
+		      (dma_handle_2 >> 32) & V2_QPC_BYTE_208_IRRL_BA_M);
+	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+		       V2_QPC_BYTE_208_IRRL_BA_S, 0);
+
+	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
+	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
+
+	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+		     hr_qp->sq_signal_bits);
+	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+		     0);
+
+	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
+
+	smac = (u8 *)hr_dev->dev_addr[port];
+	/* Enable loopback when dmac equals smac, or when loop_idc is 1 */
+	if (ether_addr_equal_unaligned(dmac, smac) ||
+	    hr_dev->loop_idc == 0x1) {
+		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
+		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+	}
+
+	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+		       V2_QPC_BYTE_140_RR_MAX_S,
+		       ilog2((unsigned int)attr->max_dest_rd_atomic));
+	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+		       V2_QPC_BYTE_140_RR_MAX_S, 0);
+
+	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+		       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+		       V2_QPC_BYTE_56_DQPN_S, 0);
+
+	/* Configure GID index */
+	port_num = rdma_ah_get_port_num(&attr->ah_attr);
+	roce_set_field(context->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SGID_IDX_M,
+		       V2_QPC_BYTE_20_SGID_IDX_S,
+		       hns_get_gid_index(hr_dev, port_num - 1,
+					 grh->sgid_index));
+	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+		       V2_QPC_BYTE_20_SGID_IDX_M,
+		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
+	memcpy(&(context->dmac), dmac, 4);
+	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+	qpc_mask->dmac = 0;
+	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+		       V2_QPC_BYTE_52_DMAC_S, 0);
+
+	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
+	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+
+	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+		       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+		       V2_QPC_BYTE_28_FL_S, grh->flow_label);
+	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+		       V2_QPC_BYTE_28_FL_S, 0);
+
+	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+		       V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+		       V2_QPC_BYTE_24_TC_S, 0);
+
+	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+		       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+		       V2_QPC_BYTE_24_MTU_S, 0);
+
+	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+	roce_set_field(context->byte_84_rq_ci_pi,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+
+	context->rq_rnr_timer = 0;
+	qpc_mask->rq_rnr_timer = 0;
+
+	roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+		       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+	roce_set_field(context->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
+		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
+	roce_set_field(qpc_mask->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
+		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
+
+	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+		       V2_QPC_BYTE_208_SR_MAX_S,
+		       ilog2((unsigned int)attr->max_rd_atomic));
+	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+		       V2_QPC_BYTE_208_SR_MAX_S, 0);
+
+	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+		       V2_QPC_BYTE_28_SL_S, 0);
+	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+	return 0;
+}
+
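+/*
+ * RTR->RTS programs the send side: SQ block addresses, the initial send
+ * PSN (seeded into the ack/retry tracking fields as well), the ack
+ * timeout, and the retry and RNR-retry budgets.
+ */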
+static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+				const struct ib_qp_attr *attr, int attr_mask,
+				struct hns_roce_v2_qp_context *context,
+				struct hns_roce_v2_qp_context *qpc_mask)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct device *dev = hr_dev->dev;
+	dma_addr_t dma_handle;
+	u64 *mtts;
+
+	/* Search qp buf's mtts */
+	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+				   hr_qp->mtt.first_seg, &dma_handle);
+	if (!mtts) {
+		dev_err(dev, "qp buf pa find failed\n");
+		return -EINVAL;
+	}
+
+	/* If any invalid optional attribute is present, return an error */
+	if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+	    (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
+	    (attr_mask & IB_QP_CUR_STATE) ||
+	    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+		dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
+		return -EINVAL;
+	}
+
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For each field software
+	 * modifies in the context, all bits of that field in the context
+	 * mask must be cleared to 0 at the same time; all other mask bits
+	 * stay 0x1.
+	 */
+	roce_set_field(context->byte_60_qpst_mapid,
+		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
+	roce_set_field(qpc_mask->byte_60_qpst_mapid,
+		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
+
+	context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+	roce_set_field(context->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+	qpc_mask->sq_cur_blk_addr = 0;
+	roce_set_field(qpc_mask->byte_168_irrl_idx,
+		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+	context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+	roce_set_field(context->byte_232_irrl_sge,
+		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+	qpc_mask->rx_sq_cur_blk_addr = 0;
+	roce_set_field(qpc_mask->byte_232_irrl_sge,
+		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+	/*
+	 * These fields should be zero in the new context. Since the context
+	 * is already zero-initialized, there is no need to write the zeros
+	 * again; only the corresponding fields of the context mask need to
+	 * be cleared here.
+	 */
+	roce_set_field(qpc_mask->byte_232_irrl_sge,
+		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+	roce_set_field(qpc_mask->byte_240_irrl_tail,
+		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
+		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+	roce_set_field(context->byte_244_rnr_rxack,
+		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+	roce_set_field(qpc_mask->byte_244_rnr_rxack,
+		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_248_ack_psn,
+		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+	roce_set_bit(qpc_mask->byte_248_ack_psn,
+		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
+	roce_set_field(qpc_mask->byte_248_ack_psn,
+		       V2_QPC_BYTE_248_IRRL_PSN_M,
+		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_240_irrl_tail,
+		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+	roce_set_field(context->byte_220_retry_psn_msn,
+		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+
+	roce_set_field(context->byte_224_retry_msg,
+		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+	roce_set_field(qpc_mask->byte_224_retry_msg,
+		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+
+	roce_set_field(context->byte_224_retry_msg,
+		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+	roce_set_field(qpc_mask->byte_224_retry_msg,
+		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+
+	roce_set_bit(qpc_mask->byte_248_ack_psn,
+		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+
+	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+		       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+		       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+
+	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+
+	roce_set_field(context->byte_244_rnr_rxack,
+		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+	roce_set_field(qpc_mask->byte_244_rnr_rxack,
+		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+
+	roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+		       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+	roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+		       V2_QPC_BYTE_244_RNR_CNT_S, 0);
+
+	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+		       V2_QPC_BYTE_212_LSN_S, 0x100);
+	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+		       V2_QPC_BYTE_212_LSN_S, 0);
+
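+	/* Clamp the ack timeout (AT field) to a minimum of 0xf */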
+	if (attr->timeout < 0xf)
+		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+			       V2_QPC_BYTE_28_AT_S, 0xf);
+	else
+		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+			       V2_QPC_BYTE_28_AT_S, attr->timeout);
+
+	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+		       V2_QPC_BYTE_28_AT_S, 0);
+
+	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+		       V2_QPC_BYTE_28_SL_S,
+		       rdma_ah_get_sl(&attr->ah_attr));
+	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+		       V2_QPC_BYTE_28_SL_S, 0);
+	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+	roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+
+	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+	roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+	return 0;
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+				 const struct ib_qp_attr *attr,
+				 int attr_mask, enum ib_qp_state cur_state,
+				 enum ib_qp_state new_state)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct hns_roce_v2_qp_context *context;
+	struct hns_roce_v2_qp_context *qpc_mask;
+	struct device *dev = hr_dev->dev;
+	int ret = -EINVAL;
+
+	context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	qpc_mask = context + 1;
+	/*
+	 * In v2 engine, software pass context and context mask to hardware
+	 * when modifying qp. If software need modify some fields in context,
+	 * we should set all bits of the relevant fields in context mask to
+	 * 0 at the same time, else set them to 0x1.
+	 */
+	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
+				       qpc_mask);
+	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
+					    qpc_mask);
+		if (ret)
+			goto out;
+	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
+					   qpc_mask);
+		if (ret)
+			goto out;
+	} else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
+		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
+		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
+		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
+		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
+		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
+		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
+		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
+		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
+		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
+		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+		/* Nothing */
+		;
+	} else {
+		dev_err(dev, "Illegal state for QP!\n");
+		goto out;
+	}
+
+	/* Every state transition must update the QP state field */
+	roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+		       V2_QPC_BYTE_60_QP_ST_S, new_state);
+	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+		       V2_QPC_BYTE_60_QP_ST_S, 0);
+
+	/* SW passes the context to HW */
+	ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
+				    context, hr_qp);
+	if (ret) {
+		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
+		goto out;
+	}
+
+	hr_qp->state = new_state;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		hr_qp->resp_depth = attr->max_dest_rd_atomic;
+	if (attr_mask & IB_QP_PORT) {
+		hr_qp->port = attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+	}
+
+	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+		if (ibqp->send_cq != ibqp->recv_cq)
+			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+					     hr_qp->qpn, NULL);
+
+		hr_qp->rq.head = 0;
+		hr_qp->rq.tail = 0;
+		hr_qp->sq.head = 0;
+		hr_qp->sq.tail = 0;
+		hr_qp->sq_next_wqe = 0;
+		hr_qp->next_sge = 0;
+	}
+
+out:
+	kfree(context);
+	return ret;
+}
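
To make the context/mask convention concrete, here is a minimal sketch of a single-field update. It is not part of the patch above, and `sl` is a hypothetical local: the mask starts as all ones, and only the bits of the field actually being modified are cleared.

	struct hns_roce_v2_qp_context *ctx, *mask;

	/* the context is followed immediately by its mask */
	ctx = kzalloc(2 * sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	mask = ctx + 1;

	/* an all-ones mask tells the hardware to leave every field alone */
	memset(mask, 0xff, sizeof(*mask));

	/* write the new service level into the context ... */
	roce_set_field(ctx->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, sl);
	/* ... and clear the same bits in the mask so the update is applied */
	roce_set_field(mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, 0);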
+
+static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+{
+	switch (state) {
+	case HNS_ROCE_QP_ST_RST:	return IB_QPS_RESET;
+	case HNS_ROCE_QP_ST_INIT:	return IB_QPS_INIT;
+	case HNS_ROCE_QP_ST_RTR:	return IB_QPS_RTR;
+	case HNS_ROCE_QP_ST_RTS:	return IB_QPS_RTS;
+	case HNS_ROCE_QP_ST_SQ_DRAINING:
+	case HNS_ROCE_QP_ST_SQD:	return IB_QPS_SQD;
+	case HNS_ROCE_QP_ST_SQER:	return IB_QPS_SQE;
+	case HNS_ROCE_QP_ST_ERR:	return IB_QPS_ERR;
+	default:			return -1;
+	}
+}
+
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
+				 struct hns_roce_qp *hr_qp,
+				 struct hns_roce_v2_qp_context *hr_context)
+{
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
+				HNS_ROCE_CMD_QUERY_QPC,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
+	if (ret) {
+		dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
+		goto out;
+	}
+
+	memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+
+out:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+	return ret;
+}
+
+static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+				int qp_attr_mask,
+				struct ib_qp_init_attr *qp_init_attr)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct hns_roce_v2_qp_context *context;
+	struct device *dev = hr_dev->dev;
+	int tmp_qp_state;
+	int state;
+	int ret;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	memset(qp_attr, 0, sizeof(*qp_attr));
+	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+	mutex_lock(&hr_qp->mutex);
+
+	if (hr_qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		ret = 0;
+		goto done;
+	}
+
+	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
+	if (ret) {
+		dev_err(dev, "query qpc error\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	state = roce_get_field(context->byte_60_qpst_mapid,
+			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
+	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
+	if (tmp_qp_state == -1) {
+		dev_err(dev, "Illegal ib_qp_state\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	hr_qp->state = (u8)tmp_qp_state;
+	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
+							V2_QPC_BYTE_24_MTU_M,
+							V2_QPC_BYTE_24_MTU_S);
+	qp_attr->path_mig_state = IB_MIG_ARMED;
+	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
+	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+		qp_attr->qkey = V2_QKEY_VAL;
+
+	qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
+					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
+	qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
+					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
+	qp_attr->dest_qp_num = (u32)roce_get_field(context->byte_56_dqpn_err,
+						   V2_QPC_BYTE_56_DQPN_M,
+						   V2_QPC_BYTE_56_DQPN_S);
+	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
+						  V2_QPC_BYTE_76_RRE_S)) << 2) |
+				   ((roce_get_bit(context->byte_76_srqn_op_en,
+						  V2_QPC_BYTE_76_RWE_S)) << 1) |
+				   ((roce_get_bit(context->byte_76_srqn_op_en,
+						  V2_QPC_BYTE_76_ATE_S)) << 3);
+	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
+		struct ib_global_route *grh =
+				rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+
+		rdma_ah_set_sl(&qp_attr->ah_attr,
+			       roce_get_field(context->byte_28_at_fl,
+					      V2_QPC_BYTE_28_SL_M,
+					      V2_QPC_BYTE_28_SL_S));
+		grh->flow_label = roce_get_field(context->byte_28_at_fl,
+						 V2_QPC_BYTE_28_FL_M,
+						 V2_QPC_BYTE_28_FL_S);
+		grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
+						 V2_QPC_BYTE_20_SGID_IDX_M,
+						 V2_QPC_BYTE_20_SGID_IDX_S);
+		grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
+						V2_QPC_BYTE_24_HOP_LIMIT_M,
+						V2_QPC_BYTE_24_HOP_LIMIT_S);
+		grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
+						    V2_QPC_BYTE_24_TC_M,
+						    V2_QPC_BYTE_24_TC_S);
+
+		memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
+	}
+
+	qp_attr->port_num = hr_qp->port + 1;
+	qp_attr->sq_draining = 0;
+	qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
+						     V2_QPC_BYTE_208_SR_MAX_M,
+						     V2_QPC_BYTE_208_SR_MAX_S);
+	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
+						     V2_QPC_BYTE_140_RR_MAX_M,
+						     V2_QPC_BYTE_140_RR_MAX_S);
+	qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
+						 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+						 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
+	qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
+					      V2_QPC_BYTE_28_AT_M,
+					      V2_QPC_BYTE_28_AT_S);
+	qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
+					    V2_QPC_BYTE_212_RETRY_CNT_M,
+					    V2_QPC_BYTE_212_RETRY_CNT_S);
+	qp_attr->rnr_retry = context->rq_rnr_timer;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+
+	if (!ibqp->uobject) {
+		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+	} else {
+		qp_attr->cap.max_send_wr = 0;
+		qp_attr->cap.max_send_sge = 0;
+	}
+
+	qp_init_attr->cap = qp_attr->cap;
+
+out:
+	mutex_unlock(&hr_qp->mutex);
+	kfree(context);
+	return ret;
+}
+
+static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+					 struct hns_roce_qp *hr_qp,
+					 int is_user)
+{
+	struct hns_roce_cq *send_cq, *recv_cq;
+	struct device *dev = hr_dev->dev;
+	int ret;
+
+	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
+		/* Modify the QP to the RESET state before destroying it */
+		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+					    hr_qp->state, IB_QPS_RESET);
+		if (ret) {
+			dev_err(dev, "modify QP %06lx to ERR failed.\n",
+				hr_qp->qpn);
+			return ret;
+		}
+	}
+
+	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
+	hns_roce_lock_cqs(send_cq, recv_cq);
+
+	if (!is_user) {
+		__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
+		if (send_cq != recv_cq)
+			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+	}
+
+	hns_roce_qp_remove(hr_dev, hr_qp);
+
+	hns_roce_unlock_cqs(send_cq, recv_cq);
+
+	hns_roce_qp_free(hr_dev, hr_qp);
+
+	/* If this is not a special QP, free its QPN */
+	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
+	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
+	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
+		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+	if (is_user) {
+		ib_umem_release(hr_qp->umem);
+	} else {
+		kfree(hr_qp->sq.wrid);
+		kfree(hr_qp->rq.wrid);
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+	}
+
+	return 0;
+}
+
+static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	int ret;
+
+	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+	if (ret) {
+		dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
+		return ret;
+	}
+
+	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+		kfree(hr_to_hr_sqp(hr_qp));
+	else
+		kfree(hr_qp);
+
+	return 0;
+}
+
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+	.cmq_init = hns_roce_v2_cmq_init,
+	.cmq_exit = hns_roce_v2_cmq_exit,
+	.hw_profile = hns_roce_v2_profile,
+	.post_mbox = hns_roce_v2_post_mbox,
+	.chk_mbox = hns_roce_v2_chk_mbox,
+	.set_gid = hns_roce_v2_set_gid,
+	.set_mac = hns_roce_v2_set_mac,
+	.write_mtpt = hns_roce_v2_write_mtpt,
+	.write_cqc = hns_roce_v2_write_cqc,
+	.set_hem = hns_roce_v2_set_hem,
+	.clear_hem = hns_roce_v2_clear_hem,
+	.modify_qp = hns_roce_v2_modify_qp,
+	.query_qp = hns_roce_v2_query_qp,
+	.destroy_qp = hns_roce_v2_destroy_qp,
+	.post_send = hns_roce_v2_post_send,
+	.post_recv = hns_roce_v2_post_recv,
+	.req_notify_cq = hns_roce_v2_req_notify_cq,
+	.poll_cq = hns_roce_v2_poll_cq,
+};
+
+static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+	/* required last entry */
+	{0, }
+};
+
+static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+				  struct hnae3_handle *handle)
+{
+	const struct pci_device_id *id;
+
+	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+	if (!id) {
+		dev_err(hr_dev->dev, "device is not compatible!\n");
+		return -ENXIO;
+	}
+
+	hr_dev->hw = &hns_roce_hw_v2;
+	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+	hr_dev->odb_offset = hr_dev->sdb_offset;
+
+	/* Get info from NIC driver. */
+	hr_dev->reg_base = handle->rinfo.roce_io_base;
+	hr_dev->caps.num_ports = 1;
+	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
+	hr_dev->iboe.phy_port[0] = 0;
+
+	/* cmd issue mode: 0 is poll, 1 is event */
+	hr_dev->cmd_mod = 0;
+	hr_dev->loop_idc = 0;
+
+	return 0;
+}
+
+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+	struct hns_roce_dev *hr_dev;
+	int ret;
+
+	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+	if (!hr_dev)
+		return -ENOMEM;
+
+	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
+	if (!hr_dev->priv) {
+		ret = -ENOMEM;
+		goto error_failed_kzalloc;
+	}
+
+	hr_dev->pci_dev = handle->pdev;
+	hr_dev->dev = &handle->pdev->dev;
+	handle->priv = hr_dev;
+
+	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+	if (ret) {
+		dev_err(hr_dev->dev, "Get Configuration failed!\n");
+		goto error_failed_get_cfg;
+	}
+
+	ret = hns_roce_init(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+		goto error_failed_get_cfg;
+	}
+
+	return 0;
+
+error_failed_get_cfg:
+	kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+	ib_dealloc_device(&hr_dev->ib_dev);
+
+	return ret;
+}
+
+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+					   bool reset)
+{
+	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+
+	hns_roce_exit(hr_dev);
+	kfree(hr_dev->priv);
+	ib_dealloc_device(&hr_dev->ib_dev);
+}
+
+static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
+	.init_instance = hns_roce_hw_v2_init_instance,
+	.uninit_instance = hns_roce_hw_v2_uninit_instance,
+};
+
+static struct hnae3_client hns_roce_hw_v2_client = {
+	.name = "hns_roce_hw_v2",
+	.type = HNAE3_CLIENT_ROCE,
+	.ops = &hns_roce_hw_v2_ops,
+};
+
+static int __init hns_roce_hw_v2_init(void)
+{
+	return hnae3_register_client(&hns_roce_hw_v2_client);
+}
+
+static void __exit hns_roce_hw_v2_exit(void)
+{
+	hnae3_unregister_client(&hns_roce_hw_v2_client);
+}
+
+module_init(hns_roce_hw_v2_init);
+module_exit(hns_roce_hw_v2_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");

+ 1165 - 0
drivers/infiniband/hw/hns/hns_roce_hw_v2.h

@@ -0,0 +1,1165 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V2_H
+#define _HNS_ROCE_HW_V2_H
+
+#include <linux/bitops.h>
+
+#define HNS_ROCE_VF_QPC_BT_NUM			256
+#define HNS_ROCE_VF_SRQC_BT_NUM			64
+#define HNS_ROCE_VF_CQC_BT_NUM			64
+#define HNS_ROCE_VF_MPT_BT_NUM			64
+#define HNS_ROCE_VF_EQC_NUM			64
+#define HNS_ROCE_VF_SMAC_NUM			32
+#define HNS_ROCE_VF_SGID_NUM			32
+#define HNS_ROCE_VF_SL_NUM			8
+
+#define HNS_ROCE_V2_MAX_QP_NUM			0x2000
+#define HNS_ROCE_V2_MAX_WQE_NUM			0x8000
+#define HNS_ROCE_V2_MAX_CQ_NUM			0x8000
+#define HNS_ROCE_V2_MAX_CQE_NUM			0x400000
+#define HNS_ROCE_V2_MAX_RQ_SGE_NUM		0x100
+#define HNS_ROCE_V2_MAX_SQ_SGE_NUM		0xff
+#define HNS_ROCE_V2_MAX_SQ_INLINE		0x20
+#define HNS_ROCE_V2_UAR_NUM			256
+#define HNS_ROCE_V2_PHY_UAR_NUM			1
+#define HNS_ROCE_V2_MAX_MTPT_NUM		0x8000
+#define HNS_ROCE_V2_MAX_MTT_SEGS		0x100000
+#define HNS_ROCE_V2_MAX_CQE_SEGS		0x10000
+#define HNS_ROCE_V2_MAX_PD_NUM			0x400000
+#define HNS_ROCE_V2_MAX_QP_INIT_RDMA		128
+#define HNS_ROCE_V2_MAX_QP_DEST_RDMA		128
+#define HNS_ROCE_V2_MAX_SQ_DESC_SZ		64
+#define HNS_ROCE_V2_MAX_RQ_DESC_SZ		16
+#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ		64
+#define HNS_ROCE_V2_QPC_ENTRY_SZ		256
+#define HNS_ROCE_V2_IRRL_ENTRY_SZ		64
+#define HNS_ROCE_V2_CQC_ENTRY_SZ		64
+#define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
+#define HNS_ROCE_V2_MTT_ENTRY_SZ		64
+#define HNS_ROCE_V2_CQE_ENTRY_SIZE		32
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED		0xFFFFF000
+#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM		2
+#define HNS_ROCE_INVALID_LKEY			0x100
+#define HNS_ROCE_CMQ_TX_TIMEOUT			200
+
+#define HNS_ROCE_CONTEXT_HOP_NUM		1
+#define HNS_ROCE_MTT_HOP_NUM			1
+#define HNS_ROCE_CQE_HOP_NUM			1
+#define HNS_ROCE_PBL_HOP_NUM			2
+
+#define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT	0
+#define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT	1
+#define HNS_ROCE_CMD_FLAG_NEXT_SHIFT		2
+#define HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT	3
+#define HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT		4
+#define HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT	5
+
+#define HNS_ROCE_CMD_FLAG_IN		BIT(HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_OUT		BIT(HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NEXT		BIT(HNS_ROCE_CMD_FLAG_NEXT_SHIFT)
+#define HNS_ROCE_CMD_FLAG_WR		BIT(HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NO_INTR	BIT(HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT)
+#define HNS_ROCE_CMD_FLAG_ERR_INTR	BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
+
+#define HNS_ROCE_CMQ_DESC_NUM_S		3
+#define HNS_ROCE_CMQ_EN_B		16
+#define HNS_ROCE_CMQ_ENABLE		BIT(HNS_ROCE_CMQ_EN_B)
+
+#define check_whether_last_step(hop_num, step_idx) \
+	((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+	(step_idx == 1 && hop_num == 1) || \
+	(step_idx == 2 && hop_num == 2))
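
As a worked example of the macro above: with HNS_ROCE_CONTEXT_HOP_NUM defined as 1, only step_idx == 1 is the last step, so a (purely illustrative) multi-level table walk can use it as its exit test:

	int step_idx;

	for (step_idx = 0; step_idx <= HNS_ROCE_CONTEXT_HOP_NUM; step_idx++) {
		/* ... program the base-address-table entry for this level ... */
		if (check_whether_last_step(HNS_ROCE_CONTEXT_HOP_NUM, step_idx))
			break;	/* step 1 is the leaf of a 1-hop table */
	}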
+
+#define V2_CQ_DB_REQ_NOT_SOL			0
+#define V2_CQ_DB_REQ_NOT			1
+
+#define V2_CQ_STATE_VALID			1
+#define V2_QKEY_VAL				0x80010000
+
+#define	GID_LEN_V2				16
+
+#define HNS_ROCE_V2_CQE_QPN_MASK		0x3ffff
+
+enum {
+	HNS_ROCE_V2_WQE_OP_SEND				= 0x0,
+	HNS_ROCE_V2_WQE_OP_SEND_WITH_INV		= 0x1,
+	HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM		= 0x2,
+	HNS_ROCE_V2_WQE_OP_RDMA_WRITE			= 0x3,
+	HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM		= 0x4,
+	HNS_ROCE_V2_WQE_OP_RDMA_READ			= 0x5,
+	HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP		= 0x6,
+	HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD		= 0x7,
+	HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP	= 0x8,
+	HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD	= 0x9,
+	HNS_ROCE_V2_WQE_OP_FAST_REG_PMR			= 0xa,
+	HNS_ROCE_V2_WQE_OP_LOCAL_INV			= 0xb,
+	HNS_ROCE_V2_WQE_OP_BIND_MW_TYPE			= 0xc,
+	HNS_ROCE_V2_WQE_OP_MASK				= 0x1f,
+};
+
+enum {
+	HNS_ROCE_SQ_OPCODE_SEND = 0x0,
+	HNS_ROCE_SQ_OPCODE_SEND_WITH_INV = 0x1,
+	HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM = 0x2,
+	HNS_ROCE_SQ_OPCODE_RDMA_WRITE = 0x3,
+	HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM = 0x4,
+	HNS_ROCE_SQ_OPCODE_RDMA_READ = 0x5,
+	HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP = 0x6,
+	HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD = 0x7,
+	HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
+	HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
+	HNS_ROCE_SQ_OPCODE_FAST_REG_WR = 0xa,
+	HNS_ROCE_SQ_OPCODE_LOCAL_INV = 0xb,
+	HNS_ROCE_SQ_OPCODE_BIND_MW = 0xc,
+};
+
+enum {
+	/* rq operations */
+	HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM = 0x0,
+	HNS_ROCE_V2_OPCODE_SEND = 0x1,
+	HNS_ROCE_V2_OPCODE_SEND_WITH_IMM = 0x2,
+	HNS_ROCE_V2_OPCODE_SEND_WITH_INV = 0x3,
+};
+
+enum {
+	HNS_ROCE_V2_SQ_DB	= 0x0,
+	HNS_ROCE_V2_RQ_DB	= 0x1,
+	HNS_ROCE_V2_SRQ_DB	= 0x2,
+	HNS_ROCE_V2_CQ_DB_PTR	= 0x3,
+	HNS_ROCE_V2_CQ_DB_NTR	= 0x4,
+};
+
+enum {
+	HNS_ROCE_CQE_V2_SUCCESS				= 0x00,
+	HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR		= 0x01,
+	HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR			= 0x02,
+	HNS_ROCE_CQE_V2_LOCAL_PROT_ERR			= 0x04,
+	HNS_ROCE_CQE_V2_WR_FLUSH_ERR			= 0x05,
+	HNS_ROCE_CQE_V2_MW_BIND_ERR			= 0x06,
+	HNS_ROCE_CQE_V2_BAD_RESP_ERR			= 0x10,
+	HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR		= 0x11,
+	HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR		= 0x12,
+	HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR		= 0x13,
+	HNS_ROCE_CQE_V2_REMOTE_OP_ERR			= 0x14,
+	HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR		= 0x15,
+	HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR		= 0x16,
+	HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR		= 0x22,
+
+	HNS_ROCE_V2_CQE_STATUS_MASK			= 0xff,
+};
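
When polling the CQ, these hardware status codes are translated into the matching ib_wc_status values. A hedged, abbreviated sketch of that mapping (the driver's actual poll routine handles every code) is:

static enum ib_wc_status to_ib_wc_status(u32 cqe_status)
{
	switch (cqe_status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		return IB_WC_SUCCESS;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		return IB_WC_RETRY_EXC_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}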
+
+/* CMQ command */
+enum hns_roce_opcode_type {
+	HNS_ROCE_OPC_QUERY_HW_VER			= 0x8000,
+	HNS_ROCE_OPC_CFG_GLOBAL_PARAM			= 0x8001,
+	HNS_ROCE_OPC_ALLOC_PF_RES			= 0x8004,
+	HNS_ROCE_OPC_QUERY_PF_RES			= 0x8400,
+	HNS_ROCE_OPC_ALLOC_VF_RES			= 0x8401,
+	HNS_ROCE_OPC_CFG_BT_ATTR			= 0x8506,
+};
+
+enum {
+	TYPE_CRQ,
+	TYPE_CSQ,
+};
+
+enum hns_roce_cmd_return_status {
+	CMD_EXEC_SUCCESS	= 0,
+	CMD_NO_AUTH		= 1,
+	CMD_NOT_EXEC		= 2,
+	CMD_QUEUE_FULL		= 3,
+};
+
+struct hns_roce_v2_cq_context {
+	u32	byte_4_pg_ceqn;
+	u32	byte_8_cqn;
+	u32	cqe_cur_blk_addr;
+	u32	byte_16_hop_addr;
+	u32	cqe_nxt_blk_addr;
+	u32	byte_24_pgsz_addr;
+	u32	byte_28_cq_pi;
+	u32	byte_32_cq_ci;
+	u32	cqe_ba;
+	u32	byte_40_cqe_ba;
+	u32	byte_44_db_record;
+	u32	db_record_addr;
+	u32	byte_52_cqe_cnt;
+	u32	byte_56_cqe_period_maxcnt;
+	u32	cqe_report_timer;
+	u32	byte_64_se_cqe_idx;
+};
+#define	V2_CQC_BYTE_4_CQ_ST_S 0
+#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
+
+#define	V2_CQC_BYTE_4_POLL_S 2
+
+#define	V2_CQC_BYTE_4_SE_S 3
+
+#define	V2_CQC_BYTE_4_OVER_IGNORE_S 4
+
+#define	V2_CQC_BYTE_4_COALESCE_S 5
+
+#define	V2_CQC_BYTE_4_ARM_ST_S 6
+#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6)
+
+#define	V2_CQC_BYTE_4_SHIFT_S 8
+#define V2_CQC_BYTE_4_SHIFT_M GENMASK(12, 8)
+
+#define	V2_CQC_BYTE_4_CMD_SN_S 13
+#define V2_CQC_BYTE_4_CMD_SN_M GENMASK(14, 13)
+
+#define	V2_CQC_BYTE_4_CEQN_S 15
+#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15)
+
+#define	V2_CQC_BYTE_4_PAGE_OFFSET_S 24
+#define V2_CQC_BYTE_4_PAGE_OFFSET_M GENMASK(31, 24)
+
+#define	V2_CQC_BYTE_8_CQN_S 0
+#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
+
+#define	V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
+#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_CQC_BYTE_16_CQE_HOP_NUM_S 30
+#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30)
+
+#define	V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S 0
+#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_CQC_BYTE_24_CQE_BA_PG_SZ_S 24
+#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_M GENMASK(27, 24)
+
+#define	V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S 28
+#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define	V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0
+#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0)
+
+#define	V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0
+#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0)
+
+#define	V2_CQC_BYTE_40_CQE_BA_S 0
+#define V2_CQC_BYTE_40_CQE_BA_M GENMASK(28, 0)
+
+#define	V2_CQC_BYTE_44_DB_RECORD_EN_S 0
+
+#define	V2_CQC_BYTE_52_CQE_CNT_S 0
+#define	V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
+
+#define	V2_CQC_BYTE_56_CQ_MAX_CNT_S 0
+#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0)
+
+#define	V2_CQC_BYTE_56_CQ_PERIOD_S 16
+#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16)
+
+#define	V2_CQC_BYTE_64_SE_CQE_IDX_S 0
+#define	V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
+
+enum {
+	V2_MPT_ST_VALID = 0x1,
+};
+
+enum hns_roce_v2_qp_state {
+	HNS_ROCE_QP_ST_RST,
+	HNS_ROCE_QP_ST_INIT,
+	HNS_ROCE_QP_ST_RTR,
+	HNS_ROCE_QP_ST_RTS,
+	HNS_ROCE_QP_ST_SQER,
+	HNS_ROCE_QP_ST_SQD,
+	HNS_ROCE_QP_ST_ERR,
+	HNS_ROCE_QP_ST_SQ_DRAINING,
+	HNS_ROCE_QP_NUM_ST
+};
+
+struct hns_roce_v2_qp_context {
+	u32	byte_4_sqpn_tst;
+	u32	wqe_sge_ba;
+	u32	byte_12_sq_hop;
+	u32	byte_16_buf_ba_pg_sz;
+	u32	byte_20_smac_sgid_idx;
+	u32	byte_24_mtu_tc;
+	u32	byte_28_at_fl;
+	u8	dgid[GID_LEN_V2];
+	u32	dmac;
+	u32	byte_52_udpspn_dmac;
+	u32	byte_56_dqpn_err;
+	u32	byte_60_qpst_mapid;
+	u32	qkey_xrcd;
+	u32	byte_68_rq_db;
+	u32	rq_db_record_addr;
+	u32	byte_76_srqn_op_en;
+	u32	byte_80_rnr_rx_cqn;
+	u32	byte_84_rq_ci_pi;
+	u32	rq_cur_blk_addr;
+	u32	byte_92_srq_info;
+	u32	byte_96_rx_reqmsn;
+	u32	rq_nxt_blk_addr;
+	u32	byte_104_rq_sge;
+	u32	byte_108_rx_reqepsn;
+	u32	rq_rnr_timer;
+	u32	rx_msg_len;
+	u32	rx_rkey_pkt_info;
+	u64	rx_va;
+	u32	byte_132_trrl;
+	u32	trrl_ba;
+	u32	byte_140_raq;
+	u32	byte_144_raq;
+	u32	byte_148_raq;
+	u32	byte_152_raq;
+	u32	byte_156_raq;
+	u32	byte_160_sq_ci_pi;
+	u32	sq_cur_blk_addr;
+	u32	byte_168_irrl_idx;
+	u32	byte_172_sq_psn;
+	u32	byte_176_msg_pktn;
+	u32	sq_cur_sqe_blk_addr;
+	u32	byte_184_irrl_idx;
+	u32	cur_sge_offset;
+	u32	byte_192_ext_sge;
+	u32	byte_196_sq_psn;
+	u32	byte_200_sq_max;
+	u32	irrl_ba;
+	u32	byte_208_irrl;
+	u32	byte_212_lsn;
+	u32	sq_timer;
+	u32	byte_220_retry_psn_msn;
+	u32	byte_224_retry_msg;
+	u32	rx_sq_cur_blk_addr;
+	u32	byte_232_irrl_sge;
+	u32	irrl_cur_sge_offset;
+	u32	byte_240_irrl_tail;
+	u32	byte_244_rnr_rxack;
+	u32	byte_248_ack_psn;
+	u32	byte_252_err_txcqn;
+	u32	byte_256_sqflush_rqcqe;
+};
+
+#define	V2_QPC_BYTE_4_TST_S 0
+#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
+
+#define	V2_QPC_BYTE_4_SGE_SHIFT_S 3
+#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
+
+#define	V2_QPC_BYTE_4_SQPN_S 8
+#define V2_QPC_BYTE_4_SQPN_M  GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_12_WQE_SGE_BA_S 0
+#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
+
+#define	V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
+#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
+
+#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
+
+#define	V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
+#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
+
+#define	V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
+#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define	V2_QPC_BYTE_16_PD_S 8
+#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
+#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
+
+#define	V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
+#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
+
+#define	V2_QPC_BYTE_20_RQWS_S 4
+#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
+
+#define	V2_QPC_BYTE_20_SQ_SHIFT_S 8
+#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
+
+#define	V2_QPC_BYTE_20_RQ_SHIFT_S 12
+#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
+
+#define	V2_QPC_BYTE_20_SGID_IDX_S 16
+#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
+
+#define	V2_QPC_BYTE_20_SMAC_IDX_S 24
+#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_24_HOP_LIMIT_S 0
+#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
+
+#define	V2_QPC_BYTE_24_TC_S 8
+#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
+
+#define	V2_QPC_BYTE_24_VLAN_IDX_S 16
+#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
+
+#define	V2_QPC_BYTE_24_MTU_S 28
+#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
+
+#define	V2_QPC_BYTE_28_FL_S 0
+#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
+
+#define	V2_QPC_BYTE_28_SL_S 20
+#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
+
+#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
+
+#define V2_QPC_BYTE_28_CE_FLAG_S 25
+
+#define V2_QPC_BYTE_28_LBI_S 26
+
+#define	V2_QPC_BYTE_28_AT_S 27
+#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
+
+#define	V2_QPC_BYTE_52_DMAC_S 0
+#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_52_UDPSPN_S 16
+#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_56_DQPN_S 0
+#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_56_SQ_TX_ERR_S 24
+#define	V2_QPC_BYTE_56_SQ_RX_ERR_S 25
+#define	V2_QPC_BYTE_56_RQ_TX_ERR_S 26
+#define	V2_QPC_BYTE_56_RQ_RX_ERR_S 27
+
+#define	V2_QPC_BYTE_56_LP_PKTN_INI_S 28
+#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
+
+#define	V2_QPC_BYTE_60_MAPID_S 0
+#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+
+#define	V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+
+#define	V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+
+#define	V2_QPC_BYTE_60_RQ_MAP_IND_S 15
+
+#define	V2_QPC_BYTE_60_TEMPID_S 16
+#define V2_QPC_BYTE_60_TEMPID_M  GENMASK(22, 16)
+
+#define	V2_QPC_BYTE_60_EXT_MAP_IND_S 23
+
+#define	V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
+#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
+
+#define	V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+
+#define	V2_QPC_BYTE_60_QP_ST_S 29
+#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
+
+#define	V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
+
+#define	V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
+#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
+
+#define	V2_QPC_BYTE_76_SRQN_S 0
+#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_76_SRQ_EN_S 24
+
+#define	V2_QPC_BYTE_76_RRE_S 25
+
+#define	V2_QPC_BYTE_76_RWE_S 26
+
+#define	V2_QPC_BYTE_76_ATE_S 27
+
+#define	V2_QPC_BYTE_76_RQIE_S 28
+
+#define	V2_QPC_BYTE_80_RX_CQN_S 0
+#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
+#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
+
+#define	V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define	V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_QPC_BYTE_92_SRQ_INFO_S 20
+#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
+
+#define	V2_QPC_BYTE_96_RX_REQ_MSN_S 0
+#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
+#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
+#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_108_INV_CREDIT_S 0
+
+#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
+
+#define	V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
+#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
+
+#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
+
+#define	V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
+#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
+#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
+
+#define	V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
+#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
+
+#define	V2_QPC_BYTE_132_TRRL_BA_S 16
+#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_140_TRRL_BA_S 0
+#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
+
+#define	V2_QPC_BYTE_140_RR_MAX_S 12
+#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
+
+#define	V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+
+#define	V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
+#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
+
+#define	V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
+#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
+
+#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
+#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
+
+#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
+
+#define	V2_QPC_BYTE_148_RQ_MSN_S 0
+#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_152_RAQ_PSN_S 8
+#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
+#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define	V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
+
+#define	V2_QPC_BYTE_168_LP_SGEN_INI_S 21
+#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 21)
+
+#define	V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
+#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
+
+#define	V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
+#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
+
+#define	V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
+#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
+
+#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
+
+#define V2_QPC_BYTE_172_FRE_S 7
+
+#define	V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
+#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
+#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
+#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
+#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
+#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
+
+#define	V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
+#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
+#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
+
+#define	V2_QPC_BYTE_196_IRRL_HEAD_S 0
+#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
+
+#define	V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
+#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
+#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
+
+#define	V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
+#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_208_IRRL_BA_S 0
+#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
+
+#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
+
+#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
+
+#define V2_QPC_BYTE_208_RMT_E2E_S 28
+
+#define	V2_QPC_BYTE_208_SR_MAX_S 29
+#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
+
+#define	V2_QPC_BYTE_212_LSN_S 0
+#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
+#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
+
+#define	V2_QPC_BYTE_212_CHECK_FLG_S 27
+#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
+
+#define	V2_QPC_BYTE_212_RETRY_CNT_S 29
+#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
+
+#define	V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
+#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
+
+#define	V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
+#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
+#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
+
+#define	V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
+#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
+
+#define	V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define	V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
+#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+
+#define	V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
+#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
+
+#define	V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
+#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
+
+#define	V2_QPC_BYTE_240_RX_ACK_MSN_S 16
+#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
+
+#define	V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
+#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
+#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
+
+#define	V2_QPC_BYTE_244_RNR_CNT_S 27
+#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+
+#define	V2_QPC_BYTE_248_IRRL_PSN_S 0
+#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
+
+#define	V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
+#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
+
+#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
+
+#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
+
+#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
+
+#define	V2_QPC_BYTE_252_TX_CQN_S 0
+#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
+
+#define	V2_QPC_BYTE_252_SIG_TYPE_S 24
+
+#define	V2_QPC_BYTE_252_ERR_TYPE_S 25
+#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
+
+#define	V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
+#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
+
+#define	V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
+#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
+
+struct hns_roce_v2_cqe {
+	u32	byte_4;
+	u32	rkey_immtdata;
+	u32	byte_12;
+	u32	byte_16;
+	u32	byte_cnt;
+	u32	smac;
+	u32	byte_28;
+	u32	byte_32;
+};
+
+#define	V2_CQE_BYTE_4_OPCODE_S 0
+#define V2_CQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define	V2_CQE_BYTE_4_RQ_INLINE_S 5
+
+#define	V2_CQE_BYTE_4_S_R_S 6
+
+#define	V2_CQE_BYTE_4_OWNER_S 7
+
+#define	V2_CQE_BYTE_4_STATUS_S 8
+#define V2_CQE_BYTE_4_STATUS_M GENMASK(15, 8)
+
+#define	V2_CQE_BYTE_4_WQE_INDX_S 16
+#define V2_CQE_BYTE_4_WQE_INDX_M GENMASK(31, 16)
+
+#define	V2_CQE_BYTE_12_XRC_SRQN_S 0
+#define V2_CQE_BYTE_12_XRC_SRQN_M GENMASK(23, 0)
+
+#define	V2_CQE_BYTE_16_LCL_QPN_S 0
+#define V2_CQE_BYTE_16_LCL_QPN_M GENMASK(23, 0)
+
+#define	V2_CQE_BYTE_16_SUB_STATUS_S 24
+#define V2_CQE_BYTE_16_SUB_STATUS_M GENMASK(31, 24)
+
+#define	V2_CQE_BYTE_28_SMAC_4_S 0
+#define V2_CQE_BYTE_28_SMAC_4_M	GENMASK(7, 0)
+
+#define	V2_CQE_BYTE_28_SMAC_5_S 8
+#define V2_CQE_BYTE_28_SMAC_5_M	GENMASK(15, 8)
+
+#define	V2_CQE_BYTE_28_PORT_TYPE_S 16
+#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+
+#define	V2_CQE_BYTE_32_RMT_QPN_S 0
+#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
+
+#define	V2_CQE_BYTE_32_SL_S 24
+#define V2_CQE_BYTE_32_SL_M GENMASK(26, 24)
+
+#define	V2_CQE_BYTE_32_PORTN_S 27
+#define V2_CQE_BYTE_32_PORTN_M GENMASK(29, 27)
+
+#define	V2_CQE_BYTE_32_GRH_S 30
+
+#define	V2_CQE_BYTE_32_LPK_S 31
+
+struct hns_roce_v2_mpt_entry {
+	__le32	byte_4_pd_hop_st;
+	__le32	byte_8_mw_cnt_en;
+	__le32	byte_12_mw_pa;
+	__le32	bound_lkey;
+	__le32	len_l;
+	__le32	len_h;
+	__le32	lkey;
+	__le32	va_l;
+	__le32	va_h;
+	__le32	pbl_size;
+	__le32	pbl_ba_l;
+	__le32	byte_48_mode_ba;
+	__le32	pa0_l;
+	__le32	byte_56_pa0_h;
+	__le32	pa1_l;
+	__le32	byte_64_buf_pa1;
+};
+
+#define V2_MPT_BYTE_4_MPT_ST_S 0
+#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
+
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_S 2
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_S 4
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_M GENMASK(7, 4)
+
+#define V2_MPT_BYTE_4_PD_S 8
+#define V2_MPT_BYTE_4_PD_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_8_RA_EN_S 0
+
+#define V2_MPT_BYTE_8_R_INV_EN_S 1
+
+#define V2_MPT_BYTE_8_L_INV_EN_S 2
+
+#define V2_MPT_BYTE_8_BIND_EN_S 3
+
+#define V2_MPT_BYTE_8_ATOMIC_EN_S 4
+
+#define V2_MPT_BYTE_8_RR_EN_S 5
+
+#define V2_MPT_BYTE_8_RW_EN_S 6
+
+#define V2_MPT_BYTE_8_LW_EN_S 7
+
+#define V2_MPT_BYTE_12_PA_S 1
+
+#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
+
+#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
+#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_48_PBL_BA_H_S 0
+#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
+
+#define V2_MPT_BYTE_48_BLK_MODE_S 29
+
+#define V2_MPT_BYTE_56_PA0_H_S 0
+#define V2_MPT_BYTE_56_PA0_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PA1_H_S 0
+#define V2_MPT_BYTE_64_PA1_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define	V2_DB_BYTE_4_TAG_S 0
+#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define	V2_DB_BYTE_4_CMD_S 24
+#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_DB_PARAMETER_CONS_IDX_S 0
+#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
+
+#define V2_DB_PARAMETER_SL_S 16
+#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+
+struct hns_roce_v2_cq_db {
+	u32	byte_4;
+	u32	parameter;
+};
+
+#define	V2_CQ_DB_BYTE_4_TAG_S 0
+#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define	V2_CQ_DB_BYTE_4_CMD_S 24
+#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
+#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
+
+#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
+#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
+
+#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+
+struct hns_roce_v2_rc_send_wqe {
+	u32		byte_4;
+	u32		msg_len;
+	u32		inv_key_immtdata;
+	u32		byte_16;
+	u32		byte_20;
+	u32		rkey;
+	u64		va;
+};
+
+#define	V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9
+
+#define V2_RC_SEND_WQE_BYTE_4_SO_S 10
+
+#define V2_RC_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+
+#define	V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
+#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
+
+#define	V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+struct hns_roce_v2_wqe_data_seg {
+	__be32    len;
+	__be32    lkey;
+	__be64    addr;
+};
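
The data segment above is a big-endian scatter/gather element. A hypothetical helper (not part of this patch) that fills one from a core struct ib_sge would be:

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    const struct ib_sge *sg)
{
	dseg->len  = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}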
+
+struct hns_roce_v2_db {
+	u32	byte_4;
+	u32	parameter;
+};
+
+struct hns_roce_query_version {
+	__le16 rocee_vendor_id;
+	__le16 rocee_hw_version;
+	__le32 rsv[5];
+};
+
+struct hns_roce_cfg_global_param {
+	__le32 time_cfg_udp_port;
+	__le32 rsv[5];
+};
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
+
+struct hns_roce_pf_res {
+	__le32	rsv;
+	__le32	qpc_bt_idx_num;
+	__le32	srqc_bt_idx_num;
+	__le32	cqc_bt_idx_num;
+	__le32	mpt_bt_idx_num;
+	__le32	eqc_bt_idx_num;
+};
+
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_a {
+	u32 vf_id;
+	u32 vf_qpc_bt_idx_num;
+	u32 vf_srqc_bt_idx_num;
+	u32 vf_cqc_bt_idx_num;
+	u32 vf_mpt_bt_idx_num;
+	u32 vf_eqc_bt_idx_num;
+};
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
+#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
+#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_b {
+	u32 rsv0;
+	u32 vf_smac_idx_num;
+	u32 vf_sgid_idx_num;
+	u32 vf_qid_idx_sl_num;
+	u32 rsv[2];
+};
+
+#define VF_RES_B_DATA_0_VF_ID_S 0
+#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
+#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
+#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
+#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
+
+#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
+/* Reg field definition */
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
+
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
+
+struct hns_roce_cfg_bt_attr {
+	u32 vf_qpc_cfg;
+	u32 vf_srqc_cfg;
+	u32 vf_cqc_cfg;
+	u32 vf_mpt_cfg;
+	u32 rsv[2];
+};
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
+struct hns_roce_cmq_desc {
+	u16 opcode;
+	u16 flag;
+	u16 retval;
+	u16 rsv;
+	u32 data[6];
+};
+
+#define ROCEE_VF_MB_CFG0_REG		0x40
+#define ROCEE_VF_MB_STATUS_REG		0x58
+
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS	10000
+
+#define HNS_ROCE_HW_RUN_BIT_SHIFT	31
+#define HNS_ROCE_HW_MB_STATUS_MASK	0xFF
+
+#define HNS_ROCE_VF_MB4_TAG_MASK	0xFFFFFF00
+#define HNS_ROCE_VF_MB4_TAG_SHIFT	8
+
+#define HNS_ROCE_VF_MB4_CMD_MASK	0xFF
+#define HNS_ROCE_VF_MB4_CMD_SHIFT	0
+
+#define HNS_ROCE_VF_MB5_EVENT_MASK	0x10000
+#define HNS_ROCE_VF_MB5_EVENT_SHIFT	16
+
+#define HNS_ROCE_VF_MB5_TOKEN_MASK	0xFFFF
+#define HNS_ROCE_VF_MB5_TOKEN_SHIFT	0
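
With the mask/shift pairs above, packing and unpacking the VF mailbox words is ordinary shift-and-mask arithmetic; for example, in this sketch tag, cmd and mb5 are assumed locals:

	u32 mb4 = ((tag << HNS_ROCE_VF_MB4_TAG_SHIFT) &
		   HNS_ROCE_VF_MB4_TAG_MASK) |
		  ((cmd << HNS_ROCE_VF_MB4_CMD_SHIFT) &
		   HNS_ROCE_VF_MB4_CMD_MASK);
	u16 token = (mb5 & HNS_ROCE_VF_MB5_TOKEN_MASK) >>
		    HNS_ROCE_VF_MB5_TOKEN_SHIFT;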
+
+struct hns_roce_v2_cmq_ring {
+	dma_addr_t desc_dma_addr;
+	struct hns_roce_cmq_desc *desc;
+	u32 head;
+	u32 tail;
+
+	u16 buf_size;
+	u16 desc_num;
+	int next_to_use;
+	int next_to_clean;
+	u8 flag;
+	spinlock_t lock; /* command queue lock */
+};
+
+struct hns_roce_v2_cmq {
+	struct hns_roce_v2_cmq_ring csq;
+	struct hns_roce_v2_cmq_ring crq;
+	u16 tx_timeout;
+	u16 last_status;
+};
+
+struct hns_roce_v2_priv {
+	struct hns_roce_v2_cmq cmq;
+};
+
+#endif
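
The CMQ ring declared above is a standard producer/consumer descriptor ring; free-slot accounting is modular arithmetic on head and tail, e.g. in this sketch (not the driver's actual helper):

static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int used = (ring->head - ring->tail + ring->desc_num) %
		   ring->desc_num;

	/* keep one slot empty so a full ring is distinguishable from empty */
	return ring->desc_num - used - 1;
}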

+ 92 - 250
drivers/infiniband/hw/hns/hns_roce_main.c

@@ -57,20 +57,21 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 {
 	return gid_index * hr_dev->caps.num_ports + port;
 }
+EXPORT_SYMBOL_GPL(hns_get_gid_index);
 
-static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
 	u8 phy_port;
 	u32 i = 0;
 
 	if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
-		return;
+		return 0;
 
 	for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
 		hr_dev->dev_addr[port][i] = addr[i];
 
 	phy_port = hr_dev->iboe.phy_port[port];
-	hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
 }
 
 static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
@@ -116,8 +117,9 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 			   unsigned long event)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *netdev;
+	int ret = 0;
 
 	netdev = hr_dev->iboe.netdevs[port];
 	if (!netdev) {
@@ -130,7 +132,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	case NETDEV_CHANGE:
 	case NETDEV_REGISTER:
 	case NETDEV_CHANGEADDR:
-		hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
 		break;
 	case NETDEV_DOWN:
 		/*
@@ -142,7 +144,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		break;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int hns_roce_netdev_event(struct notifier_block *self,
@@ -171,12 +173,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
 
 static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 {
+	int ret;
 	u8 i;
 
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
-		hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
-				    hr_dev->caps.max_mtu);
-		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
+		if (hr_dev->hw->set_mtu)
+			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+					    hr_dev->caps.max_mtu);
+		ret = hns_roce_set_mac(hr_dev, i,
+				       hr_dev->iboe.netdevs[i]->dev_addr);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -200,7 +207,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	props->max_qp_wr = hr_dev->caps.max_wqes;
 	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
 				  IB_DEVICE_RC_RNR_NAK_GEN;
-	props->max_sge = hr_dev->caps.max_sq_sg;
+	props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
 	props->max_sge_rd = 1;
 	props->max_cq = hr_dev->caps.num_cqs;
 	props->max_cqe = hr_dev->caps.max_cqes;
@@ -238,7 +245,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 			       struct ib_port_attr *props)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *net_dev;
 	unsigned long flags;
 	enum ib_mtu mtu;
@@ -379,7 +386,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 				       to_hr_ucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+	} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
+		   hr_dev->tptr_size) {
 		/* vm_pgoff: 1 -- TPTR */
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
@@ -416,7 +424,6 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
 
-	unregister_inetaddr_notifier(&iboe->nb_inet);
 	unregister_netdevice_notifier(&iboe->nb);
 	ib_unregister_device(&hr_dev->ib_dev);
 }
@@ -426,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	int ret;
 	struct hns_roce_ib_iboe *iboe = NULL;
 	struct ib_device *ib_dev = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	iboe = &hr_dev->iboe;
 	spin_lock_init(&iboe->lock);
@@ -531,173 +538,10 @@ error_failed_setup_mtu_mac:
 	return ret;
 }
 
-static const struct of_device_id hns_roce_of_match[] = {
-	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
-	{},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
-	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
-	{},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static int hns_roce_node_match(struct device *dev, void *fwnode)
-{
-	return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
-	struct device *dev;
-
-	/* get the 'device'corresponding to matching 'fwnode' */
-	dev = bus_find_device(&platform_bus_type, NULL,
-			      fwnode, hns_roce_node_match);
-	/* get the platform device */
-	return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
-	int i;
-	int ret;
-	u8 phy_port;
-	int port_cnt = 0;
-	struct device *dev = &hr_dev->pdev->dev;
-	struct device_node *net_node;
-	struct net_device *netdev = NULL;
-	struct platform_device *pdev = NULL;
-	struct resource *res;
-
-	/* check if we are compatible with the underlying SoC */
-	if (dev_of_node(dev)) {
-		const struct of_device_id *of_id;
-
-		of_id = of_match_node(hns_roce_of_match, dev->of_node);
-		if (!of_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *)of_id->data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific DT data!\n");
-			return -ENXIO;
-		}
-	} else if (is_acpi_device_node(dev->fwnode)) {
-		const struct acpi_device_id *acpi_id;
-
-		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
-		if (!acpi_id) {
-			dev_err(dev, "device is not compatible!\n");
-			return -ENXIO;
-		}
-		hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
-		if (!hr_dev->hw) {
-			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
-			return -ENXIO;
-		}
-	} else {
-		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
-		return -ENXIO;
-	}
-
-	/* get the mapped register base address */
-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "memory resource not found!\n");
-		return -EINVAL;
-	}
-	hr_dev->reg_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(hr_dev->reg_base))
-		return PTR_ERR(hr_dev->reg_base);
-
-	/* read the node_guid of IB device from the DT or ACPI */
-	ret = device_property_read_u8_array(dev, "node-guid",
-					    (u8 *)&hr_dev->ib_dev.node_guid,
-					    GUID_LEN);
-	if (ret) {
-		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* get the RoCE associated ethernet ports or netdevices */
-	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
-		if (dev_of_node(dev)) {
-			net_node = of_parse_phandle(dev->of_node, "eth-handle",
-						    i);
-			if (!net_node)
-				continue;
-			pdev = of_find_device_by_node(net_node);
-		} else if (is_acpi_device_node(dev->fwnode)) {
-			struct acpi_reference_args args;
-			struct fwnode_handle *fwnode;
-
-			ret = acpi_node_get_property_reference(dev->fwnode,
-							       "eth-handle",
-							       i, &args);
-			if (ret)
-				continue;
-			fwnode = acpi_fwnode_handle(args.adev);
-			pdev = hns_roce_find_pdev(fwnode);
-		} else {
-			dev_err(dev, "cannot read data from DT or ACPI\n");
-			return -ENXIO;
-		}
-
-		if (pdev) {
-			netdev = platform_get_drvdata(pdev);
-			phy_port = (u8)i;
-			if (netdev) {
-				hr_dev->iboe.netdevs[port_cnt] = netdev;
-				hr_dev->iboe.phy_port[port_cnt] = phy_port;
-			} else {
-				dev_err(dev, "no netdev found with pdev %s\n",
-					pdev->name);
-				return -ENODEV;
-			}
-			port_cnt++;
-		}
-	}
-
-	if (port_cnt == 0) {
-		dev_err(dev, "unable to get eth-handle for available ports!\n");
-		return -EINVAL;
-	}
-
-	hr_dev->caps.num_ports = port_cnt;
-
-	/* cmd issue mode: 0 is poll, 1 is event */
-	hr_dev->cmd_mod = 1;
-	hr_dev->loop_idc = 0;
-
-	/* read the interrupt names from the DT or ACPI */
-	ret = device_property_read_string_array(dev, "interrupt-names",
-						hr_dev->irq_names,
-						HNS_ROCE_MAX_IRQ_NUM);
-	if (ret < 0) {
-		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
-		return ret;
-	}
-
-	/* fetch the interrupt numbers */
-	for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
-		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
-		if (hr_dev->irq[i] <= 0) {
-			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
 				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -707,6 +551,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 		return ret;
 	}
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_init_hem_table(hr_dev,
+				      &hr_dev->mr_table.mtt_cqe_table,
+				      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+				      hr_dev->caps.num_cqe_segs, 1);
+		if (ret) {
+			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			goto err_unmap_cqe;
+		}
+	}
+
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
 				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
 				      hr_dev->caps.num_mtpts, 1);
@@ -754,6 +609,12 @@ err_unmap_dmpt:
 
 err_unmap_mtt:
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
 
 	return ret;
 }
@@ -766,7 +627,7 @@ err_unmap_mtt:
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	spin_lock_init(&hr_dev->sm_lock);
 	spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -826,56 +687,45 @@ err_uar_table_free:
 	return ret;
 }
 
-/**
- * hns_roce_probe - RoCE driver entrance
- * @pdev: pointer to platform device
- * Return : int
- *
- */
-static int hns_roce_probe(struct platform_device *pdev)
+int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct hns_roce_dev *hr_dev;
-	struct device *dev = &pdev->dev;
-
-	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
-	if (!hr_dev)
-		return -ENOMEM;
+	struct device *dev = hr_dev->dev;
 
-	hr_dev->pdev = pdev;
-	platform_set_drvdata(pdev, hr_dev);
-
-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
-		dev_err(dev, "Not usable DMA addressing mode\n");
-		ret = -EIO;
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, true);
+		if (ret) {
+			dev_err(dev, "Reset RoCE engine failed!\n");
+			return ret;
+		}
 	}
 
-	ret = hns_roce_get_cfg(hr_dev);
-	if (ret) {
-		dev_err(dev, "Get Configuration failed!\n");
-		goto error_failed_get_cfg;
+	if (hr_dev->hw->cmq_init) {
+		ret = hr_dev->hw->cmq_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "Init RoCE Command Queue failed!\n");
+			goto error_failed_cmq_init;
+		}
 	}
 
-	ret = hr_dev->hw->reset(hr_dev, true);
+	ret = hr_dev->hw->hw_profile(hr_dev);
 	if (ret) {
-		dev_err(dev, "Reset RoCE engine failed!\n");
-		goto error_failed_get_cfg;
+		dev_err(dev, "Get RoCE engine profile failed!\n");
+		goto error_failed_cmd_init;
 	}
 
-	hr_dev->hw->hw_profile(hr_dev);
-
 	ret = hns_roce_cmd_init(hr_dev);
 	if (ret) {
 		dev_err(dev, "cmd init failed!\n");
 		goto error_failed_cmd_init;
 	}
 
-	ret = hns_roce_init_eq_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "eq init failed!\n");
-		goto error_failed_eq_table;
+	if (hr_dev->cmd_mod) {
+		ret = hns_roce_init_eq_table(hr_dev);
+		if (ret) {
+			dev_err(dev, "eq init failed!\n");
+			goto error_failed_eq_table;
+		}
 	}
 
 	if (hr_dev->cmd_mod) {
@@ -898,10 +748,12 @@ static int hns_roce_probe(struct platform_device *pdev)
 		goto error_failed_setup_hca;
 	}
 
-	ret = hr_dev->hw->hw_init(hr_dev);
-	if (ret) {
-		dev_err(dev, "hw_init failed!\n");
-		goto error_failed_engine_init;
+	if (hr_dev->hw->hw_init) {
+		ret = hr_dev->hw->hw_init(hr_dev);
+		if (ret) {
+			dev_err(dev, "hw_init failed!\n");
+			goto error_failed_engine_init;
+		}
 	}
 
 	ret = hns_roce_register_device(hr_dev);
@@ -911,7 +763,8 @@ static int hns_roce_probe(struct platform_device *pdev)
 	return 0;
 
 error_failed_register_device:
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);
 
 error_failed_engine_init:
 	hns_roce_cleanup_bitmap(hr_dev);
@@ -924,58 +777,47 @@ error_failed_init_hem:
 		hns_roce_cmd_use_polling(hr_dev);
 
 error_failed_use_event:
-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);
 
 error_failed_eq_table:
 	hns_roce_cmd_cleanup(hr_dev);
 
 error_failed_cmd_init:
-	ret = hr_dev->hw->reset(hr_dev, false);
-	if (ret)
-		dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);
 
-error_failed_get_cfg:
-	ib_dealloc_device(&hr_dev->ib_dev);
+error_failed_cmq_init:
+	if (hr_dev->hw->reset) {
+		ret = hr_dev->hw->reset(hr_dev, false);
+		if (ret)
+			dev_err(dev, "Dereset RoCE engine failed!\n");
+	}
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hns_roce_init);
 
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
 	hns_roce_unregister_device(hr_dev);
-	hr_dev->hw->hw_exit(hr_dev);
+	if (hr_dev->hw->hw_exit)
+		hr_dev->hw->hw_exit(hr_dev);
 	hns_roce_cleanup_bitmap(hr_dev);
 	hns_roce_cleanup_hem(hr_dev);
 
 	if (hr_dev->cmd_mod)
 		hns_roce_cmd_use_polling(hr_dev);
 
-	hns_roce_cleanup_eq_table(hr_dev);
+	if (hr_dev->cmd_mod)
+		hns_roce_cleanup_eq_table(hr_dev);
 	hns_roce_cmd_cleanup(hr_dev);
-	hr_dev->hw->reset(hr_dev, false);
-
-	ib_dealloc_device(&hr_dev->ib_dev);
-
-	return 0;
+	if (hr_dev->hw->cmq_exit)
+		hr_dev->hw->cmq_exit(hr_dev);
+	if (hr_dev->hw->reset)
+		hr_dev->hw->reset(hr_dev, false);
 }
-
-static struct platform_driver hns_roce_driver = {
-	.probe = hns_roce_probe,
-	.remove = hns_roce_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = hns_roce_of_match,
-		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
-	},
-};
-
-module_platform_driver(hns_roce_driver);
+EXPORT_SYMBOL_GPL(hns_roce_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");

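The main.c hunks above replace the hip06-only platform probe/remove pair
with shared hns_roce_init()/hns_roce_exit() entry points in which every
hardware hook (reset, cmq_init, hw_init, hw_exit, ...) is optional and
NULL-checked, so one core path can serve both the platform (hip06) and
PCI (hip08) back ends. A minimal standalone sketch of that optional-ops
pattern, with invented names rather than the driver's types:

    #include <stdio.h>

    struct hw_ops {                     /* hypothetical ops table */
            int (*reset)(int enable);   /* optional: hip06-style  */
            int (*cmq_init)(void);      /* optional: hip08-style  */
            int (*hw_init)(void);       /* optional               */
    };

    static int core_init(const struct hw_ops *ops)
    {
            int ret;

            if (ops->reset && (ret = ops->reset(1)))
                    return ret;
            if (ops->cmq_init && (ret = ops->cmq_init()))
                    goto err_reset;
            if (ops->hw_init && (ret = ops->hw_init()))
                    goto err_reset;     /* the real code also unwinds cmq */
            return 0;

    err_reset:
            if (ops->reset)
                    ops->reset(0);      /* best-effort de-reset on error */
            return ret;
    }

    static int dummy_hw_init(void) { puts("hw_init"); return 0; }

    int main(void)
    {
            struct hw_ops hip08ish = { .hw_init = dummy_hw_init };

            return core_init(&hip08ish); /* reset/cmq_init just skipped */
    }
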
+ 465 - 47
drivers/infiniband/hw/hns/hns_roce_mr.c

@@ -47,6 +47,7 @@ unsigned long key_to_hw_index(u32 key)
 {
 	return (key << 24) | (key >> 8);
 }
+EXPORT_SYMBOL_GPL(key_to_hw_index);
 
 static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cmd_mailbox *mailbox,
@@ -65,6 +66,7 @@ int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
 				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
 				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
+EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);
 
 static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
 				unsigned long *seg)
@@ -175,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
 }
 
 static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
-				    unsigned long *seg)
+				    unsigned long *seg, u32 mtt_type)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	struct hns_roce_hem_table *table;
+	struct hns_roce_buddy *buddy;
+	int ret;
+
+	if (mtt_type == MTT_TYPE_WQE) {
+		buddy = &mr_table->mtt_buddy;
+		table = &mr_table->mtt_table;
+	} else {
+		buddy = &mr_table->mtt_cqe_buddy;
+		table = &mr_table->mtt_cqe_table;
+	}
 
-	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+	ret = hns_roce_buddy_alloc(buddy, order, seg);
 	if (ret == -1)
 		return -1;
 
-	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+	if (hns_roce_table_get_range(hr_dev, table, *seg,
 				     *seg + (1 << order) - 1)) {
-		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+		hns_roce_buddy_free(buddy, *seg, order);
 		return -1;
 	}
 
@@ -196,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 		      struct hns_roce_mtt *mtt)
 {
-	int ret = 0;
+	int ret;
 	int i;
 
 	/* Page num is zero, correspond to DMA memory register */
@@ -215,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 		++mtt->order;
 
 	/* Allocate MTT entry */
-	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+				       mtt->mtt_type);
 	if (ret == -1)
 		return -ENOMEM;
 
@@ -229,18 +242,261 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
 	if (mtt->order < 0)
 		return;
 
-	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
-				 mtt->first_seg + (1 << mtt->order) - 1);
+	if (mtt->mtt_type == MTT_TYPE_WQE) {
+		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+					mtt->first_seg,
+					mtt->first_seg + (1 << mtt->order) - 1);
+	} else {
+		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+					mtt->first_seg,
+					mtt->first_seg + (1 << mtt->order) - 1);
+	}
+}
+EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
+
+static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_mr *mr, int err_loop_index,
+			       int loop_i, int loop_j)
+{
+	struct device *dev = hr_dev->dev;
+	u32 mhop_num;
+	u32 pbl_bt_sz;
+	u64 bt_idx;
+	int i, j;
+
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	mhop_num = hr_dev->caps.pbl_hop_num;
+
+	i = loop_i;
+	if (mhop_num == 3 && err_loop_index == 2) {
+		for (; i >= 0; i--) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				if (i == loop_i && j >= loop_j)
+					break;
+
+				bt_idx = i * pbl_bt_sz / 8 + j;
+				dma_free_coherent(dev, pbl_bt_sz,
+						  mr->pbl_bt_l2[bt_idx],
+						  mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	} else if (mhop_num == 3 && err_loop_index == 1) {
+		for (i -= 1; i >= 0; i--) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * pbl_bt_sz / 8 + j;
+				dma_free_coherent(dev, pbl_bt_sz,
+						  mr->pbl_bt_l2[bt_idx],
+						  mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	} else if (mhop_num == 2 && err_loop_index == 1) {
+		for (i -= 1; i >= 0; i--)
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+	} else {
+		dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
+			 mhop_num, err_loop_index);
+		return;
+	}
+
+	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
+	mr->pbl_bt_l0 = NULL;
+	mr->pbl_l0_dma_addr = 0;
+}
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+			       struct hns_roce_mr *mr)
+{
+	struct device *dev = hr_dev->dev;
+	int mr_alloc_done = 0;
+	int npages_allocated;
+	int i = 0, j = 0;
+	u32 pbl_bt_sz;
+	u32 mhop_num;
+	u64 pbl_last_bt_num;
+	u64 pbl_bt_cnt = 0;
+	u64 bt_idx;
+	u64 size;
+
+	mhop_num = hr_dev->caps.pbl_hop_num;
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+	if (mhop_num == HNS_ROCE_HOP_NUM_0)
+		return 0;
+
+	/* hop_num = 1 */
+	if (mhop_num == 1) {
+		if (npages > pbl_bt_sz / 8) {
+			dev_err(dev, "npages %d is larger than buf_pg_sz!",
+				npages);
+			return -EINVAL;
+		}
+		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+						 &(mr->pbl_dma_addr),
+						 GFP_KERNEL);
+		if (!mr->pbl_buf)
+			return -ENOMEM;
+
+		mr->pbl_size = npages;
+		mr->pbl_ba = mr->pbl_dma_addr;
+		mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+		return 0;
+	}
+
+	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+				      sizeof(*mr->pbl_l1_dma_addr),
+				      GFP_KERNEL);
+	if (!mr->pbl_l1_dma_addr)
+		return -ENOMEM;
+
+	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+				GFP_KERNEL);
+	if (!mr->pbl_bt_l1)
+		goto err_kcalloc_bt_l1;
+
+	if (mhop_num == 3) {
+		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+					      sizeof(*mr->pbl_l2_dma_addr),
+					      GFP_KERNEL);
+		if (!mr->pbl_l2_dma_addr)
+			goto err_kcalloc_l2_dma;
+
+		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+					sizeof(*mr->pbl_bt_l2),
+					GFP_KERNEL);
+		if (!mr->pbl_bt_l2)
+			goto err_kcalloc_bt_l2;
+	}
+
+	/* alloc L0 BT */
+	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+					   &(mr->pbl_l0_dma_addr),
+					   GFP_KERNEL);
+	if (!mr->pbl_bt_l0)
+		goto err_dma_alloc_l0;
+
+	if (mhop_num == 2) {
+		/* alloc L1 BT */
+		for (i = 0; i < pbl_bt_sz / 8; i++) {
+			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+				size = pbl_bt_sz;
+			} else {
+				npages_allocated = i * (pbl_bt_sz / 8);
+				size = (npages - npages_allocated) * 8;
+			}
+			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+						    &(mr->pbl_l1_dma_addr[i]),
+						    GFP_KERNEL);
+			if (!mr->pbl_bt_l1[i]) {
+				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+				goto err_dma_alloc_l0;
+			}
+
+			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+			pbl_bt_cnt++;
+			if (pbl_bt_cnt >= pbl_last_bt_num)
+				break;
+		}
+	} else if (mhop_num == 3) {
+		/* alloc L1, L2 BT */
+		for (i = 0; i < pbl_bt_sz / 8; i++) {
+			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+						    &(mr->pbl_l1_dma_addr[i]),
+						    GFP_KERNEL);
+			if (!mr->pbl_bt_l1[i]) {
+				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+				goto err_dma_alloc_l0;
+			}
+
+			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * pbl_bt_sz / 8 + j;
+
+				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+					size = pbl_bt_sz;
+				} else {
+					npages_allocated = bt_idx *
+							   (pbl_bt_sz / 8);
+					size = (npages - npages_allocated) * 8;
+				}
+				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+					      dev, size,
+					      &(mr->pbl_l2_dma_addr[bt_idx]),
+					      GFP_KERNEL);
+				if (!mr->pbl_bt_l2[bt_idx]) {
+					hns_roce_loop_free(hr_dev, mr, 2, i, j);
+					goto err_dma_alloc_l0;
+				}
+
+				*(mr->pbl_bt_l1[i] + j) =
+						mr->pbl_l2_dma_addr[bt_idx];
+
+				pbl_bt_cnt++;
+				if (pbl_bt_cnt >= pbl_last_bt_num) {
+					mr_alloc_done = 1;
+					break;
+				}
+			}
+
+			if (mr_alloc_done)
+				break;
+		}
+	}
+
+	mr->l0_chunk_last_num = i + 1;
+	if (mhop_num == 3)
+		mr->l1_chunk_last_num = j + 1;
+
+	mr->pbl_size = npages;
+	mr->pbl_ba = mr->pbl_l0_dma_addr;
+	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+	return 0;
+
+err_dma_alloc_l0:
+	kfree(mr->pbl_bt_l2);
+	mr->pbl_bt_l2 = NULL;
+
+err_kcalloc_bt_l2:
+	kfree(mr->pbl_l2_dma_addr);
+	mr->pbl_l2_dma_addr = NULL;
+
+err_kcalloc_l2_dma:
+	kfree(mr->pbl_bt_l1);
+	mr->pbl_bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+	kfree(mr->pbl_l1_dma_addr);
+	mr->pbl_l1_dma_addr = NULL;
+
+	return -ENOMEM;
 }
 
 static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 			     u64 size, u32 access, int npages,
 			     struct hns_roce_mr *mr)
 {
+	struct device *dev = hr_dev->dev;
 	unsigned long index = 0;
 	int ret = 0;
-	struct device *dev = &hr_dev->pdev->dev;
 
 	/* Allocate a key for mr from mr_table */
 	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -258,22 +514,117 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 		mr->type = MR_TYPE_DMA;
 		mr->pbl_buf = NULL;
 		mr->pbl_dma_addr = 0;
+		/* PBL multi-hop addressing parameters */
+		mr->pbl_bt_l2 = NULL;
+		mr->pbl_bt_l1 = NULL;
+		mr->pbl_bt_l0 = NULL;
+		mr->pbl_l2_dma_addr = NULL;
+		mr->pbl_l1_dma_addr = NULL;
+		mr->pbl_l0_dma_addr = 0;
 	} else {
 		mr->type = MR_TYPE_MR;
-		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-						 &(mr->pbl_dma_addr),
-						 GFP_KERNEL);
-		if (!mr->pbl_buf)
-			return -ENOMEM;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+							 &(mr->pbl_dma_addr),
+							 GFP_KERNEL);
+			if (!mr->pbl_buf)
+				return -ENOMEM;
+		} else {
+			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+		}
 	}
 
-	return 0;
+	return ret;
+}
+
+static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_mr *mr)
+{
+	struct device *dev = hr_dev->dev;
+	int npages_allocated;
+	int npages;
+	int i, j;
+	u32 pbl_bt_sz;
+	u32 mhop_num;
+	u64 bt_idx;
+
+	npages = ib_umem_page_count(mr->umem);
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	mhop_num = hr_dev->caps.pbl_hop_num;
+
+	if (mhop_num == HNS_ROCE_HOP_NUM_0)
+		return;
+
+	/* hop_num = 1 */
+	if (mhop_num == 1) {
+		dma_free_coherent(dev, (unsigned int)(npages * 8),
+				  mr->pbl_buf, mr->pbl_dma_addr);
+		return;
+	}
+
+	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
+			  mr->pbl_l0_dma_addr);
+
+	if (mhop_num == 2) {
+		for (i = 0; i < mr->l0_chunk_last_num; i++) {
+			if (i == mr->l0_chunk_last_num - 1) {
+				npages_allocated = i * (pbl_bt_sz / 8);
+
+				dma_free_coherent(dev,
+					      (npages - npages_allocated) * 8,
+					      mr->pbl_bt_l1[i],
+					      mr->pbl_l1_dma_addr[i]);
+
+				break;
+			}
+
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+		}
+	} else if (mhop_num == 3) {
+		for (i = 0; i < mr->l0_chunk_last_num; i++) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * (pbl_bt_sz / 8) + j;
+
+				if ((i == mr->l0_chunk_last_num - 1)
+				    && j == mr->l1_chunk_last_num - 1) {
+					npages_allocated = bt_idx *
+							   (pbl_bt_sz / 8);
+
+					dma_free_coherent(dev,
+					      (npages - npages_allocated) * 8,
+					      mr->pbl_bt_l2[bt_idx],
+					      mr->pbl_l2_dma_addr[bt_idx]);
+
+					break;
+				}
+
+				dma_free_coherent(dev, pbl_bt_sz,
+						mr->pbl_bt_l2[bt_idx],
+						mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	}
+
+	kfree(mr->pbl_bt_l1);
+	kfree(mr->pbl_l1_dma_addr);
+	mr->pbl_bt_l1 = NULL;
+	mr->pbl_l1_dma_addr = NULL;
+	if (mhop_num == 3) {
+		kfree(mr->pbl_bt_l2);
+		kfree(mr->pbl_l2_dma_addr);
+		mr->pbl_bt_l2 = NULL;
+		mr->pbl_l2_dma_addr = NULL;
+	}
 }
 
 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_mr *mr)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int npages = 0;
 	int ret;
 
@@ -286,10 +637,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 
 	if (mr->size != ~0ULL) {
 		npages = ib_umem_page_count(mr->umem);
-		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
-				  mr->pbl_dma_addr);
+
+		if (!hr_dev->caps.pbl_hop_num)
+			dma_free_coherent(dev, (unsigned int)(npages * 8),
+					  mr->pbl_buf, mr->pbl_dma_addr);
+		else
+			hns_roce_mhop_free(hr_dev, mr);
 	}
 
+	if (mr->enabled)
+		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+				   key_to_hw_index(mr->key));
+
 	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
 			     key_to_hw_index(mr->key), BITMAP_NO_RR);
 }
@@ -299,7 +658,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
 {
 	int ret;
 	unsigned long mtpt_idx = key_to_hw_index(mr->key);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
 
@@ -345,10 +704,11 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_mtt *mtt, u32 start_index,
 				    u32 npages, u64 *page_list)
 {
-	u32 i = 0;
-	__le64 *mtts = NULL;
+	struct hns_roce_hem_table *table;
 	dma_addr_t dma_handle;
+	__le64 *mtts;
 	u32 s = start_index * sizeof(u64);
+	u32 i;
 
 	/* All MTTs must fit in the same page */
 	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
@@ -358,15 +718,24 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
 		return -EINVAL;
 
-	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+	if (mtt->mtt_type == MTT_TYPE_WQE)
+		table = &hr_dev->mr_table.mtt_table;
+	else
+		table = &hr_dev->mr_table.mtt_cqe_table;
+
+	mtts = hns_roce_table_find(hr_dev, table,
 				mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
 				&dma_handle);
 	if (!mtts)
 		return -ENOMEM;
 
 	/* Save page addr, low 12 bits : 0 */
-	for (i = 0; i < npages; ++i)
-		mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		if (!hr_dev->caps.mtt_hop_num)
+			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+		else
+			mtts[i] = cpu_to_le64(page_list[i]);
+	}
 
 	return 0;
 }
@@ -400,9 +769,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
 {
-	u32 i = 0;
-	int ret = 0;
-	u64 *page_list = NULL;
+	u64 *page_list;
+	int ret;
+	u32 i;
 
 	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
 	if (!page_list)
@@ -425,7 +794,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	int ret;
 
 	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
 				   hr_dev->caps.num_mtpts,
@@ -439,8 +808,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 	if (ret)
 		goto err_buddy;
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+					  ilog2(hr_dev->caps.num_cqe_segs));
+		if (ret)
+			goto err_buddy_cqe;
+	}
 	return 0;
 
+err_buddy_cqe:
+	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
 err_buddy:
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 	return ret;
@@ -451,13 +829,15 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
 
 	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 }
 
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
 {
-	int ret = 0;
-	struct hns_roce_mr *mr = NULL;
+	struct hns_roce_mr *mr;
+	int ret;
 
 	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
 	if (mr == NULL)
@@ -526,16 +906,36 @@ out:
 	return ret;
 }
 
-static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
+				     struct hns_roce_mr *mr,
 				     struct ib_umem *umem)
 {
-	int i = 0;
-	int entry;
 	struct scatterlist *sg;
+	int i = 0, j = 0;
+	int entry;
+
+	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
+		return 0;
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
-		i++;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
+			i++;
+		} else if (hr_dev->caps.pbl_hop_num == 1) {
+			mr->pbl_buf[i] = sg_dma_address(sg);
+			i++;
+		} else {
+			if (hr_dev->caps.pbl_hop_num == 2)
+				mr->pbl_bt_l1[i][j] = sg_dma_address(sg);
+			else if (hr_dev->caps.pbl_hop_num == 3)
+				mr->pbl_bt_l2[i][j] = sg_dma_address(sg);
+
+			j++;
+			if (j >= (PAGE_SIZE / 8)) {
+				i++;
+				j = 0;
+			}
+		}
 	}
 
 	/* Memory barrier */
@@ -549,10 +949,12 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				   struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
-	struct hns_roce_mr *mr = NULL;
-	int ret = 0;
-	int n = 0;
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_mr *mr;
+	int bt_size;
+	int ret;
+	int n;
+	int i;
 
 	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
@@ -573,11 +975,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err_umem;
 	}
 
-	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
-		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
-			length);
-		ret = -EINVAL;
-		goto err_umem;
+	if (!hr_dev->caps.pbl_hop_num) {
+		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+			dev_err(dev,
+			     " MR len %lld err. MR is limited to 4G at most!\n",
+			     length);
+			ret = -EINVAL;
+			goto err_umem;
+		}
+	} else {
+		int pbl_size = 1;
+
+		bt_size = (1 << PAGE_SHIFT) / 8;
+		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
+			pbl_size *= bt_size;
+		if (n > pbl_size) {
+			dev_err(dev,
+			    " MR len %lld err. MR page num is limited to %d!\n",
+			    length, pbl_size);
+			ret = -EINVAL;
+			goto err_umem;
+		}
 	}
 
 	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -585,7 +1003,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (ret)
 		goto err_umem;
 
-	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
 	if (ret)
 		goto err_mr;
 

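The multi-hop PBL code above builds a one-, two-, or three-level tree of
base-address tables (BTs), where each BT page holds pbl_bt_sz / 8
eight-byte entries; that is why hns_roce_reg_user_mr multiplies bt_size
once per hop when bounding the page count. A quick standalone check of
the capacity arithmetic, assuming 4 KiB BT pages (pbl_ba_pg_sz == 0):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pbl_bt_sz = 1UL << 12;  /* 4 KiB BT page        */
            unsigned long per_bt = pbl_bt_sz / 8; /* 512 entries per page */
            unsigned long cap = 1;
            int hop;

            for (hop = 1; hop <= 3; hop++) {
                    cap *= per_bt;  /* (pbl_bt_sz / 8) ^ hop_num pages */
                    printf("hop_num=%d: up to %lu pages (%lu MiB)\n",
                           hop, cap, cap >> 8); /* pages * 4 KiB / 1 MiB */
            }
            return 0;
    }
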
+ 14 - 6
drivers/infiniband/hw/hns/hns_roce_pd.c

@@ -31,6 +31,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include "hns_roce_device.h"
 
 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
 				struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_pd *pd;
 	int ret;
 
@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
 
 	return &pd->ibpd;
 }
+EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
 
 int hns_roce_dealloc_pd(struct ib_pd *pd)
 {
@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
 
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 		uar->index = (uar->index - 1) %
 			     (hr_dev->caps.phy_num_uars - 1) + 1;
 
-	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
-		return -EINVAL;
+	if (!dev_is_pci(hr_dev->dev)) {
+		res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+			return -EINVAL;
+		}
+		uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+	} else {
+		uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
+			   >> PAGE_SHIFT);
 	}
-	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
 
 	return 0;
 }

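The UAR hunk above takes the doorbell page from PCI BAR 2 when the
device is a PCI function (hip08) instead of the platform MEM resource
used by hip06; either way the mapped PFN is just a physical address
shifted down by PAGE_SHIFT. A toy illustration with made-up base
addresses:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long long res_start  = 0x20000000ULL; /* hypothetical platform MEM */
            unsigned long long bar2_start = 0x80000000ULL; /* hypothetical PCI BAR 2    */
            unsigned int uar_index = 3;

            /* hip06-style: resource base plus a per-UAR page offset */
            printf("platform pfn = 0x%llx\n",
                   (res_start >> PAGE_SHIFT) + uar_index);
            /* hip08-style: BAR 2 base only, as in the hunk above */
            printf("pci pfn      = 0x%llx\n", bar2_start >> PAGE_SHIFT);
            return 0;
    }
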
+ 137 - 42
drivers/infiniband/hw/hns/hns_roce_qp.c

@@ -44,7 +44,7 @@
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_qp *qp;
 
 	spin_lock(&qp_table->lock);
@@ -136,6 +136,7 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
 		return HNS_ROCE_QP_NUM_STATE;
 	}
 }
+EXPORT_SYMBOL_GPL(to_hns_roce_state);
 
 static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
 				 struct hns_roce_qp *hr_qp)
@@ -153,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
 				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
 	spin_unlock_irq(&qp_table->lock);
 	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
 		goto err_put_irrl;
 	}
 
@@ -171,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
 			     struct hns_roce_qp *hr_qp)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret;
 
 	if (!qpn)
@@ -227,6 +228,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
 	spin_unlock_irqrestore(&qp_table->lock, flags);
 }
+EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
 
 void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
@@ -241,6 +243,7 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_qp_free);
 
 void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 			       int cnt)
@@ -252,13 +255,14 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 
 	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
 }
+EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
 
 static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 				struct ib_qp_cap *cap, int is_user, int has_srq,
 				struct hns_roce_qp *hr_qp)
 {
+	struct device *dev = hr_dev->dev;
 	u32 max_cnt;
-	struct device *dev = &hr_dev->pdev->dev;
 
 	/* Check the validity of QP support capacity */
 	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -282,20 +286,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 			return -EINVAL;
 		}
 
-		/* In v1 engine, parameter verification procession */
-		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
-			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+		if (hr_dev->caps.min_wqes)
+			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
+		else
+			max_cnt = cap->max_recv_wr;
+
 		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
 
 		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
-			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
 			return -EINVAL;
 		}
 
 		max_cnt = max(1U, cap->max_recv_sge);
 		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
-		/* WQE is fixed for 64B */
-		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+		if (hr_dev->caps.max_rq_sg <= 2)
+			hr_qp->rq.wqe_shift =
+					ilog2(hr_dev->caps.max_rq_desc_sz);
+		else
+			hr_qp->rq.wqe_shift =
+					ilog2(hr_dev->caps.max_rq_desc_sz
+					      * hr_qp->rq.max_gs);
 	}
 
 	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
@@ -305,32 +316,77 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+				     struct ib_qp_cap *cap,
 				     struct hns_roce_qp *hr_qp,
 				     struct hns_roce_ib_create_qp *ucmd)
 {
 	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
 	u8 max_sq_stride = ilog2(roundup_sq_stride);
+	u32 max_cnt;
 
 	/* Sanity check SQ size before proceeding */
 	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
 	     ucmd->log_sq_stride > max_sq_stride ||
 	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+		dev_err(hr_dev->dev, "check SQ size error!\n");
+		return -EINVAL;
+	}
+
+	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+			cap->max_send_sge);
 		return -EINVAL;
 	}
 
 	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
 	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
 
+	max_cnt = max(1U, cap->max_send_sge);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
+
+	if (hr_qp->sq.max_gs > 2)
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+							(hr_qp->sq.max_gs - 2));
+	hr_qp->sge.sge_shift = 4;
+
 	/* Get buf size, SQ and RQ  are aligned to page_szie */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+	if (hr_dev->caps.max_sq_sg <= 2) {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
 					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
 					     hr_qp->sq.wqe_shift), PAGE_SIZE);
 
-	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+		hr_qp->sq.offset = 0;
+		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
 					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	} else {
+		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+					     hr_qp->sge.sge_shift), PAGE_SIZE) +
+				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+		hr_qp->sq.offset = 0;
+		if (hr_qp->sge.sge_cnt) {
+			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+							(hr_qp->sq.wqe_cnt <<
+							hr_qp->sq.wqe_shift),
+							PAGE_SIZE);
+			hr_qp->rq.offset = hr_qp->sge.offset +
+					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+						hr_qp->sge.sge_shift),
+						PAGE_SIZE);
+		} else {
+			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+							(hr_qp->sq.wqe_cnt <<
+							hr_qp->sq.wqe_shift),
+							PAGE_SIZE);
+		}
+	}
 
 	return 0;
 }
@@ -339,13 +395,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 				       struct ib_qp_cap *cap,
 				       struct hns_roce_qp *hr_qp)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 max_cnt;
+	int size;
 
 	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
 	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
 	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+		dev_err(dev, "SQ WR or sge or inline data error!\n");
 		return -EINVAL;
 	}
 
@@ -353,27 +410,45 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->sq_max_wqes_per_wr = 1;
 	hr_qp->sq_spare_wqes = 0;
 
-	/* In v1 engine, parameter verification procession */
-	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
-		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+	if (hr_dev->caps.min_wqes)
+		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+	else
+		max_cnt = cap->max_send_wr;
+
 	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
 	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
-		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
 		return -EINVAL;
 	}
 
 	/* Get data_seg numbers */
 	max_cnt = max(1U, cap->max_send_sge);
-	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
 
-	/* Get buf size, SQ and RQ  are aligned to page_szie */
-	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
-					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
-			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					     hr_qp->sq.wqe_shift), PAGE_SIZE);
+	if (hr_qp->sq.max_gs > 2) {
+		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+				     (hr_qp->sq.max_gs - 2));
+		hr_qp->sge.sge_shift = 4;
+	}
+
+	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
 	hr_qp->sq.offset = 0;
-	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
-					      hr_qp->sq.wqe_shift), PAGE_SIZE);
+	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+				 PAGE_SIZE);
+
+	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+		hr_qp->sge.offset = size;
+		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+					  hr_qp->sge.sge_shift, PAGE_SIZE);
+	}
+
+	hr_qp->rq.offset = size;
+	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+				  PAGE_SIZE);
+	hr_qp->buff_size = size;
 
 	/* Get wr and sge number which send */
 	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
@@ -391,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 				     struct ib_udata *udata, unsigned long sqpn,
 				     struct hns_roce_qp *hr_qp)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_qp ucmd;
 	unsigned long qpn = 0;
 	int ret = 0;
@@ -421,7 +496,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
-		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
+						&ucmd);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
 			goto err_out;
@@ -436,6 +512,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
 		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
 					hr_qp->umem->page_shift, &hr_qp->mtt);
 		if (ret) {
@@ -472,10 +549,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 
 		/* QP doorbell register address */
-		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
 				     DB_REG_OFFSET * hr_dev->priv_uar.index;
-		hr_qp->rq.db_reg_l = hr_dev->reg_base +
-				     ROCEE_DB_OTHERS_L_0_REG +
+		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
 				     DB_REG_OFFSET * hr_dev->priv_uar.index;
 
 		/* Allocate QP buf */
@@ -486,6 +562,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
 		/* Write MTT */
 		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
 					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
@@ -522,7 +599,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}
 
-	if ((init_attr->qp_type) == IB_QPT_GSI) {
+	if (init_attr->qp_type == IB_QPT_GSI &&
+	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+		/* In v1 engine, GSI QP context in RoCE engine's register */
 		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_qp_alloc failed!\n");
@@ -571,7 +650,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_sqp *hr_sqp;
 	struct hns_roce_qp *hr_qp;
 	int ret;
@@ -629,6 +708,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 
 	return &hr_qp->ibqp;
 }
+EXPORT_SYMBOL_GPL(hns_roce_create_qp);
 
 int to_hr_qp_type(int qp_type)
 {
@@ -647,6 +727,7 @@ int to_hr_qp_type(int qp_type)
 
 	return transport_type;
 }
+EXPORT_SYMBOL_GPL(to_hr_qp_type);
 
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata)
@@ -654,7 +735,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret = -EINVAL;
 	int p;
 	enum ib_mtu active_mtu;
@@ -692,7 +773,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
 		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
 
-		if (attr->path_mtu > IB_MTU_2048 ||
+		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
+		    attr->path_mtu > IB_MTU_4096) ||
+		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
+		    attr->path_mtu > IB_MTU_2048) ||
 		    attr->path_mtu < IB_MTU_256 ||
 		    attr->path_mtu > active_mtu) {
 			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
@@ -716,9 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ret = -EPERM;
-		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
-			new_state);
+		ret = 0;
 		goto out;
 	}
 
@@ -745,6 +827,7 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);
 
 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
@@ -761,6 +844,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 		spin_unlock_irq(&recv_cq->lock);
 	}
 }
+EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
 
 __be32 send_ieth(struct ib_send_wr *wr)
 {
@@ -774,6 +858,7 @@ __be32 send_ieth(struct ib_send_wr *wr)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(send_ieth);
 
 static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 {
@@ -785,11 +870,20 @@ void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
 {
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
+EXPORT_SYMBOL_GPL(get_recv_wqe);
 
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 {
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
+EXPORT_SYMBOL_GPL(get_send_wqe);
+
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+{
+	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
+					(n << hr_qp->sge.sge_shift));
+}
+EXPORT_SYMBOL_GPL(get_send_extend_sge);
 
 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 			  struct ib_cq *ib_cq)
@@ -808,6 +902,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 
 	return cur + nreq >= hr_wq->max_post;
 }
+EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);
 
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 {
@@ -823,7 +918,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
 			ret);
 		return ret;
 	}

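With max_sq_sg > 2 the QP buffer laid out above gains an extended-SGE
region between the SQ and RQ areas, each region aligned up to
PAGE_SIZE, with sge_cnt = roundup_pow_of_two(sq.wqe_cnt * (max_gs - 2))
16-byte entries. A small sketch of that offset arithmetic (example
sizes, not driver defaults; the roundup is omitted because the example
count is already a power of two):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long sq_wqe_cnt = 128, sq_wqe_shift = 6; /* 64 B SQ WQEs  */
            unsigned long rq_wqe_cnt = 128, rq_wqe_shift = 7; /* 128 B RQ WQEs */
            unsigned long max_gs = 4, sge_shift = 4;          /* 16 B SGEs     */
            unsigned long sge_cnt = sq_wqe_cnt * (max_gs - 2);

            unsigned long sge_off = ALIGN_UP(sq_wqe_cnt << sq_wqe_shift,
                                             PAGE_SIZE);
            unsigned long rq_off  = sge_off +
                                    ALIGN_UP(sge_cnt << sge_shift, PAGE_SIZE);
            unsigned long total   = rq_off +
                                    ALIGN_UP(rq_wqe_cnt << rq_wqe_shift,
                                             PAGE_SIZE);

            printf("sq@0 sge@%lu rq@%lu buff_size=%lu\n",
                   sge_off, rq_off, total);
            return 0;
    }
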
+ 1 - 0
drivers/infiniband/hw/i40iw/Kconfig

@@ -1,6 +1,7 @@
 config INFINIBAND_I40IW
 	tristate "Intel(R) Ethernet X722 iWARP Driver"
 	depends on INET && I40E
+	depends on PCI
 	select GENERIC_ALLOCATOR
 	---help---
 	Intel(R) Ethernet X722 iWARP Driver

+ 13 - 7
drivers/infiniband/hw/i40iw/i40iw_cm.c

@@ -1199,7 +1199,6 @@ static void i40iw_cm_timer_tick(unsigned long pass)
 	struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
 	u32 settimer = 0;
 	unsigned long timetosend;
-	struct i40iw_sc_dev *dev;
 	unsigned long flags;
 
 	struct list_head timer_list;
@@ -1267,13 +1266,15 @@ static void i40iw_cm_timer_tick(unsigned long pass)
 			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 			goto done;
 		}
-		cm_node->cm_core->stats_pkt_retrans++;
 		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
 
 		vsi = &cm_node->iwdev->vsi;
-		dev = cm_node->dev;
-		atomic_inc(&send_entry->sqbuf->refcount);
-		i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+
+		if (!cm_node->ack_rcvd) {
+			atomic_inc(&send_entry->sqbuf->refcount);
+			i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+			cm_node->cm_core->stats_pkt_retrans++;
+		}
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		if (send_entry->send_retrans) {
 			send_entry->retranscount--;
@@ -1524,8 +1525,8 @@ static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool acti
 				break;
 			}
 		}
-			if (!ret)
-				clear_bit(port, cm_core->active_side_ports);
+		if (!ret)
+			clear_bit(port, cm_core->active_side_ports);
 		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
 	} else {
 		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -2181,6 +2182,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
 	cm_node->cm_id = cm_info->cm_id;
 	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
 	spin_lock_init(&cm_node->retrans_list_lock);
+	cm_node->ack_rcvd = false;
 
 	atomic_set(&cm_node->ref_count, 1);
 	/* associate our parent CM core */
@@ -2406,6 +2408,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
 	case I40IW_CM_STATE_FIN_WAIT1:
 	case I40IW_CM_STATE_LAST_ACK:
 		cm_node->cm_id->rem_ref(cm_node->cm_id);
+		/* fall through */
 	case I40IW_CM_STATE_TIME_WAIT:
 		cm_node->state = I40IW_CM_STATE_CLOSED;
 		i40iw_rem_ref_cm_node(cm_node);
@@ -2719,7 +2722,10 @@ static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
 		if (datasize) {
 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+			cm_node->ack_rcvd = false;
 			i40iw_handle_rcv_mpa(cm_node, rbuf);
+		} else {
+			cm_node->ack_rcvd = true;
 		}
 		break;
 	case I40IW_CM_STATE_LISTENING:

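The ack_rcvd flag introduced above lets the CM retransmit timer skip a
resend (and the stats_pkt_retrans bump) once a bare ACK has already
covered the outstanding segment; receiving new payload clears the flag
again. A toy model of that decision, with invented types rather than
the driver's structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct conn {                   /* hypothetical per-connection state */
            bool ack_rcvd;
            unsigned long retrans;
    };

    /* timer tick: retransmit only while the peer has not ACKed */
    static void timer_tick(struct conn *c)
    {
            if (!c->ack_rcvd) {
                    c->retrans++;
                    puts("retransmit");
            } else {
                    puts("suppressed: already acked");
            }
    }

    /* inbound ACK: a bare ACK sets the flag, data clears it */
    static void rx_ack(struct conn *c, int payload_len)
    {
            c->ack_rcvd = (payload_len == 0);
    }

    int main(void)
    {
            struct conn c = { 0 };

            timer_tick(&c);         /* retransmits       */
            rx_ack(&c, 0);
            timer_tick(&c);         /* suppressed        */
            rx_ack(&c, 1400);
            timer_tick(&c);         /* retransmits again */
            return 0;
    }
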
+ 1 - 0
drivers/infiniband/hw/i40iw/i40iw_cm.h

@@ -360,6 +360,7 @@ struct i40iw_cm_node {
 
 	u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
 	struct i40iw_kmem_info mpa_hdr;
+	bool ack_rcvd;
 };
 
 /* structure for client or CM to fill when making CM api calls. */

+ 1 - 0
drivers/infiniband/hw/i40iw/i40iw_hw.c

@@ -410,6 +410,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
 		case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
 			ctx_info->err_rq_idx_valid = false;
+			/* fall through */
 		default:
 			if (!info->sq && ctx_info->err_rq_idx_valid) {
 				ctx_info->err_rq_idx = info->wqe_idx;

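Several hunks in this series (here and in the mlx4, mlx5, and nes files
below) add nothing but "fall through" comments ahead of case labels
where the missing break is deliberate; GCC 7's -Wimplicit-fallthrough
recognizes such comments and stays quiet. A minimal compilable
illustration of the annotated pattern:

    #include <stdio.h>

    enum opcode { OP_SEND_IMM, OP_SEND, OP_OTHER };

    static const char *classify(enum opcode op, int *with_imm)
    {
            *with_imm = 0;
            switch (op) {
            case OP_SEND_IMM:
                    *with_imm = 1;  /* extra work, then share the SEND path */
                    /* fall through */
            case OP_SEND:
                    return "send";
            default:
                    return "other";
            }
    }

    int main(void)
    {
            int imm;
            const char *kind = classify(OP_SEND_IMM, &imm);

            printf("%s with_imm=%d\n", kind, imm); /* send with_imm=1 */
            return 0;
    }
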
+ 1 - 0
drivers/infiniband/hw/i40iw/i40iw_puda.c

@@ -813,6 +813,7 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 	switch (rsrc->completion) {
 	case PUDA_HASH_CRC_COMPLETE:
 		i40iw_free_hash_desc(rsrc->hash_desc);
+		/* fall through */
 	case PUDA_QP_CREATED:
 		if (!reset)
 			i40iw_puda_free_qp(rsrc);

+ 13 - 0
drivers/infiniband/hw/i40iw/i40iw_uk.c

@@ -821,6 +821,18 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 		I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
 		pring = &qp->rq_ring;
 	} else {
+		if (qp->first_sq_wq) {
+			qp->first_sq_wq = false;
+			if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
+				I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+				I40IW_RING_MOVE_TAIL(cq->cq_ring);
+				set_64bit_val(cq->shadow_area, 0,
+					      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+				memset(info, 0, sizeof(struct i40iw_cq_poll_info));
+				return i40iw_cq_poll_completion(cq, info);
+			}
+		}
+
 		if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
 			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
 			info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
@@ -988,6 +1000,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
 	I40IW_RING_MOVE_TAIL(qp->sq_ring);
 	I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
 	qp->swqe_polarity = 1;
+	qp->first_sq_wq = true;
 	qp->swqe_polarity_deferred = 1;
 	qp->rwqe_polarity = 0;
 

+ 1 - 0
drivers/infiniband/hw/i40iw/i40iw_user.h

@@ -376,6 +376,7 @@ struct i40iw_qp_uk {
 	u8 rwqe_polarity;
 	u8 rq_wqe_size;
 	u8 rq_wqe_size_multiplier;
+	bool first_sq_wq;
 	bool deferred_flag;
 };
 

+ 9 - 4
drivers/infiniband/hw/i40iw/i40iw_utils.c

@@ -168,11 +168,16 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;
 
-	if (upper_dev)
-		local_ipaddr = ntohl(
-			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-	else
+	if (upper_dev) {
+		struct in_device *in;
+
+		rcu_read_lock();
+		in = __in_dev_get_rcu(upper_dev);
+		local_ipaddr = ntohl(in->ifa_list->ifa_address);
+		rcu_read_unlock();
+	} else {
 		local_ipaddr = ntohl(ifa->ifa_address);
+	}
 	switch (event) {
 	case NETDEV_DOWN:
 		action = I40IW_ARP_DELETE;

+ 13 - 0
drivers/infiniband/hw/i40iw/i40iw_verbs.c

@@ -2204,6 +2204,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 	ukqp = &iwqp->sc_qp.qp_uk;
 
 	spin_lock_irqsave(&iwqp->lock, flags);
+
+	if (iwqp->flush_issued) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	while (ib_wr) {
 		inv_stag = false;
 		memset(&info, 0, sizeof(info));
@@ -2346,6 +2352,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 		ib_wr = ib_wr->next;
 	}
 
+out:
 	if (err)
 		*bad_wr = ib_wr;
 	else
@@ -2378,6 +2385,12 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
 
 	memset(&post_recv, 0, sizeof(post_recv));
 	spin_lock_irqsave(&iwqp->lock, flags);
+
+	if (iwqp->flush_issued) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	while (ib_wr) {
 		post_recv.num_sges = ib_wr->num_sge;
 		post_recv.wr_id = ib_wr->wr_id;

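The post_send/post_recv hunks above refuse new work requests once
flush_issued is set, testing the flag only after taking the QP spinlock
so the check cannot race with the flushing path. A generic pthread
sketch of that check-under-lock gate (not the driver's locking code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool flush_issued;

    static int post_work(const char *wr)
    {
            int err = 0;

            pthread_mutex_lock(&lock);
            if (flush_issued) {
                    err = -1;       /* the driver returns -EINVAL here */
                    goto out;
            }
            printf("posted %s\n", wr);
    out:
            pthread_mutex_unlock(&lock);
            return err;
    }

    int main(void)
    {
            post_work("wr0");       /* accepted */

            pthread_mutex_lock(&lock);
            flush_issued = true;    /* what the flush path would set */
            pthread_mutex_unlock(&lock);

            return post_work("wr1") ? 0 : 1; /* now rejected */
    }
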
+ 2 - 0
drivers/infiniband/hw/mlx4/cq.c

@@ -768,11 +768,13 @@ repoll:
 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
 		case MLX4_OPCODE_RDMA_WRITE_IMM:
 			wc->wc_flags |= IB_WC_WITH_IMM;
+			/* fall through */
 		case MLX4_OPCODE_RDMA_WRITE:
 			wc->opcode    = IB_WC_RDMA_WRITE;
 			break;
 		case MLX4_OPCODE_SEND_IMM:
 			wc->wc_flags |= IB_WC_WITH_IMM;
+			/* fall through */
 		case MLX4_OPCODE_SEND:
 		case MLX4_OPCODE_SEND_INVAL:
 			wc->opcode    = IB_WC_SEND;

+ 1 - 0
drivers/infiniband/hw/mlx4/mcg.c

@@ -944,6 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 	switch (sa_mad->mad_hdr.method) {
 	case IB_MGMT_METHOD_SET:
 		may_create = 1;
+		/* fall through */
 	case IB_SA_METHOD_DELETE:
 		req = kzalloc(sizeof *req, GFP_KERNEL);
 		if (!req)

+ 2 - 0
drivers/infiniband/hw/mlx5/cq.c

@@ -124,11 +124,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
 	case MLX5_OPCODE_RDMA_WRITE_IMM:
 		wc->wc_flags |= IB_WC_WITH_IMM;
+		/* fall through */
 	case MLX5_OPCODE_RDMA_WRITE:
 		wc->opcode    = IB_WC_RDMA_WRITE;
 		break;
 	case MLX5_OPCODE_SEND_IMM:
 		wc->wc_flags |= IB_WC_WITH_IMM;
+		/* fall through */
 	case MLX5_OPCODE_SEND:
 	case MLX5_OPCODE_SEND_INVAL:
 		wc->opcode    = IB_WC_SEND;

+ 2 - 2
drivers/infiniband/hw/mlx5/mr.c

@@ -1230,13 +1230,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
-			mlx5_ib_dbg(dev, "cache empty for order %d", order);
+			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
-			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
 		use_umr = false;

+ 1 - 2
drivers/infiniband/hw/mlx5/qp.c

@@ -3858,7 +3858,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	unsigned idx;
 	int err = 0;
-	int inl = 0;
 	int num_sge;
 	void *seg;
 	int nreq;
@@ -4053,6 +4052,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				*bad_wr = wr;
 				goto out;
 			}
+			/* fall through */
 		case MLX5_IB_QPT_HW_GSI:
 			set_datagram_seg(seg, wr);
 			seg += sizeof(struct mlx5_wqe_datagram_seg);
@@ -4116,7 +4116,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				*bad_wr = wr;
 				goto out;
 			}
-			inl = 1;
 			size += sz;
 		} else {
 			dpseg = seg;

+ 5 - 5
drivers/infiniband/hw/mthca/mthca_main.c

@@ -473,11 +473,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		goto err_unmap_eqp;
 	}
 
-       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
-						    dev_lim->cqc_entry_sz,
-						    mdev->limits.num_cqs,
-						    mdev->limits.reserved_cqs,
-						    0, 0);
+	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
+						     dev_lim->cqc_entry_sz,
+						     mdev->limits.num_cqs,
+						     mdev->limits.reserved_cqs,
+						     0, 0);
 	if (!mdev->cq_table.table) {
 		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
 		err = -ENOMEM;

+ 19 - 14
drivers/infiniband/hw/nes/nes.c

@@ -178,11 +178,16 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
 					/* fall through */
 				case NETDEV_CHANGEADDR:
 					/* Add the address to the IP table */
-					if (upper_dev)
-						nesvnic->local_ipaddr =
-							((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
-					else
+					if (upper_dev) {
+						struct in_device *in;
+
+						rcu_read_lock();
+						in = __in_dev_get_rcu(upper_dev);
+						nesvnic->local_ipaddr = in->ifa_list->ifa_address;
+						rcu_read_unlock();
+					} else {
 						nesvnic->local_ipaddr = ifa->ifa_address;
+					}
 
 					nes_write_indexed(nesdev,
 							NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
@@ -757,18 +762,18 @@ static void nes_remove(struct pci_dev *pcidev)
 	int netdev_index = 0;
 	unsigned long flags;
 
-		if (nesdev->netdev_count) {
-			netdev = nesdev->netdev[netdev_index];
-			if (netdev) {
-				netif_stop_queue(netdev);
-				unregister_netdev(netdev);
-				nes_netdev_destroy(netdev);
+	if (nesdev->netdev_count) {
+		netdev = nesdev->netdev[netdev_index];
+		if (netdev) {
+			netif_stop_queue(netdev);
+			unregister_netdev(netdev);
+			nes_netdev_destroy(netdev);
 
-				nesdev->netdev[netdev_index] = NULL;
-				nesdev->netdev_count--;
-				nesdev->nesadapter->netdev_count--;
-			}
+			nesdev->netdev[netdev_index] = NULL;
+			nesdev->netdev_count--;
+			nesdev->nesadapter->netdev_count--;
 		}
+	}
 
 	nes_notifiers_registered--;
 	if (nes_notifiers_registered == 0) {

+ 1 - 8
drivers/infiniband/hw/nes/nes_cm.c

@@ -1389,7 +1389,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 	struct rtable *rt;
 	struct neighbour *neigh;
 	int rc = arpindex;
-	struct net_device *netdev;
 	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
 	__be32 dst_ipaddr = htonl(dst_ip);
 
@@ -1400,11 +1399,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
 		return rc;
 	}
 
-	if (netif_is_bond_slave(nesvnic->netdev))
-		netdev = netdev_master_upper_dev_get(nesvnic->netdev);
-	else
-		netdev = nesvnic->netdev;
-
 	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
 	rcu_read_lock();
@@ -1768,6 +1762,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	case NES_CM_STATE_FIN_WAIT1:
 	case NES_CM_STATE_LAST_ACK:
 		cm_node->cm_id->rem_ref(cm_node->cm_id);
+		/* fall through */
 	case NES_CM_STATE_TIME_WAIT:
 		cm_node->state = NES_CM_STATE_CLOSED;
 		rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -3074,7 +3069,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	u32 crc_value;
 	int ret;
 	int passive_state;
-	struct nes_ib_device *nesibdev;
 	struct ib_mr *ibmr = NULL;
 	struct nes_pd *nespd;
 	u64 tagged_offset;
@@ -3157,7 +3151,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
 		u64temp = (unsigned long)nesqp;
-		nesibdev = nesvnic->nesibdev;
 		nespd = nesqp->nespd;
 		tagged_offset = (u64)(unsigned long)*start_buff;
 		ibmr = nes_reg_phys_mr(&nespd->ibpd,

Some files were not shown because too many files changed in this diff