
Merge branch 'qed-RoCE-fixes'

Yuval Mintz says:

====================
qed: RoCE related pseudo-fixes

This series contains multiple small corrections to the RoCE logic
in qed, plus some debug information and an inter-module parameter
meant to prevent issues further along.

 - #1, #6 share information with the protocol driver
   [either new, or filling missing bits in the existing API].
 - #2, #3 correct error flows in qed.
 - #4 adds debug-related information.
 - #5 fixes a minor issue in the HW configuration.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 8 years ago
commit a6e8ab8e72

+ 2 - 0
drivers/net/ethernet/qlogic/qed/qed.h

@@ -72,6 +72,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
 #define QED_WFQ_UNIT	100
 
 #define QED_WID_SIZE            (1024)
+#define QED_MIN_WIDS		(4)
 #define QED_PF_DEMS_SIZE        (4)
 
 /* cau states */
@@ -525,6 +526,7 @@ struct qed_hwfn {
 	struct dbg_tools_data		dbg_info;
 
 	/* PWM region specific data */
+	u16				wid_count;
 	u32				dpi_size;
 	u32				dpi_count;
 

+ 8 - 8
drivers/net/ethernet/qlogic/qed/qed_dev.c

@@ -1318,17 +1318,15 @@ static int
 qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
 		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
 {
-	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
-	u32 dpi_bit_shift, dpi_count;
+	u32 dpi_bit_shift, dpi_count, dpi_page_size;
 	u32 min_dpis;
+	u32 n_wids;
 
 	/* Calculate DPI size */
-	dpi_page_size_1 = QED_WID_SIZE * n_cpus;
-	dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
-	dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
-	dpi_page_size = roundup_pow_of_two(dpi_page_size);
+	n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
+	dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
+	dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
 	dpi_bit_shift = ilog2(dpi_page_size / 4096);
-
 	dpi_count = pwm_region_size / dpi_page_size;
 
 	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
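
For reference, a minimal user-space sketch of the new dpi_page_size arithmetic in the hunk above: clamp the WID count to at least QED_MIN_WIDS, round it up to a power of two, scale by QED_WID_SIZE, then align up to the page size. This is not driver code; the 4 KiB PAGE_SIZE and the pow2_roundup() helper (standing in for the kernel's roundup_pow_of_two()) are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define QED_WID_SIZE	1024u
#define QED_MIN_WIDS	4u
#ifndef PAGE_SIZE
#define PAGE_SIZE	4096u	/* assumption: 4 KiB pages */
#endif

/* Smallest power of two >= n; stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t pow2_roundup(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t n_cpus;

	for (n_cpus = 1; n_cpus <= 16; n_cpus++) {
		uint32_t n_wids = n_cpus > QED_MIN_WIDS ? n_cpus : QED_MIN_WIDS;
		uint32_t dpi_page_size = QED_WID_SIZE * pow2_roundup(n_wids);

		/* Align up to a page boundary, as the patch does. */
		dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
		printf("n_cpus=%2u -> dpi_page_size=%u\n",
		       (unsigned)n_cpus, (unsigned)dpi_page_size);
	}
	return 0;
}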
@@ -1356,7 +1354,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	u32 pwm_regsize, norm_regsize;
 	u32 non_pwm_conn, min_addr_reg1;
-	u32 db_bar_size, n_cpus;
+	u32 db_bar_size, n_cpus = 1;
 	u32 roce_edpm_mode;
 	u32 pf_dems_shift;
 	int rc = 0;
@@ -1417,6 +1415,8 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 			qed_rdma_dpm_bar(p_hwfn, p_ptt);
 	}
 
+	p_hwfn->wid_count = (u16) n_cpus;
+
 	DP_INFO(p_hwfn,
 		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
 		norm_regsize,

+ 87 - 29
drivers/net/ethernet/qlogic/qed/qed_roce.c

@@ -90,7 +90,7 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
-			       struct qed_bmap *bmap, u32 max_count)
+			       struct qed_bmap *bmap, u32 max_count, char *name)
 {
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
 
@@ -104,26 +104,24 @@ static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
 		return -ENOMEM;
 	}
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
-		   bmap->bitmap);
+	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
 	return 0;
 }
 
 static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
 				  struct qed_bmap *bmap, u32 *id_num)
 {
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
-
 	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
-
-	if (*id_num >= bmap->max_count) {
-		DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
-			  bmap->max_count);
+	if (*id_num >= bmap->max_count)
 		return -EINVAL;
-	}
 
 	__set_bit(*id_num, bmap->bitmap);
 
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+		   bmap->name, *id_num);
+
 	return 0;
 }
 
@@ -141,15 +139,18 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 {
 	bool b_acquired;
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
 	if (id_num >= bmap->max_count)
 		return;
 
 	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
 	if (!b_acquired) {
-		DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+			  bmap->name, id_num);
 		return;
 	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+		   bmap->name, id_num);
 }
 
 static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
@@ -224,7 +225,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Allocate bit map for pd's */
-	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+				 "PD");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate pd_map, rc = %d\n",
@@ -234,7 +236,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 
 	/* Allocate DPI bitmap */
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
-				 p_hwfn->dpi_count);
+				 p_hwfn->dpi_count, "DPI");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
@@ -245,7 +247,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	 * twice the number of QPs.
 	 */
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
-				 p_rdma_info->num_qps * 2);
+				 p_rdma_info->num_qps * 2, "CQ");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate cq bitmap, rc = %d\n", rc);
@@ -257,7 +259,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	 * The maximum number of CQs is bounded to  twice the number of QPs.
 	 */
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
-				 p_rdma_info->num_qps * 2);
+				 p_rdma_info->num_qps * 2, "Toggle");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate toogle bits, rc = %d\n", rc);
@@ -266,7 +268,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 
 	/* Allocate bitmap for itids */
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
-				 p_rdma_info->num_mrs);
+				 p_rdma_info->num_mrs, "MR");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
@@ -274,7 +276,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Allocate bitmap for cids used for qps. */
-	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+				 "CID");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate cid bitmap, rc = %d\n", rc);
@@ -282,7 +285,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Allocate bitmap for cids used for responders/requesters. */
-	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+				 "REAL_CID");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
@@ -313,6 +317,54 @@ free_rdma_info:
 	return rc;
 }
 
+static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+			       struct qed_bmap *bmap, bool check)
+{
+	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+	int last_line = bmap->max_count / (64 * 8);
+	int last_item = last_line * 8 +
+	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+	u64 *pmap = (u64 *)bmap->bitmap;
+	int line, item, offset;
+	u8 str_last_line[200] = { 0 };
+
+	if (!weight || !check)
+		goto end;
+
+	DP_NOTICE(p_hwfn,
+		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+		  bmap->name, bmap->max_count, weight);
+
+	/* print aligned non-zero lines, if any */
+	for (item = 0, line = 0; line < last_line; line++, item += 8)
+		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+			DP_NOTICE(p_hwfn,
+				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+				  line,
+				  pmap[item],
+				  pmap[item + 1],
+				  pmap[item + 2],
+				  pmap[item + 3],
+				  pmap[item + 4],
+				  pmap[item + 5],
+				  pmap[item + 6], pmap[item + 7]);
+
+	/* print last unaligned non-zero line, if any */
+	if ((bmap->max_count % (64 * 8)) &&
+	    (bitmap_weight((unsigned long *)&pmap[item],
+			   bmap->max_count - item * 64))) {
+		offset = sprintf(str_last_line, "line 0x%04x: ", line);
+		for (; item < last_item; item++)
+			offset += sprintf(str_last_line + offset,
+					  "0x%016llx ", pmap[item]);
+		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+	}
+
+end:
+	kfree(bmap->bitmap);
+	bmap->bitmap = NULL;
+}
+
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
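
As a rough illustration of the dump format introduced by the new qed_rdma_bmap_free() above: leaked bits are printed eight 64-bit words (512 bits) per line, with a shorter final line when the bit count is not a multiple of 512. The user-space sketch below is not the driver code; dump_bitmap() and the sample bitmap are hypothetical, and unlike the driver it also prints all-zero lines.

#include <stdint.h>
#include <stdio.h>

/* Print 'max_count' bits, eight u64 words (512 bits) per line. */
static void dump_bitmap(const uint64_t *pmap, uint32_t max_count)
{
	uint32_t last_line = max_count / (64 * 8);
	uint32_t last_item = last_line * 8 + (max_count % (64 * 8) + 63) / 64;
	uint32_t line, item, i;

	/* Full 512-bit lines. */
	for (item = 0, line = 0; line < last_line; line++, item += 8) {
		printf("line 0x%04x:", (unsigned)line);
		for (i = 0; i < 8; i++)
			printf(" 0x%016llx", (unsigned long long)pmap[item + i]);
		printf("\n");
	}

	/* Trailing partial line, if any. */
	if (item < last_item) {
		printf("line 0x%04x:", (unsigned)line);
		for (; item < last_item; item++)
			printf(" 0x%016llx", (unsigned long long)pmap[item]);
		printf("\n");
	}
}

int main(void)
{
	uint64_t bits[10] = { 0 };	/* 640 bits: one full line plus a partial one */

	bits[0] = 0x5;			/* ids 0 and 2 still allocated */
	bits[9] = 1ull << 3;		/* id 579 still allocated */
	dump_bitmap(bits, 640);
	return 0;
}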
@@ -332,12 +384,12 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 		}
 	}
 
-	kfree(p_rdma_info->cid_map.bitmap);
-	kfree(p_rdma_info->tid_map.bitmap);
-	kfree(p_rdma_info->toggle_bits.bitmap);
-	kfree(p_rdma_info->cq_map.bitmap);
-	kfree(p_rdma_info->dpi_map.bitmap);
-	kfree(p_rdma_info->pd_map.bitmap);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
 
 	kfree(p_rdma_info->port);
 	kfree(p_rdma_info->dev);
@@ -732,6 +784,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
 				    ((out_params->dpi) * p_hwfn->dpi_size);
 
 	out_params->dpi_size = p_hwfn->dpi_size;
+	out_params->wid_count = p_hwfn->wid_count;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
 	return rc;
@@ -750,6 +803,8 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
 
 	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
 
+	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
 	return p_port;
 }
 
@@ -802,9 +857,12 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
 				  struct qed_dev_rdma_info *info)
 {
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
 	memset(info, 0, sizeof(*info));
 
 	info->rdma_type = QED_RDMA_TYPE_ROCE;
+	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
 
 	qed_fill_dev_info(cdev, &info->common);
 
@@ -952,8 +1010,7 @@ static int qed_rdma_create_cq(void *rdma_cxt,
 
 	/* Allocate icid */
 	spin_lock_bh(&p_info->lock);
-	rc = qed_rdma_bmap_alloc_id(p_hwfn,
-				    &p_info->cq_map, &returned_id);
+	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
 	spin_unlock_bh(&p_info->lock);
 
 	if (rc) {
@@ -2189,8 +2246,7 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 						  params->modify_flags);
 
 		return rc;
-	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
-		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
 		/* ->ERR */
 		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
 						  params->modify_flags);
@@ -2456,6 +2512,8 @@ qed_rdma_register_tid(void *rdma_cxt,
 	}
 
 	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+	if (rc)
+		return rc;
 
 	if (fw_return_code != RDMA_RETURN_OK) {
 		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);

+ 2 - 0
drivers/net/ethernet/qlogic/qed/qed_roce.h

@@ -67,9 +67,11 @@ enum qed_rdma_toggle_bit {
 	QED_RDMA_TOGGLE_BIT_SET = 1
 };
 
+#define QED_RDMA_MAX_BMAP_NAME	(10)
 struct qed_bmap {
 	unsigned long *bitmap;
 	u32 max_count;
+	char name[QED_RDMA_MAX_BMAP_NAME];
 };
 
 struct qed_rdma_info {

+ 2 - 0
include/linux/qed/qed_roce_if.h

@@ -240,6 +240,7 @@ struct qed_rdma_add_user_out_params {
 	u64 dpi_addr;
 	u64 dpi_phys_addr;
 	u32 dpi_size;
+	u16 wid_count;
 };
 
 enum roce_mode {
@@ -533,6 +534,7 @@ enum qed_rdma_type {
 struct qed_dev_rdma_info {
 	struct qed_dev_info common;
 	enum qed_rdma_type rdma_type;
+	u8 user_dpm_enabled;
 };
 
 struct qed_rdma_ops {