
Merge branch 'qed-janitorial'

Yuval Mintz says:

====================
qed*: Janitorial series [semantic & prints]

Some day-1 slips in coding style exist in the qed* code
[incorrect alignments, conditions using (== 0), etc.].
This series addresses those and makes some additional
cosmetic changes along the way [such as reducing the number of lines
taken by function declarations].

The series is broken into 3 parts - purely semantic changes, cosmetic
changes that required minor changes in the code, and print-related
changes. All in all, no real change in driver behavior is expected.

[This is a repost; the original was sent while net-next was closed].

Please consider applying this to `net-next'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 9 years ago
commit a2956ecdc6
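
For readers skimming the diffs below, the patterns the series applies are few and mechanical. A minimal before/after sketch in C (illustrative only - `foo' is a hypothetical function, not taken from the patch):

	/* Before: the day-1 style slips being removed */
	if (rc != 0)			/* explicit comparison against zero */
		return rc;
	mask |= (1 << i);		/* open-coded bit shift */
	static void foo(struct qed_hwfn *p_hwfn,
			u32 addr,
			u32 val);	/* declaration spread over three lines */

	/* After: the style the series converges on */
	if (rc)				/* implicit truth test */
		return rc;
	mask |= BIT(i);			/* BIT() from the kernel's bitops headers */
	static void foo(struct qed_hwfn *p_hwfn, u32 addr, u32 val);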

+ 18 - 29
drivers/net/ethernet/qlogic/qed/qed_cxt.c

@@ -377,9 +377,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 	}
 }
 
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn		*p_hwfn,
-				enum protocol_type	type,
-				u32			*vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+				enum protocol_type type, u32 *vf_cid)
 {
 	if (vf_cid)
 		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +404,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
 	return cnt;
 }
 
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
-			    enum protocol_type proto,
-			    u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+					enum protocol_type proto,
+					u8 seg,
+					u8 seg_type, u32 count, bool has_fl)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +419,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
 
 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
 				 struct qed_ilt_cli_blk *p_blk,
-				 u32 start_line, u32 total_size,
-				 u32 elem_size)
+				 u32 start_line, u32 total_size, u32 elem_size)
 {
 	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 
@@ -448,8 +446,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
 		p_cli->first.val = *p_line;
 
 	p_cli->active = true;
-	*p_line += DIV_ROUND_UP(p_blk->total_size,
-				p_blk->real_size_in_page);
+	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
 	p_cli->last.val = *p_line - 1;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -926,12 +923,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		void *p_virt;
 		u32 size;
 
-		size = min_t(u32, sz_left,
-			     p_blk->real_size_in_page);
+		size = min_t(u32, sz_left, p_blk->real_size_in_page);
 		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-					    size,
-					    &p_phys,
-					    GFP_KERNEL);
+					    size, &p_phys, GFP_KERNEL);
 		if (!p_virt)
 			return -ENOMEM;
 		memset(p_virt, 0, size);
@@ -976,7 +970,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
 			p_blk = &clients[i].pf_blks[j];
 			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
-			if (rc != 0)
+			if (rc)
 				goto ilt_shadow_fail;
 		}
 		for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +979,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 
 				p_blk = &clients[i].vf_blks[j];
 				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
-				if (rc != 0)
+				if (rc)
 					goto ilt_shadow_fail;
 			}
 		}
@@ -1672,7 +1666,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
 		     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
 
 		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
-		active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+		active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
 
 		tm_offset += tm_iids.pf_tids[i];
 	}
@@ -1702,8 +1696,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
 }
 
 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-			enum protocol_type type,
-			u32 *p_cid)
+			enum protocol_type type, u32 *p_cid)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 rel_cid;
@@ -1717,8 +1710,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
 				      p_mngr->acquired[type].max_count);
 
 	if (rel_cid >= p_mngr->acquired[type].max_count) {
-		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
-			  type);
+		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
 		return -EINVAL;
 	}
 
@@ -1730,8 +1722,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
 }
 
 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
-				      u32 cid,
-				      enum protocol_type *p_type)
+				      u32 cid, enum protocol_type *p_type)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1754,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
 	return true;
 }
 
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
-			 u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	enum protocol_type type;
@@ -1781,8 +1771,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
 	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
 }
 
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
-			 struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;

+ 49 - 66
drivers/net/ethernet/qlogic/qed/qed_dev.c

@@ -43,8 +43,7 @@ enum BAR_ID {
 	BAR_ID_1        /* Used for doorbells */
 };
 
-static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
-			   enum BAR_ID		bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
 {
 	u32 bar_reg = (bar_id == BAR_ID_0 ?
 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -69,8 +68,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
 	}
 }
 
-void qed_init_dp(struct qed_dev *cdev,
-		 u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
 {
 	u32 i;
 
@@ -542,8 +540,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
 	if (!cdev->reset_stats) {
 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
-		rc = -ENOMEM;
-		goto alloc_err;
+		goto alloc_no_mem;
 	}
 
 	return 0;
@@ -604,9 +601,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 
 	/* Make sure notification is not set before initiating final cleanup */
 	if (REG_RD(p_hwfn, addr)) {
-		DP_NOTICE(
-			p_hwfn,
-			"Unexpected; Found final cleanup notification before initiating final cleanup\n");
+		DP_NOTICE(p_hwfn,
+			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
 		REG_WR(p_hwfn, addr, 0);
 	}
 
@@ -700,17 +696,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
 				continue;
 
 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
-					      p_block->function_id,
-					      0, 0);
-			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
-					 sb_entry);
+					      p_block->function_id, 0, 0);
+			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
 		}
 	}
 }
 
 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt,
-			      int hw_mode)
+			      struct qed_ptt *p_ptt, int hw_mode)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct qed_qm_common_rt_init_params params;
@@ -758,7 +751,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 	qed_port_unpretend(p_hwfn, p_ptt);
 
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -787,13 +780,12 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    int hw_mode)
+			    struct qed_ptt *p_ptt, int hw_mode)
 {
 	int rc = 0;
 
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	if (hw_mode & (1 << MODE_MF_SI)) {
@@ -847,7 +839,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	qed_int_igu_init_rt(p_hwfn);
 
 	/* Set VLAN in NIG if needed */
-	if (hw_mode & (1 << MODE_MF_SD)) {
+	if (hw_mode & BIT(MODE_MF_SD)) {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -855,7 +847,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Enable classification by MAC if needed */
-	if (hw_mode & (1 << MODE_MF_SI)) {
+	if (hw_mode & BIT(MODE_MF_SI)) {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
 			   "Configuring TAGMAC_CLS_TYPE\n");
 		STORE_RT_REG(p_hwfn,
@@ -870,7 +862,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 
 	/* Cleanup chip from previous driver if such remains exist */
 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	/* PF Init sequence */
@@ -949,8 +941,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
 	/* Read shadow of current MFW mailbox */
 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
-	       p_hwfn->mcp_info->mfw_mb_cur,
-	       p_hwfn->mcp_info->mfw_mb_length);
+	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
 }
 
 int qed_hw_init(struct qed_dev *cdev,
@@ -970,7 +961,7 @@ int qed_hw_init(struct qed_dev *cdev,
 
 	if (IS_PF(cdev)) {
 		rc = qed_init_fw_data(cdev, bin_fw_data);
-		if (rc != 0)
+		if (rc)
 			return rc;
 	}
 
@@ -987,8 +978,7 @@ int qed_hw_init(struct qed_dev *cdev,
 
 		qed_calc_hw_mode(p_hwfn);
 
-		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
-				      &load_code);
+		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
 		if (rc) {
 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
 			return rc;
@@ -1065,9 +1055,8 @@ int qed_hw_init(struct qed_dev *cdev,
 }
 
 #define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
-				      struct qed_hwfn *p_hwfn,
-				      struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	int i;
 
@@ -1078,8 +1067,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
 		if ((!qed_rd(p_hwfn, p_ptt,
 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
-		    (!qed_rd(p_hwfn, p_ptt,
-			     TM_REG_PF_SCAN_ACTIVE_TASK)))
+		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;
 
 		/* Dependent on number of connection/tasks, possibly
@@ -1184,8 +1172,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
 		}
 
 		DP_VERBOSE(p_hwfn,
-			   NETIF_MSG_IFDOWN,
-			   "Shutting down the fastpath\n");
+			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
 
 		qed_wr(p_hwfn, p_ptt,
 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1213,14 +1200,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
 }
 
-static int qed_reg_assert(struct qed_hwfn *hwfn,
-			  struct qed_ptt *ptt, u32 reg,
-			  bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, u32 reg, bool expected)
 {
-	u32 assert_val = qed_rd(hwfn, ptt, reg);
+	u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
 
 	if (assert_val != expected) {
-		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+		DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
 			  reg, expected);
 		return -EINVAL;
 	}
@@ -1300,8 +1286,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 
 	/* Clean Previous errors if such exist */
 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
-	       1 << p_hwfn->abs_pf_id);
+	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
 
 	/* enable internal target-read */
 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1311,7 +1296,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 static void get_function_id(struct qed_hwfn *p_hwfn)
 {
 	/* ME Register */
-	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+						  PXP_PF_ME_OPAQUE_ADDR);
 
 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
 
@@ -1320,6 +1306,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
 				      PXP_CONCRETE_FID_PFID);
 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
 				    PXP_CONCRETE_FID_PORT);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
 }
 
 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1411,8 +1401,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 	return 0;
 }
 
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
-			       struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1466,8 +1455,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
 		break;
 	default:
-		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
-			  core_cfg);
+		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
 		break;
 	}
 
@@ -1478,11 +1466,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 	link_temp = qed_rd(p_hwfn, p_ptt,
 			   port_cfg_addr +
 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
-	link->speed.advertised_speeds =
-		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+	link->speed.advertised_speeds = link_temp;
 
-	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
-						link->speed.advertised_speeds;
+	link_temp = link->speed.advertised_speeds;
+	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
 
 	link_temp = qed_rd(p_hwfn, p_ptt,
 			   port_cfg_addr +
@@ -1511,8 +1499,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 		link->speed.forced_speed = 100000;
 		break;
 	default:
-		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
-			  link_temp);
+		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
 	}
 
 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1622,10 +1609,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
 	DP_VERBOSE(p_hwfn,
 		   NETIF_MSG_PROBE,
-		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
 		   p_hwfn->rel_pf_id,
 		   p_hwfn->abs_pf_id,
-		   p_hwfn->num_funcs_on_engine);
+		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
 }
 
 static int
@@ -1697,10 +1684,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
 	u32 tmp;
 
 	/* Read Vendor Id / Device Id */
-	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
-			     &cdev->vendor_id);
-	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
-			     &cdev->device_id);
+	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1776,7 +1762,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 	/* First hwfn learns basic information, e.g., number of hwfns */
 	if (!p_hwfn->my_id) {
 		rc = qed_get_dev_info(p_hwfn->cdev);
-		if (rc != 0)
+		if (rc)
 			goto err1;
 	}
 
@@ -2177,8 +2163,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
 	return 0;
 }
 
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
-		 u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
 		u8 min, max;
@@ -2197,8 +2182,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
-		   u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
 		u8 min, max;
@@ -2380,8 +2364,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
-			      u16 vport_id, u32 req_rate,
-			      u32 min_pf_rate)
+			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
 {
 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
 	int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2465,7 +2448,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
 
 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
 
-	if (rc == 0)
+	if (!rc)
 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
 						 p_link->min_pf_rate);
 	else

+ 53 - 84
drivers/net/ethernet/qlogic/qed/qed_hw.c

@@ -44,8 +44,7 @@ struct qed_ptt_pool {
 
 int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
 {
-	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
-					      GFP_KERNEL);
+	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
 	int i;
 
 	if (!p_pool)
@@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
 	return NULL;
 }
 
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
 	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
 	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
 }
 
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	/* The HW is using DWORDS and we need to translate it to Bytes */
 	return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
 }
 
 void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt,
-		     u32 new_hw_addr)
+		     struct qed_ptt *p_ptt, u32 new_hw_addr)
 {
 	u32 prev_hw_addr;
 
@@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
 }
 
 static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
-		       struct qed_ptt *p_ptt,
-		       u32 hw_addr)
+		       struct qed_ptt *p_ptt, u32 hw_addr)
 {
 	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
 	u32 offset;
@@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
 
 static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
-			  void *addr,
-			  u32 hw_addr,
-			  size_t n,
-			  bool to_device)
+			  void *addr, u32 hw_addr, size_t n, bool to_device)
 {
 	u32 dw_count, *host_addr, hw_offset;
 	size_t quota, done = 0;
@@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
 }
 
 void qed_memcpy_from(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt,
-		     void *dest, u32 hw_addr, size_t n)
+		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
 {
 	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
 		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
 }
 
 void qed_memcpy_to(struct qed_hwfn *p_hwfn,
-		   struct qed_ptt *p_ptt,
-		   u32 hw_addr, void *src, size_t n)
+		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
 {
 	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
 		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
 	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
 }
 
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt,
-		     u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
 {
 	u16 control = 0;
 
@@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
 }
 
 void qed_port_pretend(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      u8 port_id)
+		      struct qed_ptt *p_ptt, u8 port_id)
 {
 	u16 control = 0;
 
@@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
 	       *(u32 *)&p_ptt->pxp.pretend);
 }
 
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	u16 control = 0;
 
@@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
 	return DMAE_REG_GO_C0 + (idx << 2);
 }
 
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt)
 {
-	struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
 	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
 	int qed_status = 0;
 
 	/* verify address is not NULL */
-	if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
-	     ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
 		DP_NOTICE(p_hwfn,
 			  "source or destination address 0 idx_cmd=%d\n"
 			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
-			   idx_cmd,
-			   le32_to_cpu(command->opcode),
-			   le16_to_cpu(command->opcode_b),
-			   le16_to_cpu(command->length_dw),
-			   le32_to_cpu(command->src_addr_hi),
-			   le32_to_cpu(command->src_addr_lo),
-			   le32_to_cpu(command->dst_addr_hi),
-			   le32_to_cpu(command->dst_addr_lo));
+			  idx_cmd,
+			  le32_to_cpu(p_command->opcode),
+			  le16_to_cpu(p_command->opcode_b),
+			  le16_to_cpu(p_command->length_dw),
+			  le32_to_cpu(p_command->src_addr_hi),
+			  le32_to_cpu(p_command->src_addr_lo),
+			  le32_to_cpu(p_command->dst_addr_hi),
+			  le32_to_cpu(p_command->dst_addr_lo));
 
 		return -EINVAL;
 	}
@@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
 		   NETIF_MSG_HW,
 		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
 		   idx_cmd,
-		   le32_to_cpu(command->opcode),
-		   le16_to_cpu(command->opcode_b),
-		   le16_to_cpu(command->length_dw),
-		   le32_to_cpu(command->src_addr_hi),
-		   le32_to_cpu(command->src_addr_lo),
-		   le32_to_cpu(command->dst_addr_hi),
-		   le32_to_cpu(command->dst_addr_lo));
+		   le32_to_cpu(p_command->opcode),
+		   le16_to_cpu(p_command->opcode_b),
+		   le16_to_cpu(p_command->length_dw),
+		   le32_to_cpu(p_command->src_addr_hi),
+		   le32_to_cpu(p_command->src_addr_lo),
+		   le32_to_cpu(p_command->dst_addr_hi),
+		   le32_to_cpu(p_command->dst_addr_lo));
 
 	/* Copy the command to DMAE - need to do it before every call
 	 * for source/dest address no reset.
@@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
 	 */
 	for (i = 0; i < DMAE_CMD_SIZE; i++) {
 		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
-			   *(((u32 *)command) + i) : 0;
+			   *(((u32 *)p_command) + i) : 0;
 
 		qed_wr(p_hwfn, p_ptt,
 		       DMAE_REG_CMD_MEM +
@@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
 		       (i * sizeof(u32)), data);
 	}
 
-	qed_wr(p_hwfn, p_ptt,
-	       qed_dmae_idx_to_go_cmd(idx_cmd),
-	       DMAE_GO_VALUE);
+	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
 
 	return qed_status;
 }
@@ -498,9 +481,7 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
 	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
 
 	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-				     sizeof(u32),
-				     p_addr,
-				     GFP_KERNEL);
+				     sizeof(u32), p_addr, GFP_KERNEL);
 	if (!*p_comp) {
 		DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
 		goto err;
@@ -543,8 +524,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
 		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 				  sizeof(u32),
-				  p_hwfn->dmae_info.p_completion_word,
-				  p_phys);
+				  p_hwfn->dmae_info.p_completion_word, p_phys);
 		p_hwfn->dmae_info.p_completion_word = NULL;
 	}
 
@@ -552,8 +532,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
 		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 				  sizeof(struct dmae_cmd),
-				  p_hwfn->dmae_info.p_dmae_cmd,
-				  p_phys);
+				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
 		p_hwfn->dmae_info.p_dmae_cmd = NULL;
 	}
 
@@ -571,9 +550,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
 
 static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
 {
-	u32 wait_cnt = 0;
-	u32 wait_cnt_limit = 10000;
-
+	u32 wait_cnt_limit = 10000, wait_cnt = 0;
 	int qed_status = 0;
 
 	barrier();
@@ -606,7 +583,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
 					  u64 dst_addr,
 					  u8 src_type,
 					  u8 dst_type,
-					  u32 length)
+					  u32 length_dw)
 {
 	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
 	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +601,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
 		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
 		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
 		       (void *)(uintptr_t)src_addr,
-		       length * sizeof(u32));
+		       length_dw * sizeof(u32));
 		break;
 	default:
 		return -EINVAL;
@@ -645,7 +622,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
-	cmd->length_dw = cpu_to_le16((u16)length);
+	cmd->length_dw = cpu_to_le16((u16)length_dw);
 
 	qed_dmae_post_command(p_hwfn, p_ptt);
 
@@ -654,16 +631,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
 	if (qed_status) {
 		DP_NOTICE(p_hwfn,
 			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
-			  src_addr,
-			  dst_addr,
-			  length);
+			  src_addr, dst_addr, length_dw);
 		return qed_status;
 	}
 
 	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
 		memcpy((void *)(uintptr_t)(dst_addr),
 		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
-		       length * sizeof(u32));
+		       length_dw * sizeof(u32));
 
 	return 0;
 }
@@ -730,10 +705,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
 		if (qed_status) {
 			DP_NOTICE(p_hwfn,
 				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
-				  qed_status,
-				  src_addr,
-				  dst_addr,
-				  length_cur);
+				  qed_status, src_addr, dst_addr, length_cur);
			break;
		}
	}
@@ -743,10 +715,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
 
 int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt,
-		      u64 source_addr,
-		      u32 grc_addr,
-		      u32 size_in_dwords,
-		      u32 flags)
+		  u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
 {
 	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
 	struct qed_dmae_params params;
@@ -768,9 +737,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
-		  dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u32 grc_addr,
+		      dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
 {
 	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
 	struct qed_dmae_params params;
@@ -791,12 +761,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
 	return rc;
 }
 
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
-		   struct qed_ptt *p_ptt,
-		   dma_addr_t source_addr,
-		   dma_addr_t dest_addr,
-		   u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       dma_addr_t source_addr,
+		       dma_addr_t dest_addr,
+		       u32 size_in_dwords, struct qed_dmae_params *p_params)
 {
 	int rc;
 

+ 35 - 58
drivers/net/ethernet/qlogic/qed/qed_init_ops.c

@@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
 		p_hwfn->rt_data.b_valid[i] = false;
 }
 
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
-			   u32 rt_offset,
-			   u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
 {
 	p_hwfn->rt_data.init_val[rt_offset] = val;
 	p_hwfn->rt_data.b_valid[rt_offset] = true;
 }
 
 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
-			   u32 rt_offset, u32 *p_val,
-			   size_t size)
+			   u32 rt_offset, u32 *p_val, size_t size)
 {
 	size_t i;
 
@@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
 
 static int qed_init_rt(struct qed_hwfn	*p_hwfn,
 		       struct qed_ptt *p_ptt,
-		       u32 addr,
-		       u16 rt_offset,
-		       u16 size,
-		       bool b_must_dmae)
+		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
 {
 	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
 	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn	*p_hwfn,
 		 * simply write the data instead of using dmae.
 		 */
 		if (!b_must_dmae) {
-			qed_wr(p_hwfn, p_ptt, addr + (i << 2),
-			       p_init_val[i]);
+			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
 			continue;
 		}
 
@@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn	*p_hwfn,
 		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
 				       (uintptr_t)(p_init_val + i),
 				       addr + (i << 2), segment, 0);
-		if (rc != 0)
+		if (rc)
 			return rc;
 
 		/* Jump over the entire segment, including invalid entry */
@@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
 
 static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt,
-			      u32 addr,
-			      u32 fill,
-			      u32 fill_count)
+			      u32 addr, u32 fill, u32 fill_count)
 {
 	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
 
@@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
 
 	return qed_dmae_host2grc(p_hwfn, p_ptt,
 				 (uintptr_t)(&zero_buffer[0]),
-				 addr, fill_count,
-				 QED_DMAE_FLAG_RW_REPL_SRC);
+				 addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
 }
 
 static void qed_init_fill(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
-			  u32 addr,
-			  u32 fill,
-			  u32 fill_count)
+			  u32 addr, u32 fill, u32 fill_count)
 {
 	u32 i;
 
@@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
 static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt,
 			      struct init_write_op *cmd,
-			      bool b_must_dmae,
-			      bool b_can_dmae)
+			      bool b_must_dmae, bool b_can_dmae)
 {
+	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
 	u32 data = le32_to_cpu(cmd->data);
 	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
-	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
 	u32 offset, output_len, input_len, max_size;
 	struct qed_dev *cdev = p_hwfn->cdev;
 	union init_array_hdr *hdr;
@@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
 
 	array_data = cdev->fw_data->arr_data;
 
-	hdr = (union init_array_hdr *)(array_data +
-				       dmae_array_offset);
+	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
 	data = le32_to_cpu(hdr->raw.data);
 	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
 	case INIT_ARR_ZIPPED:
@@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
 /* init_ops write command */
 static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
-			   struct init_write_op *cmd,
-			   bool b_can_dmae)
+			   struct init_write_op *p_cmd, bool b_can_dmae)
 {
-	u32 data = le32_to_cpu(cmd->data);
-	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+	u32 data = le32_to_cpu(p_cmd->data);
 	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
-	union init_write_args *arg = &cmd->args;
+	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+	union init_write_args *arg = &p_cmd->args;
 	int rc = 0;
 
 	/* Sanitize */
@@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
 
 	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
 	case INIT_SRC_INLINE:
-		qed_wr(p_hwfn, p_ptt, addr,
-		       le32_to_cpu(arg->inline_val));
+		data = le32_to_cpu(p_cmd->args.inline_val);
+		qed_wr(p_hwfn, p_ptt, addr, data);
 		break;
 	case INIT_SRC_ZEROS:
-		if (b_must_dmae ||
-		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
-			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
-						le32_to_cpu(arg->zeros_count));
+		data = le32_to_cpu(p_cmd->args.zeros_count);
+		if (b_must_dmae || (b_can_dmae && (data >= 64)))
+			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
 		else
-			qed_init_fill(p_hwfn, p_ptt, addr, 0,
-				      le32_to_cpu(arg->zeros_count));
+			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
 		break;
 	case INIT_SRC_ARRAY:
-		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
 					b_must_dmae, b_can_dmae);
 		break;
 	case INIT_SRC_RUNTIME:
@@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
 
 /* init_ops read/poll commands */
 static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    struct init_read_op *cmd)
+			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
 {
 	bool (*comp_check)(u32 val, u32 expected_val);
 	u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
-				  u16 *offset,
-				  int modes)
+				  u16 *p_offset, int modes)
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
 	const u8 *modes_tree_buf;
 	u8 arg1, arg2, tree_val;
 
 	modes_tree_buf = cdev->fw_data->modes_tree_buf;
-	tree_val = modes_tree_buf[(*offset)++];
+	tree_val = modes_tree_buf[(*p_offset)++];
 	switch (tree_val) {
 	case INIT_MODE_OP_NOT:
-		return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
 	case INIT_MODE_OP_OR:
-		arg1	= qed_init_cmd_mode_match(p_hwfn, offset, modes);
-		arg2	= qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
 		return arg1 | arg2;
 	case INIT_MODE_OP_AND:
-		arg1	= qed_init_cmd_mode_match(p_hwfn, offset, modes);
-		arg2	= qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
-		return (modes & (1 << tree_val)) ? 1 : 0;
+		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}
 
 static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
-			     struct init_if_mode_op *p_cmd,
-			     int modes)
+			     struct init_if_mode_op *p_cmd, int modes)
 {
 	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
 
@@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
 
 static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
 			      struct init_if_phase_op *p_cmd,
-			      u32 phase,
-			      u32 phase_id)
+			      u32 phase, u32 phase_id)
 {
 	u32 data = le32_to_cpu(p_cmd->phase_data);
 	u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_run(struct qed_hwfn *p_hwfn,
-		 struct qed_ptt *p_ptt,
-		 int phase,
-		 int phase_id,
-		 int modes)
+		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
 	u32 cmd_num, num_init_ops;

+ 46 - 86
drivers/net/ethernet/qlogic/qed/qed_int.c

@@ -1775,10 +1775,9 @@ struct qed_sb_attn_info {
 };
 };
 
 
 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
-				      struct qed_sb_attn_info   *p_sb_desc)
+				      struct qed_sb_attn_info *p_sb_desc)
 {
 {
-	u16     rc = 0;
-	u16     index;
+	u16 rc = 0, index;
 
 
 	/* Make certain HW write took affect */
 	/* Make certain HW write took affect */
 	mmiowb();
 	mmiowb();
@@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
  *  @param asserted_bits newly asserted bits
  *  @param asserted_bits newly asserted bits
  *  @return int
  *  @return int
  */
  */
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
-			     u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
 {
 {
 	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
 	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
 	u32 igu_mask;
 	u32 igu_mask;
 
 
 	/* Mask the source of the attention in the IGU */
 	/* Mask the source of the attention in the IGU */
-	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-			  IGU_REG_ATTENTION_ENABLE);
+	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
 		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
 		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
 	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
 	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
@@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
 			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
 			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
 
 
 			if ((p_bit->flags & ATTENTION_PARITY) &&
 			if ((p_bit->flags & ATTENTION_PARITY) &&
-			    !!(parities & (1 << bit_idx)))
+			    !!(parities & BIT(bit_idx)))
 				qed_int_deassertion_parity(p_hwfn, p_bit,
 				qed_int_deassertion_parity(p_hwfn, p_bit,
 							   bit_idx);
 							   bit_idx);
 
 
@@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
 				    ~((u32)deasserted_bits));
 				    ~((u32)deasserted_bits));
 
 
 	/* Unmask deasserted attentions in IGU */
 	/* Unmask deasserted attentions in IGU */
-	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-			  IGU_REG_ATTENTION_ENABLE);
+	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
 	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
 	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
 
 
@@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
 			index, attn_bits, attn_acks, asserted_bits,
 			index, attn_bits, attn_acks, asserted_bits,
 			deasserted_bits, p_sb_attn_sw->known_attn);
 			deasserted_bits, p_sb_attn_sw->known_attn);
 	} else if (asserted_bits == 0x100) {
 	} else if (asserted_bits == 0x100) {
-		DP_INFO(p_hwfn,
-			"MFW indication via attention\n");
+		DP_INFO(p_hwfn, "MFW indication via attention\n");
 	} else {
 	} else {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
 			   "MFW indication [deassertion]\n");
 			   "MFW indication [deassertion]\n");
@@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
 			return rc;
 			return rc;
 	}
 	}
 
 
-	if (deasserted_bits) {
+	if (deasserted_bits)
 		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
 		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
-		if (rc)
-			return rc;
-	}
 
 
 	return rc;
 	return rc;
 }
 }
 
 
 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
-			    void __iomem *igu_addr,
-			    u32 ack_cons)
+			    void __iomem *igu_addr, u32 ack_cons)
 {
 {
 	struct igu_prod_cons_update igu_ack = { 0 };
 	struct igu_prod_cons_update igu_ack = { 0 };
 
 
@@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
 
 
 	/* Gather Interrupts/Attentions information */
 	/* Gather Interrupts/Attentions information */
 	if (!sb_info->sb_virt) {
 	if (!sb_info->sb_virt) {
-		DP_ERR(
-			p_hwfn->cdev,
-			"Interrupt Status block is NULL - cannot check for new interrupts!\n");
+		DP_ERR(p_hwfn->cdev,
+		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
 	} else {
 	} else {
 		u32 tmp_index = sb_info->sb_ack;
 		u32 tmp_index = sb_info->sb_ack;
 
 
@@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
 	}
 	}
 
 
 	if (!sb_attn || !sb_attn->sb_attn) {
 	if (!sb_attn || !sb_attn->sb_attn) {
-		DP_ERR(
-			p_hwfn->cdev,
-			"Attentions Status block is NULL - cannot check for new attentions!\n");
+		DP_ERR(p_hwfn->cdev,
+		       "Attentions Status block is NULL - cannot check for new attentions!\n");
 	} else {
 	} else {
 		u16 tmp_index = sb_attn->index;
 		u16 tmp_index = sb_attn->index;
 
 
@@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
 	if (p_sb->sb_attn)
 	if (p_sb->sb_attn)
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
 				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
-				  p_sb->sb_attn,
-				  p_sb->sb_phys);
+				  p_sb->sb_attn, p_sb->sb_phys);
 	kfree(p_sb);
 	kfree(p_sb);
 }
 }
 
 
@@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
 
 
 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
 				 struct qed_ptt *p_ptt,
 				 struct qed_ptt *p_ptt,
-				 void *sb_virt_addr,
-				 dma_addr_t sb_phy_addr)
+				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
 {
 {
 	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
 	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
 	int i, j, k;
 	int i, j, k;
@@ -2378,8 +2365,8 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
 {
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_sb_attn_info *p_sb;
 	struct qed_sb_attn_info *p_sb;
-	void *p_virt;
 	dma_addr_t p_phys = 0;
 	dma_addr_t p_phys = 0;
+	void *p_virt;
 
 
 	/* SB struct */
 	/* SB struct */
 	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
 	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
@@ -2412,9 +2399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
 
 
 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
 			   struct cau_sb_entry *p_sb_entry,
 			   struct cau_sb_entry *p_sb_entry,
-			   u8 pf_id,
-			   u16 vf_number,
-			   u8 vf_valid)
+			   u8 pf_id, u16 vf_number, u8 vf_valid)
 {
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_dev *cdev = p_hwfn->cdev;
 	u32 cau_state;
 	u32 cau_state;
@@ -2428,12 +2413,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
 
 
-	/* setting the time resultion to a fixed value ( = 1) */
-	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
-		  QED_CAU_DEF_RX_TIMER_RES);
-	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
-		  QED_CAU_DEF_TX_TIMER_RES);
-
 	cau_state = CAU_HC_DISABLE_STATE;
 	cau_state = CAU_HC_DISABLE_STATE;
 
 
 	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
 	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2447,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt,
 			 struct qed_ptt *p_ptt,
 			 dma_addr_t sb_phys,
 			 dma_addr_t sb_phys,
-			 u16 igu_sb_id,
-			 u16 vf_number,
-			 u8 vf_valid)
+			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
 {
 {
 	struct cau_sb_entry sb_entry;
 	struct cau_sb_entry sb_entry;
 
 
@@ -2514,8 +2491,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
 			timer_res = 2;
 			timer_res = 2;
 		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
 		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
 		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
 		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
-				    QED_COAL_RX_STATE_MACHINE,
-				    timeset);
+				    QED_COAL_RX_STATE_MACHINE, timeset);
 
 
 		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
 		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
 			timer_res = 0;
 			timer_res = 0;
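
The coalescing logic around this hunk scales the microsecond value down until
it fits the timeset field; a worked example, assuming the 0x7F limit shown in
the surrounding conditions:

	/* rx_coalesce_usecs = 200: 200 > 0x7F, so timer_res = 2,
	 * and timeset = 200 >> 2 = 50, which fits below 0x7F.
	 */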
@@ -2541,8 +2517,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 			 u8 timeset)
 			 u8 timeset)
 {
 {
 	struct cau_pi_entry pi_entry;
 	struct cau_pi_entry pi_entry;
-	u32 sb_offset;
-	u32 pi_offset;
+	u32 sb_offset, pi_offset;
 
 
 	if (IS_VF(p_hwfn->cdev))
 	if (IS_VF(p_hwfn->cdev))
 		return;
 		return;
@@ -2569,8 +2544,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 }
 }
 
 
 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      struct qed_sb_info *sb_info)
+		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
 {
 {
 	/* zero status block and ack counter */
 	/* zero status block and ack counter */
 	sb_info->sb_ack = 0;
 	sb_info->sb_ack = 0;
@@ -2590,8 +2564,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
  *
  *
  * @return u16
  * @return u16
  */
  */
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
-			     u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
 {
 {
 	u16 igu_sb_id;
 	u16 igu_sb_id;
 
 
@@ -2603,8 +2576,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
 	else
 	else
 		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
 		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
 
 
-	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
-		   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+	if (sb_id == QED_SP_SB_ID)
+		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+	else
+		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
 
 
 	return igu_sb_id;
 	return igu_sb_id;
 }
 }
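
After the split, the default (slowpath) status block and regular status
blocks log distinct messages; illustrative output (the index values here are
invented):

	Slowpath SB index in IGU is 0x0010
	SB [0003] <--> IGU SB [0013]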
@@ -2612,9 +2589,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
 		    struct qed_ptt *p_ptt,
 		    struct qed_ptt *p_ptt,
 		    struct qed_sb_info *sb_info,
 		    struct qed_sb_info *sb_info,
-		    void *sb_virt_addr,
-		    dma_addr_t sb_phy_addr,
-		    u16 sb_id)
+		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
 {
 {
 	sb_info->sb_virt = sb_virt_addr;
 	sb_info->sb_virt = sb_virt_addr;
 	sb_info->sb_phys = sb_phy_addr;
 	sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2625,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
 }
 }
 
 
 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
-		       struct qed_sb_info *sb_info,
-		       u16 sb_id)
+		       struct qed_sb_info *sb_info, u16 sb_id)
 {
 {
 	if (sb_id == QED_SP_SB_ID) {
 	if (sb_id == QED_SP_SB_ID) {
 		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
 		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2659,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
 	kfree(p_sb);
 	kfree(p_sb);
 }
 }
 
 
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
-			       struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	struct qed_sb_sp_info *p_sb;
 	struct qed_sb_sp_info *p_sb;
 	dma_addr_t p_phys = 0;
 	dma_addr_t p_phys = 0;
@@ -2721,9 +2694,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
 
 
 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
 			qed_int_comp_cb_t comp_cb,
 			qed_int_comp_cb_t comp_cb,
-			void *cookie,
-			u8 *sb_idx,
-			__le16 **p_fw_cons)
+			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
 {
 {
 	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
 	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
 	int rc = -ENOMEM;
 	int rc = -ENOMEM;
@@ -2764,8 +2735,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
 }
 }
 
 
 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    enum qed_int_mode int_mode)
+			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
 {
 {
 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
 
 
@@ -2809,7 +2779,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
 	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
 		rc = qed_slowpath_irq_req(p_hwfn);
 		rc = qed_slowpath_irq_req(p_hwfn);
-		if (rc != 0) {
+		if (rc) {
 			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
 			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
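
This is one of the recurring `(rc != 0)' cleanups: kernel style treats an
integer return code as a boolean in tests. In skeleton form:

	rc = qed_slowpath_irq_req(p_hwfn);
	if (rc)		/* preferred over "if (rc != 0)" */
		return -EINVAL;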
@@ -2822,8 +2792,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	return rc;
 	return rc;
 }
 }
 
 
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
-			     struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	p_hwfn->b_int_enabled = 0;
 	p_hwfn->b_int_enabled = 0;
 
 
@@ -2950,13 +2919,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
 					p_hwfn->hw_info.opaque_fid, b_set);
 					p_hwfn->hw_info.opaque_fid, b_set);
 }
 }
 
 
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn	*p_hwfn,
-				      struct qed_ptt	*p_ptt,
-				      u16		sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt, u16 sb_id)
 {
 {
 	u32 val = qed_rd(p_hwfn, p_ptt,
 	u32 val = qed_rd(p_hwfn, p_ptt,
-			 IGU_REG_MAPPING_MEMORY +
-			 sizeof(u32) * sb_id);
+			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
 	struct qed_igu_block *p_block;
 	struct qed_igu_block *p_block;
 
 
 	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
 	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2950,7 @@ out:
 	return val;
 	return val;
 }
 }
 
 
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
-			 struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	struct qed_igu_info *p_igu_info;
 	struct qed_igu_info *p_igu_info;
 	u32 val, min_vf = 0, max_vf = 0;
 	u32 val, min_vf = 0, max_vf = 0;
@@ -3104,22 +3070,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
  */
  */
 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
 {
 {
-	u32 igu_pf_conf = 0;
-
-	igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
 
 
 	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
 	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
 }
 }
 
 
 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
 {
 {
-	u64 intr_status = 0;
-	u32 intr_status_lo = 0;
-	u32 intr_status_hi = 0;
 	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
 	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
 			       IGU_CMD_INT_ACK_BASE;
 			       IGU_CMD_INT_ACK_BASE;
 	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
 	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
 			       IGU_CMD_INT_ACK_BASE;
 			       IGU_CMD_INT_ACK_BASE;
+	u32 intr_status_hi = 0, intr_status_lo = 0;
+	u64 intr_status = 0;
 
 
 	intr_status_lo = REG_RD(p_hwfn,
 	intr_status_lo = REG_RD(p_hwfn,
 				GTT_BAR0_MAP_REG_IGU_CMD +
 				GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,8 +3116,7 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
 	kfree(p_hwfn->sp_dpc);
 	kfree(p_hwfn->sp_dpc);
 }
 }
 
 
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
-		  struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	int rc = 0;
 	int rc = 0;
 
 
@@ -3169,10 +3131,9 @@ int qed_int_alloc(struct qed_hwfn *p_hwfn,
 		return rc;
 		return rc;
 	}
 	}
 	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
 	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
-	if (rc) {
+	if (rc)
 		DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
 		DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
-		return rc;
-	}
+
 	return rc;
 	return rc;
 }
 }
 
 
@@ -3183,8 +3144,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
 	qed_int_sp_dpc_free(p_hwfn);
 	qed_int_sp_dpc_free(p_hwfn);
 }
 }
 
 
-void qed_int_setup(struct qed_hwfn *p_hwfn,
-		   struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
 	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
 	qed_int_sb_attn_setup(p_hwfn, p_ptt);
 	qed_int_sb_attn_setup(p_hwfn, p_ptt);

+ 106 - 135
drivers/net/ethernet/qlogic/qed/qed_l2.c

@@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 	u16 rx_mode = 0;
 	u16 rx_mode = 0;
 
 
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc != 0)
+	if (rc)
 		return rc;
 		return rc;
 
 
 	memset(&init_data, 0, sizeof(init_data));
 	memset(&init_data, 0, sizeof(init_data));
@@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
 
 
 	/* TPA related fields */
 	/* TPA related fields */
-	memset(&p_ramrod->tpa_param, 0,
-	       sizeof(struct eth_vport_tpa_param));
+	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
 
 
 	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
 	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
 
 
@@ -306,14 +305,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 	memset(&p_ramrod->approx_mcast.bins, 0,
 	memset(&p_ramrod->approx_mcast.bins, 0,
 	       sizeof(p_ramrod->approx_mcast.bins));
 	       sizeof(p_ramrod->approx_mcast.bins));
 
 
-	if (p_params->update_approx_mcast_flg) {
-		p_ramrod->common.update_approx_mcast_flg = 1;
-		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-			u32 *p_bins = (u32 *)p_params->bins;
-			__le32 val = cpu_to_le32(p_bins[i]);
+	if (!p_params->update_approx_mcast_flg)
+		return;
 
 
-			p_ramrod->approx_mcast.bins[i] = val;
-		}
+	p_ramrod->common.update_approx_mcast_flg = 1;
+	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+		u32 *p_bins = (u32 *)p_params->bins;
+
+		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
 	}
 	}
 }
 }
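
The hunk above inverts the flag test into an early return so the common path
loses one indentation level; the same transformation in skeleton form (names
illustrative):

	/* before: work nested under the flag test */
	if (flag) {
		do_work();
	}

	/* after: bail out early, keep the work at one indent level */
	if (!flag)
		return;
	do_work();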
 
 
@@ -336,7 +335,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 	}
 	}
 
 
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc != 0)
+	if (rc)
 		return rc;
 		return rc;
 
 
 	memset(&init_data, 0, sizeof(init_data));
 	memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +360,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
-	p_cmn->update_accept_any_vlan_flg =
-			p_params->update_accept_any_vlan_flg;
+	val = p_params->update_accept_any_vlan_flg;
+	p_cmn->update_accept_any_vlan_flg = val;
 
 
 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
 	val = p_params->update_inner_vlan_removal_flg;
 	val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +410,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
 		return qed_vf_pf_vport_stop(p_hwfn);
 		return qed_vf_pf_vport_stop(p_hwfn);
 
 
 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
-	if (rc != 0)
+	if (rc)
 		return rc;
 		return rc;
 
 
 	memset(&init_data, 0, sizeof(init_data));
 	memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +475,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
 
 
 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
 					 comp_mode, p_comp_data);
 					 comp_mode, p_comp_data);
-		if (rc != 0) {
+		if (rc) {
 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
 			return rc;
 			return rc;
 		}
 		}
@@ -511,7 +510,7 @@ static int qed_sp_release_queue_cid(
 int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 				u16 opaque_fid,
 				u16 opaque_fid,
 				u32 cid,
 				u32 cid,
-				struct qed_queue_start_common_params *params,
+				struct qed_queue_start_common_params *p_params,
 				u8 stats_id,
 				u8 stats_id,
 				u16 bd_max_bytes,
 				u16 bd_max_bytes,
 				dma_addr_t bd_chain_phys_addr,
 				dma_addr_t bd_chain_phys_addr,
@@ -526,23 +525,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 	int rc = -EINVAL;
 	int rc = -EINVAL;
 
 
 	/* Store information for the stop */
 	/* Store information for the stop */
-	p_rx_cid		= &p_hwfn->p_rx_cids[params->queue_id];
-	p_rx_cid->cid		= cid;
-	p_rx_cid->opaque_fid	= opaque_fid;
-	p_rx_cid->vport_id	= params->vport_id;
+	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+	p_rx_cid->cid = cid;
+	p_rx_cid->opaque_fid = opaque_fid;
+	p_rx_cid->vport_id = p_params->vport_id;
 
 
-	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
-	if (rc != 0)
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+	if (rc)
 		return rc;
 		return rc;
 
 
-	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
-	if (rc != 0)
+	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
+	if (rc)
 		return rc;
 		return rc;
 
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
 		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid, cid, params->queue_id, params->vport_id,
-		   params->sb);
+		   opaque_fid,
+		   cid, p_params->queue_id, p_params->vport_id, p_params->sb);
 
 
 	/* Get SPQ entry */
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	memset(&init_data, 0, sizeof(init_data));
@@ -558,24 +557,25 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 
 
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 
 
-	p_ramrod->sb_id			= cpu_to_le16(params->sb);
-	p_ramrod->sb_index		= params->sb_idx;
-	p_ramrod->vport_id		= abs_vport_id;
-	p_ramrod->stats_counter_id	= stats_id;
-	p_ramrod->rx_queue_id		= cpu_to_le16(abs_rx_q_id);
-	p_ramrod->complete_cqe_flg	= 0;
-	p_ramrod->complete_event_flg	= 1;
+	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+	p_ramrod->sb_index = p_params->sb_idx;
+	p_ramrod->vport_id = abs_vport_id;
+	p_ramrod->stats_counter_id = stats_id;
+	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+	p_ramrod->complete_cqe_flg = 0;
+	p_ramrod->complete_event_flg = 1;
 
 
-	p_ramrod->bd_max_bytes	= cpu_to_le16(bd_max_bytes);
+	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
 
 
-	p_ramrod->num_of_pbl_pages	= cpu_to_le16(cqe_pbl_size);
+	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
 
-	p_ramrod->vf_rx_prod_index = params->vf_qid;
-	if (params->vf_qid)
+	p_ramrod->vf_rx_prod_index = p_params->vf_qid;
+	if (p_params->vf_qid)
 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
-			   "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+			   "Queue is meant for VF rxq[%04x]\n",
+			   p_params->vf_qid);
 
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 }
@@ -583,7 +583,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 static int
 static int
 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 			  u16 opaque_fid,
 			  u16 opaque_fid,
-			  struct qed_queue_start_common_params *params,
+			  struct qed_queue_start_common_params *p_params,
 			  u16 bd_max_bytes,
 			  u16 bd_max_bytes,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t cqe_pbl_addr,
 			  dma_addr_t cqe_pbl_addr,
@@ -597,20 +597,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 
 
 	if (IS_VF(p_hwfn->cdev)) {
 	if (IS_VF(p_hwfn->cdev)) {
 		return qed_vf_pf_rxq_start(p_hwfn,
 		return qed_vf_pf_rxq_start(p_hwfn,
-					   params->queue_id,
-					   params->sb,
-					   params->sb_idx,
+					   p_params->queue_id,
+					   p_params->sb,
+					   (u8)p_params->sb_idx,
 					   bd_max_bytes,
 					   bd_max_bytes,
 					   bd_chain_phys_addr,
 					   bd_chain_phys_addr,
 					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
 					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
 	}
 	}
 
 
-	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
-	if (rc != 0)
+	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
+	if (rc)
 		return rc;
 		return rc;
 
 
-	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
-	if (rc != 0)
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+	if (rc)
 		return rc;
 		return rc;
 
 
 	*pp_prod = (u8 __iomem *)p_hwfn->regview +
 	*pp_prod = (u8 __iomem *)p_hwfn->regview +
@@ -622,9 +622,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 			  (u32 *)(&init_prod_val));
 			  (u32 *)(&init_prod_val));
 
 
 	/* Allocate a CID for the queue */
 	/* Allocate a CID for the queue */
-	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
-	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-				 &p_rx_cid->cid);
+	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
 	if (rc) {
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
 		return rc;
 		return rc;
@@ -634,14 +633,14 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
 					 opaque_fid,
 					 opaque_fid,
 					 p_rx_cid->cid,
 					 p_rx_cid->cid,
-					 params,
+					 p_params,
 					 abs_stats_id,
 					 abs_stats_id,
 					 bd_max_bytes,
 					 bd_max_bytes,
 					 bd_chain_phys_addr,
 					 bd_chain_phys_addr,
 					 cqe_pbl_addr,
 					 cqe_pbl_addr,
 					 cqe_pbl_size);
 					 cqe_pbl_size);
 
 
-	if (rc != 0)
+	if (rc)
 		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
 		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
 
 
 	return rc;
 	return rc;
@@ -788,21 +787,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
 	if (rc)
 	if (rc)
 		return rc;
 		return rc;
 
 
-	p_ramrod		= &p_ent->ramrod.tx_queue_start;
-	p_ramrod->vport_id	= abs_vport_id;
+	p_ramrod = &p_ent->ramrod.tx_queue_start;
+	p_ramrod->vport_id = abs_vport_id;
 
 
-	p_ramrod->sb_id			= cpu_to_le16(p_params->sb);
-	p_ramrod->sb_index		= p_params->sb_idx;
-	p_ramrod->stats_counter_id	= stats_id;
+	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+	p_ramrod->sb_index = p_params->sb_idx;
+	p_ramrod->stats_counter_id = stats_id;
 
 
-	p_ramrod->queue_zone_id		= cpu_to_le16(abs_tx_q_id);
-	p_ramrod->pbl_size		= cpu_to_le16(pbl_size);
+	p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+
+	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 
 
-	pq_id			= qed_get_qm_pq(p_hwfn,
-						PROTOCOLID_ETH,
-						p_pq_params);
-	p_ramrod->qm_pq_id	= cpu_to_le16(pq_id);
+	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 }
@@ -836,8 +834,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
 	memset(&pq_params, 0, sizeof(pq_params));
 	memset(&pq_params, 0, sizeof(pq_params));
 
 
 	/* Allocate a CID for the queue */
 	/* Allocate a CID for the queue */
-	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-				 &p_tx_cid->cid);
+	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
 	if (rc) {
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
 		return rc;
 		return rc;
@@ -896,8 +893,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
 	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
 	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
 }
 }
 
 
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 {
 {
 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
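
The helper translates a driver-level filter opcode into a firmware filter
action; a sketch of the mapping (the exact table is assumed for illustration,
not shown in this hunk):

	switch (opcode) {
	case QED_FILTER_ADD:
		return ETH_FILTER_ACTION_ADD;
	case QED_FILTER_REMOVE:
		return ETH_FILTER_ACTION_REMOVE;
	default:
		return MAX_ETH_FILTER_ACTION;	/* "no action" sentinel */
	}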
 
 
@@ -1033,19 +1029,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
 
 
 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
-		p_second_filter->type		= p_first_filter->type;
-		p_second_filter->mac_msb	= p_first_filter->mac_msb;
-		p_second_filter->mac_mid	= p_first_filter->mac_mid;
-		p_second_filter->mac_lsb	= p_first_filter->mac_lsb;
-		p_second_filter->vlan_id	= p_first_filter->vlan_id;
-		p_second_filter->vni		= p_first_filter->vni;
+		p_second_filter->type = p_first_filter->type;
+		p_second_filter->mac_msb = p_first_filter->mac_msb;
+		p_second_filter->mac_mid = p_first_filter->mac_mid;
+		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+		p_second_filter->vlan_id = p_first_filter->vlan_id;
+		p_second_filter->vni = p_first_filter->vni;
 
 
 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
 
 
 		p_first_filter->vport_id = vport_to_remove_from;
 		p_first_filter->vport_id = vport_to_remove_from;
 
 
-		p_second_filter->action		= ETH_FILTER_ACTION_ADD;
-		p_second_filter->vport_id	= vport_to_add_to;
+		p_second_filter->action = ETH_FILTER_ACTION_ADD;
+		p_second_filter->vport_id = vport_to_add_to;
 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
 		p_first_filter->vport_id = vport_to_add_to;
 		p_first_filter->vport_id = vport_to_add_to;
 		memcpy(p_second_filter, p_first_filter,
 		memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1082,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
 				     &p_ramrod, &p_ent,
 				     &p_ramrod, &p_ent,
 				     comp_mode, p_comp_data);
 				     comp_mode, p_comp_data);
-	if (rc != 0) {
+	if (rc) {
 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
 		return rc;
 		return rc;
 	}
 	}
@@ -1094,10 +1090,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
 
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
-	if (rc != 0) {
-		DP_ERR(p_hwfn,
-		       "Unicast filter ADD command failed %d\n",
-		       rc);
+	if (rc) {
+		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
 		return rc;
 		return rc;
 	}
 	}
 
 
@@ -1136,15 +1130,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
  * Return:
  * Return:
  ******************************************************************************/
  ******************************************************************************/
 static u32 qed_calc_crc32c(u8 *crc32_packet,
 static u32 qed_calc_crc32c(u8 *crc32_packet,
-			   u32 crc32_length,
-			   u32 crc32_seed,
-			   u8 complement)
+			   u32 crc32_length, u32 crc32_seed, u8 complement)
 {
 {
-	u32 byte = 0;
-	u32 bit = 0;
-	u8 msb = 0;
-	u8 current_byte = 0;
-	u32 crc32_result = crc32_seed;
+	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+	u8 msb = 0, current_byte = 0;
 
 
 	if ((!crc32_packet) ||
 	if ((!crc32_packet) ||
 	    (crc32_length == 0) ||
 	    (crc32_length == 0) ||
@@ -1164,9 +1153,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
 	return crc32_result;
 	return crc32_result;
 }
 }
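
For reference, a self-contained bit-serial CRC32 of the kind
qed_calc_crc32c() implements; the MSB-first shift and the CRC32C polynomial
below are assumptions for illustration, not taken from the driver:

	static u32 crc32_bitwise(const u8 *buf, u32 len, u32 seed)
	{
		u32 crc = seed, byte, bit;

		for (byte = 0; byte < len; byte++) {
			/* feed the next byte into the top of the register */
			crc ^= (u32)buf[byte] << 24;
			for (bit = 0; bit < 8; bit++)
				crc = (crc & 0x80000000) ?
				      (crc << 1) ^ 0x1EDC6F41 : (crc << 1);
		}
		return crc;
	}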
 
 
-static inline u32 qed_crc32c_le(u32 seed,
-				u8 *mac,
-				u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
 {
 {
 	u32 packet_buf[2] = { 0 };
 	u32 packet_buf[2] = { 0 };
 
 
@@ -1196,17 +1183,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 	u8 abs_vport_id = 0;
 	u8 abs_vport_id = 0;
 	int rc, i;
 	int rc, i;
 
 
-	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+	if (p_filter_cmd->opcode == QED_FILTER_ADD)
 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
 				  &abs_vport_id);
 				  &abs_vport_id);
-		if (rc)
-			return rc;
-	} else {
+	else
 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
 				  &abs_vport_id);
 				  &abs_vport_id);
-		if (rc)
-			return rc;
-	}
+	if (rc)
+		return rc;
 
 
 	/* Get SPQ entry */
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1228,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 
 
 		/* Convert to correct endianness */
 		/* Convert to correct endianness */
 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+			struct vport_update_ramrod_mcast *p_ramrod_bins;
 			u32 *p_bins = (u32 *)bins;
 			u32 *p_bins = (u32 *)bins;
-			struct vport_update_ramrod_mcast *approx_mcast;
 
 
-			approx_mcast = &p_ramrod->approx_mcast;
-			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+			p_ramrod_bins = &p_ramrod->approx_mcast;
+			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
 		}
 		}
 	}
 	}
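
The per-bin cpu_to_le32() only changes bytes on big-endian hosts; either way
the firmware sees the same little-endian layout:

	u32 host_bin = 0x12345678;
	__le32 fw_bin = cpu_to_le32(host_bin);
	/* memory bytes are 78 56 34 12 on LE hosts (no-op) and on
	 * BE hosts (swapped), so the device reads one format */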
 
 
@@ -1286,8 +1270,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
 		rc = qed_sp_eth_filter_mcast(p_hwfn,
 		rc = qed_sp_eth_filter_mcast(p_hwfn,
 					     opaque_fid,
 					     opaque_fid,
 					     p_filter_cmd,
 					     p_filter_cmd,
-					     comp_mode,
-					     p_comp_data);
+					     comp_mode, p_comp_data);
 	}
 	}
 	return rc;
 	return rc;
 }
 }
@@ -1314,9 +1297,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
 		rc = qed_sp_eth_filter_ucast(p_hwfn,
 		rc = qed_sp_eth_filter_ucast(p_hwfn,
 					     opaque_fid,
 					     opaque_fid,
 					     p_filter_cmd,
 					     p_filter_cmd,
-					     comp_mode,
-					     p_comp_data);
-		if (rc != 0)
+					     comp_mode, p_comp_data);
+		if (rc)
 			break;
 			break;
 	}
 	}
 
 
@@ -1590,8 +1572,7 @@ out:
 	}
 	}
 }
 }
 
 
-void qed_get_vport_stats(struct qed_dev *cdev,
-			 struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
 {
 {
 	u32 i;
 	u32 i;
 
 
@@ -1766,8 +1747,7 @@ static int qed_start_vport(struct qed_dev *cdev,
 	return 0;
 	return 0;
 }
 }
 
 
-static int qed_stop_vport(struct qed_dev *cdev,
-			  u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
 {
 {
 	int rc, i;
 	int rc, i;
 
 
@@ -1775,8 +1755,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
 
 		rc = qed_sp_vport_stop(p_hwfn,
 		rc = qed_sp_vport_stop(p_hwfn,
-				       p_hwfn->hw_info.opaque_fid,
-				       vport_id);
+				       p_hwfn->hw_info.opaque_fid, vport_id);
 
 
 		if (rc) {
 		if (rc) {
 			DP_ERR(cdev, "Failed to stop VPORT\n");
 			DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1801,10 +1780,8 @@ static int qed_update_vport(struct qed_dev *cdev,
 
 
 	/* Translate protocol params into sp params */
 	/* Translate protocol params into sp params */
 	sp_params.vport_id = params->vport_id;
 	sp_params.vport_id = params->vport_id;
-	sp_params.update_vport_active_rx_flg =
-		params->update_vport_active_flg;
-	sp_params.update_vport_active_tx_flg =
-		params->update_vport_active_flg;
+	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
 	sp_params.vport_active_rx_flg = params->vport_active_flg;
 	sp_params.vport_active_rx_flg = params->vport_active_flg;
 	sp_params.vport_active_tx_flg = params->vport_active_flg;
 	sp_params.vport_active_tx_flg = params->vport_active_flg;
 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1817,8 +1794,7 @@ static int qed_update_vport(struct qed_dev *cdev,
 	 * We need to re-fix the rss values per engine for CMT.
 	 * We need to re-fix the rss values per engine for CMT.
 	 */
 	 */
 	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
 	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
-		struct qed_update_vport_rss_params *rss =
-			&params->rss_params;
+		struct qed_update_vport_rss_params *rss = &params->rss_params;
 		int k, max = 0;
 		int k, max = 0;
 
 
 		/* Find largest entry, since it's possible RSS needs to
 		/* Find largest entry, since it's possible RSS needs to
@@ -1861,8 +1837,8 @@ static int qed_update_vport(struct qed_dev *cdev,
 		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
 		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
 		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
 		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
 		       QED_RSS_KEY_SIZE * sizeof(u32));
 		       QED_RSS_KEY_SIZE * sizeof(u32));
+		sp_params.rss_params = &sp_rss_params;
 	}
 	}
-	sp_params.rss_params = &sp_rss_params;
 
 
 	for_each_hwfn(cdev, i) {
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1893,8 +1869,8 @@ static int qed_start_rxq(struct qed_dev *cdev,
 			 u16 cqe_pbl_size,
 			 u16 cqe_pbl_size,
 			 void __iomem **pp_prod)
 			 void __iomem **pp_prod)
 {
 {
-	int rc, hwfn_index;
 	struct qed_hwfn *p_hwfn;
 	struct qed_hwfn *p_hwfn;
+	int rc, hwfn_index;
 
 
 	hwfn_index = params->rss_id % cdev->num_hwfns;
 	hwfn_index = params->rss_id % cdev->num_hwfns;
 	p_hwfn = &cdev->hwfns[hwfn_index];
 	p_hwfn = &cdev->hwfns[hwfn_index];
@@ -1935,8 +1911,7 @@ static int qed_stop_rxq(struct qed_dev *cdev,
 
 
 	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
 	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
 				      params->rx_queue_id / cdev->num_hwfns,
 				      params->rx_queue_id / cdev->num_hwfns,
-				      params->eq_completion_only,
-				      false);
+				      params->eq_completion_only, false);
 	if (rc) {
 	if (rc) {
 		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
 		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
 		return rc;
 		return rc;
@@ -2047,11 +2022,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 
 
 	memset(&accept_flags, 0, sizeof(accept_flags));
 	memset(&accept_flags, 0, sizeof(accept_flags));
 
 
-	accept_flags.update_rx_mode_config	= 1;
-	accept_flags.update_tx_mode_config	= 1;
-	accept_flags.rx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
-						  QED_ACCEPT_MCAST_MATCHED |
-						  QED_ACCEPT_BCAST;
+	accept_flags.update_rx_mode_config = 1;
+	accept_flags.update_tx_mode_config = 1;
+	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+					QED_ACCEPT_MCAST_MATCHED |
+					QED_ACCEPT_BCAST;
 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 					QED_ACCEPT_MCAST_MATCHED |
 					QED_ACCEPT_MCAST_MATCHED |
 					QED_ACCEPT_BCAST;
 					QED_ACCEPT_BCAST;
@@ -2072,9 +2047,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
 	struct qed_filter_ucast ucast;
 	struct qed_filter_ucast ucast;
 
 
 	if (!params->vlan_valid && !params->mac_valid) {
 	if (!params->vlan_valid && !params->mac_valid) {
-		DP_NOTICE(
-			cdev,
-			"Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+		DP_NOTICE(cdev,
+			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -2135,8 +2109,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
 	for (i = 0; i < mcast.num_mc_addrs; i++)
 	for (i = 0; i < mcast.num_mc_addrs; i++)
 		ether_addr_copy(mcast.mac[i], params->mac[i]);
 		ether_addr_copy(mcast.mac[i], params->mac[i]);
 
 
-	return qed_filter_mcast_cmd(cdev, &mcast,
-				    QED_SPQ_MODE_CB, NULL);
+	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 }
 }
 
 
 static int qed_configure_filter(struct qed_dev *cdev,
 static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2126,13 @@ static int qed_configure_filter(struct qed_dev *cdev,
 		accept_flags = params->filter.accept_flags;
 		accept_flags = params->filter.accept_flags;
 		return qed_configure_filter_rx_mode(cdev, accept_flags);
 		return qed_configure_filter_rx_mode(cdev, accept_flags);
 	default:
 	default:
-		DP_NOTICE(cdev, "Unknown filter type %d\n",
-			  (int)params->type);
+		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 }
 }
 
 
 static int qed_fp_cqe_completion(struct qed_dev *dev,
 static int qed_fp_cqe_completion(struct qed_dev *dev,
-				 u8 rss_id,
-				 struct eth_slow_path_rx_cqe *cqe)
+				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
 {
 {
 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
 				      cqe);
 				      cqe);

+ 20 - 14
drivers/net/ethernet/qlogic/qed/qed_main.c

@@ -51,8 +51,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
 
 
 static int __init qed_init(void)
 static int __init qed_init(void)
 {
 {
-	pr_notice("qed_init called\n");
-
 	pr_info("%s", version);
 	pr_info("%s", version);
 
 
 	return 0;
 	return 0;
@@ -106,8 +104,7 @@ static void qed_free_pci(struct qed_dev *cdev)
 /* Performs PCI initializations as well as initializing PCI-related parameters
 /* Performs PCI initializations as well as initializing PCI-related parameters
  * in the device structure. Returns 0 in case of success.
  * in the device structure. Returns 0 in case of success.
  */
  */
-static int qed_init_pci(struct qed_dev *cdev,
-			struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 {
 {
 	u8 rev_id;
 	u8 rev_id;
 	int rc;
 	int rc;
@@ -263,8 +260,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
 }
 }
 
 
 /* Sets the requested power state */
 /* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
-			       pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
 {
 {
 	if (!cdev)
 	if (!cdev)
 		return -ENODEV;
 		return -ENODEV;
@@ -366,8 +362,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
 		DP_NOTICE(cdev,
 		DP_NOTICE(cdev,
 			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
 			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
 			  cnt, int_params->in.num_vectors);
 			  cnt, int_params->in.num_vectors);
-		rc = pci_enable_msix_exact(cdev->pdev,
-					   int_params->msix_table, cnt);
+		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+					   cnt);
 		if (!rc)
 		if (!rc)
 			rc = cnt;
 			rc = cnt;
 	}
 	}
@@ -439,6 +435,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
 	}
 	}
 
 
 out:
 out:
+	if (!rc)
+		DP_INFO(cdev, "Using %s interrupts\n",
+			int_params->out.int_mode == QED_INT_MODE_INTA ?
+			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+			"MSI" : "MSIX");
 	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
 	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
 
 
 	return rc;
 	return rc;
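
The nested ternary added above is compact but dense; a hypothetical helper
(not part of this patch) would express the same mapping more plainly:

	static const char *qed_int_mode_str(enum qed_int_mode mode)
	{
		switch (mode) {
		case QED_INT_MODE_INTA:
			return "INTa";
		case QED_INT_MODE_MSI:
			return "MSI";
		default:
			return "MSIX";
		}
	}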
@@ -514,19 +515,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 {
 {
 	struct qed_dev *cdev = hwfn->cdev;
 	struct qed_dev *cdev = hwfn->cdev;
+	u32 int_mode;
 	int rc = 0;
 	int rc = 0;
 	u8 id;
 	u8 id;
 
 
-	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+	int_mode = cdev->int_params.out.int_mode;
+	if (int_mode == QED_INT_MODE_MSIX) {
 		id = hwfn->my_id;
 		id = hwfn->my_id;
 		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
 		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
 			 id, cdev->pdev->bus->number,
 			 id, cdev->pdev->bus->number,
 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
 			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
 		rc = request_irq(cdev->int_params.msix_table[id].vector,
 		rc = request_irq(cdev->int_params.msix_table[id].vector,
 				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
 				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
-		if (!rc)
-			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
-				   "Requested slowpath MSI-X\n");
 	} else {
 	} else {
 		unsigned long flags = 0;
 		unsigned long flags = 0;
 
 
@@ -541,6 +541,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 				 flags, cdev->name, cdev);
 				 flags, cdev->name, cdev);
 	}
 	}
 
 
+	if (rc)
+		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+	else
+		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+			   "Requested slowpath %s\n",
+			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
 	return rc;
 	return rc;
 }
 }
 
 
@@ -974,8 +981,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
 }
 }
 
 
 static u32 qed_sb_release(struct qed_dev *cdev,
 static u32 qed_sb_release(struct qed_dev *cdev,
-			  struct qed_sb_info *sb_info,
-			  u16 sb_id)
+			  struct qed_sb_info *sb_info, u16 sb_id)
 {
 {
 	struct qed_hwfn *p_hwfn;
 	struct qed_hwfn *p_hwfn;
 	int hwfn_index;
 	int hwfn_index;

+ 28 - 50
drivers/net/ethernet/qlogic/qed/qed_mcp.c

@@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
 	return true;
 	return true;
 }
 }
 
 
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
-			   struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
 					PUBLIC_PORT);
 					PUBLIC_PORT);
@@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
 }
 }
 
 
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
 	u32 tmp, i;
 	u32 tmp, i;
@@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
 	return 0;
 	return 0;
 }
 }
 
 
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
-				struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 drv_mb_offsize, mfw_mb_offsize;
@@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
 	return 0;
 	return 0;
 }
 }
 
 
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	struct qed_mcp_info *p_info;
 	struct qed_mcp_info *p_info;
 	u32 size;
 	u32 size;
@@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
 
 
 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
-	p_info->mfw_mb_shadow =
-		kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
-				p_info->mfw_mb_length), GFP_KERNEL);
+	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
 		goto err;
 		goto err;
 
 
@@ -189,8 +183,7 @@ err:
  * access is achieved by setting a blocking flag, which makes competing
  * access is achieved by setting a blocking flag, which makes competing
  * contexts fail when they try to send their mailboxes.
  * contexts fail when they try to send their mailboxes.
  */
  */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
-			   u32 cmd)
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
 {
 {
 	spin_lock_bh(&p_hwfn->mcp_info->lock);
 	spin_lock_bh(&p_hwfn->mcp_info->lock);
 
 
@@ -221,15 +214,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
 	return 0;
 	return 0;
 }
 }
 
 
-static void qed_mcp_mb_unlock(struct qed_hwfn	*p_hwfn,
-			      u32		cmd)
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
 {
 {
 	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
 	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
 		spin_unlock_bh(&p_hwfn->mcp_info->lock);
 		spin_unlock_bh(&p_hwfn->mcp_info->lock);
 }
 }
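
Taken together, the lock/unlock pair brackets every mailbox command; a
simplified sketch of the calling pattern implied here (error handling
elided):

	rc = qed_mcp_mb_lock(p_hwfn, cmd);
	if (rc)
		return rc;
	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, &mcp_resp, &mcp_param);
	qed_mcp_mb_unlock(p_hwfn, cmd);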
 
 
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
-		  struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
 	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
 	u8 delay = CHIP_MCP_RESP_ITER_US;
 	u8 delay = CHIP_MCP_RESP_ITER_US;
@@ -326,7 +317,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
 		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
 		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
 	} else {
 	} else {
 		/* FW BUG! */
 		/* FW BUG! */
-		DP_ERR(p_hwfn, "MFW failed to respond!\n");
+		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+		       cmd, param);
 		*o_mcp_resp = 0;
 		*o_mcp_resp = 0;
 		rc = -EAGAIN;
 		rc = -EAGAIN;
 	}
 	}
@@ -342,7 +334,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 
 
 	/* MCP not initialized */
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
 	if (!qed_mcp_is_init(p_hwfn)) {
-		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 
 
@@ -399,8 +391,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
 }
 }
 
 
 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
-		     struct qed_ptt *p_ptt,
-		     u32 *p_load_code)
+		     struct qed_ptt *p_ptt, u32 *p_load_code)
 {
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_dev *cdev = p_hwfn->cdev;
 	struct qed_mcp_mb_params mb_params;
 	struct qed_mcp_mb_params mb_params;
@@ -527,8 +518,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
 		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
 		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
 		   transceiver_state,
 		   transceiver_state,
 		   (u32)(p_hwfn->mcp_info->port_addr +
 		   (u32)(p_hwfn->mcp_info->port_addr +
-			 offsetof(struct public_port,
-				  transceiver_data)));
+			  offsetof(struct public_port, transceiver_data)));
 
 
 	transceiver_state = GET_FIELD(transceiver_state,
 	transceiver_state = GET_FIELD(transceiver_state,
 				      ETH_TRANSCEIVER_STATE);
 				      ETH_TRANSCEIVER_STATE);
@@ -540,8 +530,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
 }
 }
 
 
 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
-				       struct qed_ptt *p_ptt,
-				       bool b_reset)
+				       struct qed_ptt *p_ptt, bool b_reset)
 {
 {
 	struct qed_mcp_link_state *p_link;
 	struct qed_mcp_link_state *p_link;
 	u8 max_bw, min_bw;
 	u8 max_bw, min_bw;
@@ -557,8 +546,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
 			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
 			   status,
 			   status,
 			   (u32)(p_hwfn->mcp_info->port_addr +
 			   (u32)(p_hwfn->mcp_info->port_addr +
-				 offsetof(struct public_port,
-					  link_status)));
+				 offsetof(struct public_port, link_status)));
 	} else {
 	} else {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
 			   "Resetting link indications\n");
 			   "Resetting link indications\n");
@@ -755,8 +743,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
 
 
 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt,
 				  struct qed_ptt *p_ptt,
-				  struct public_func *p_data,
-				  int pfid)
+				  struct public_func *p_data, int pfid)
 {
 {
 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
 					PUBLIC_FUNC);
 					PUBLIC_FUNC);
@@ -766,8 +753,7 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
 
 
 	memset(p_data, 0, sizeof(*p_data));
 	memset(p_data, 0, sizeof(*p_data));
 
 
-	size = min_t(u32, sizeof(*p_data),
-		     QED_SECTION_SIZE(mfw_path_offsize));
+	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
 	for (i = 0; i < size / sizeof(u32); i++)
 	for (i = 0; i < size / sizeof(u32); i++)
 		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
 		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
 					    func_addr + (i << 2));
 					    func_addr + (i << 2));
@@ -802,15 +788,13 @@ int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	struct qed_mcp_function_info *p_info;
 	struct qed_mcp_function_info *p_info;
 	struct public_func shmem_info;
 	struct public_func shmem_info;
 	u32 resp = 0, param = 0;
 	u32 resp = 0, param = 0;
 
 
-	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
-			       MCP_PF_ID(p_hwfn));
+	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
 
 
 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 
 
@@ -943,8 +927,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
 	return 0;
 	return 0;
 }
 }
 
 
-int qed_mcp_get_media_type(struct qed_dev *cdev,
-			   u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
 {
 {
 	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
 	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
 	struct qed_ptt  *p_ptt;
 	struct qed_ptt  *p_ptt;
@@ -953,7 +936,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
 		return -EINVAL;
 		return -EINVAL;
 
 
 	if (!qed_mcp_is_init(p_hwfn)) {
 	if (!qed_mcp_is_init(p_hwfn)) {
-		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 
 
@@ -1006,15 +989,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
 	struct qed_mcp_function_info *info;
 	struct qed_mcp_function_info *info;
 	struct public_func shmem_info;
 	struct public_func shmem_info;
 
 
-	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
-			       MCP_PF_ID(p_hwfn));
+	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
 	info = &p_hwfn->mcp_info->func_info;
 	info = &p_hwfn->mcp_info->func_info;
 
 
 	info->pause_on_host = (shmem_info.config &
 	info->pause_on_host = (shmem_info.config &
 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
 
 
-	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
-				    &info->protocol)) {
+	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
 		return -EINVAL;
 		return -EINVAL;
@@ -1075,15 +1056,13 @@ struct qed_mcp_link_capabilities
 	return &p_hwfn->mcp_info->link_capabilities;
 	return &p_hwfn->mcp_info->link_capabilities;
 }
 }
 
 
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
-		  struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 {
 	u32 resp = 0, param = 0;
 	u32 resp = 0, param = 0;
 	int rc;
 	int rc;
 
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
-			 DRV_MSG_CODE_NIG_DRAIN, 1000,
-			 &resp, &param);
+			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
 
 
 	/* Wait for the drain to complete before returning */
 	/* Wait for the drain to complete before returning */
 	msleep(1020);
 	msleep(1020);
@@ -1092,8 +1071,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
 }
 }
 
 
 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
-			   struct qed_ptt *p_ptt,
-			   u32 *p_flash_size)
+			   struct qed_ptt *p_ptt, u32 *p_flash_size)
 {
 {
 	u32 flash_size;
 	u32 flash_size;
 
 
@@ -1171,8 +1149,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 	return rc;
 	return rc;
 }
 }
 
 
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-		    enum qed_led_mode mode)
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
 {
 {
 	u32 resp = 0, param = 0, drv_mb_param;
 	u32 resp = 0, param = 0, drv_mb_param;
 	int rc;
 	int rc;

+ 5 - 10
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c

@@ -25,9 +25,7 @@
 
 
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			struct qed_spq_entry **pp_ent,
-			u8 cmd,
-			u8 protocol,
-			struct qed_sp_init_data *p_data)
+			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
 {
 {
 	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
 	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 
 
 	rc = qed_spq_get_entry(p_hwfn, pp_ent);
 	rc = qed_spq_get_entry(p_hwfn, pp_ent);
 
 
-	if (rc != 0)
+	if (rc)
 		return rc;
 		return rc;
 
 
 	p_ent = *pp_ent;
 	p_ent = *pp_ent;
@@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
 				 COMMON_RAMROD_PF_START,
 				 COMMON_RAMROD_PF_START,
-				 PROTOCOLID_COMMON,
-				 &init_data);
+				 PROTOCOLID_COMMON, &init_data);
 	if (rc)
 	if (rc)
 		return rc;
 		return rc;
 
 
@@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
 		       p_hwfn->p_consq->chain.pbl.p_phys_table);
 		       p_hwfn->p_consq->chain.pbl.p_phys_table);
 
 
-	qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
-				     &p_ramrod->tunnel_config);
+	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
 
 	if (IS_MF_SI(p_hwfn))
 	if (IS_MF_SI(p_hwfn))
 		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
 		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
 		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index,
-		   p_ramrod->outer_tag);
+		   sb, sb_index, p_ramrod->outer_tag);
 
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
 

+ 39 - 56
drivers/net/ethernet/qlogic/qed/qed_spq.c

@@ -41,8 +41,7 @@
 ***************************************************************************/
 static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
 				void *cookie,
-				union event_ring_data *data,
-				u8 fw_return_code)
+				union event_ring_data *data, u8 fw_return_code)
 {
 	struct qed_spq_comp_done *comp_done;
 
@@ -109,9 +108,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
-		   struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+			      struct qed_spq_entry *p_ent)
 {
 	p_ent->flags = 0;
 
@@ -189,8 +187,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
-			   struct qed_spq *p_spq,
-			   struct qed_spq_entry *p_ent)
+			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 {
 	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
 	u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -255,8 +252,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * EQ API
 ***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
-			u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 {
 	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
 		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +263,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
 	mmiowb();
 }
 
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
-		      void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 {
 	struct qed_eq *p_eq = cookie;
 	struct qed_chain *p_chain = &p_eq->chain;
@@ -323,8 +317,7 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
-			    u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 {
 	struct qed_eq *p_eq;
 
@@ -348,11 +341,8 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
 	}
 
 	/* register EQ completion on the SP SB */
-	qed_int_register_cb(p_hwfn,
-			    qed_eq_completion,
-			    p_eq,
-			    &p_eq->eq_sb_index,
-			    &p_eq->p_fw_cons);
+	qed_int_register_cb(p_hwfn, qed_eq_completion,
+			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 
 	return p_eq;
 
@@ -361,14 +351,12 @@ eq_allocate_fail:
 	return NULL;
 }
 
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
-		  struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
 {
 	qed_chain_reset(&p_eq->chain);
 }
 
-void qed_eq_free(struct qed_hwfn *p_hwfn,
-		 struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
 {
 	if (!p_eq)
 		return;
@@ -379,10 +367,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
-static int qed_cqe_completion(
-	struct qed_hwfn *p_hwfn,
-	struct eth_slow_path_rx_cqe *cqe,
-	enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+			      struct eth_slow_path_rx_cqe *cqe,
+			      enum protocol_type protocol)
 {
 	if (IS_VF(p_hwfn->cdev))
 		return 0;
@@ -463,8 +450,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 	u32 capacity;
 
 	/* SPQ struct */
-	p_spq =
-		kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
 	if (!p_spq) {
 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
 		return -ENOMEM;
@@ -525,9 +511,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
 	kfree(p_spq);
 }
 
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
-		  struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 	struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +522,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
 	if (list_empty(&p_spq->free_pool)) {
 		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
 		if (!p_ent) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate an SPQ entry for a pending ramrod\n");
 			rc = -ENOMEM;
 			goto out_unlock;
 		}
 		p_ent->queue = &p_spq->unlimited_pending;
 	} else {
 		p_ent = list_first_entry(&p_spq->free_pool,
-					 struct qed_spq_entry,
-					 list);
+					 struct qed_spq_entry, list);
 		list_del(&p_ent->list);
 		p_ent->queue = &p_spq->pending;
 	}
@@ -564,8 +549,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 }
 
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
-			  struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 {
 	spin_lock_bh(&p_hwfn->p_spq->lock);
 	__qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +570,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
  *
  * @return int
  */
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
-		  struct qed_spq_entry *p_ent,
-		  enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+			     struct qed_spq_entry *p_ent,
+			     enum spq_priority priority)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 
@@ -604,8 +587,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry *p_en2;
 
 			p_en2 = list_first_entry(&p_spq->free_pool,
-						 struct qed_spq_entry,
-						 list);
+						 struct qed_spq_entry, list);
 			list_del(&p_en2->list);
 
 			/* Copy the ring element physical pointer to the new
@@ -655,8 +637,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 * Posting new Ramrods
 ***************************************************************************/
 static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
-			     struct list_head *head,
-			     u32 keep_reserve)
+			     struct list_head *head, u32 keep_reserve)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 	int rc;
@@ -690,8 +671,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 			break;
 
 		p_ent = list_first_entry(&p_spq->unlimited_pending,
-					 struct qed_spq_entry,
-					 list);
+					 struct qed_spq_entry, list);
 		if (!p_ent)
 			return -EINVAL;
 
@@ -705,8 +685,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 }
 
 int qed_spq_post(struct qed_hwfn *p_hwfn,
-		 struct qed_spq_entry *p_ent,
-		 u8 *fw_return_code)
+		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
 	int rc = 0;
 	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -803,8 +782,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 
 	spin_lock_bh(&p_spq->lock);
-	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
-				 list) {
+	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 		if (p_ent->elem.hdr.echo == echo) {
 			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 
@@ -846,15 +824,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 
 	if (!found) {
 		DP_NOTICE(p_hwfn,
-			  "Failed to find an entry this EQE completes\n");
+			  "Failed to find an entry this EQE [echo %04x] completes\n",
+			  le16_to_cpu(echo));
 		return -EEXIST;
 	}
 
-	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
+		   le16_to_cpu(echo),
 		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
 	if (found->comp_cb.function)
 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 					fw_return_code);
+	else
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SPQ,
+			   "Got a completion without a callback function\n");
 
 	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
 	    (found->queue == &p_spq->unlimited_pending))
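
The echo handling above relies on the field being little-endian on the wire: two __le16 values can be compared raw, but le16_to_cpu() is needed before the value is printed or used as an index. A minimal sketch of the pattern, using an illustrative struct rather than the real HSI definition:

#include <linux/types.h>
#include <linux/printk.h>

struct demo_eqe_hdr {		/* hypothetical stand-in for the FW EQE header */
	__le16 echo;		/* producer index echoed back by the firmware */
};

static void demo_match(struct demo_eqe_hdr *hdr, __le16 ring_echo)
{
	/* Both sides are __le16, so a raw comparison is endian-safe. */
	if (hdr->echo == ring_echo)
		pr_info("echo %04x matched\n", le16_to_cpu(hdr->echo));
}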
@@ -901,14 +886,12 @@ consq_allocate_fail:
 	return NULL;
 }
 
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
-		     struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
 {
 	qed_chain_reset(&p_consq->chain);
 }
 
-void qed_consq_free(struct qed_hwfn *p_hwfn,
-		    struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
 {
 	if (!p_consq)
 		return;

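The free-pool handling in qed_spq_get_entry()/qed_spq_add_entry() above is the standard <linux/list.h> intrusive-list idiom. A minimal self-contained sketch of that idiom, with illustrative demo_* names:

#include <linux/list.h>

struct demo_entry {
	struct list_head list;	/* links the entry into a pool */
	int id;
};

/* Pop the first entry from a free pool, or return NULL if it is empty. */
static struct demo_entry *demo_pool_get(struct list_head *free_pool)
{
	struct demo_entry *e;

	if (list_empty(free_pool))
		return NULL;

	e = list_first_entry(free_pool, struct demo_entry, list);
	list_del(&e->list);
	return e;
}

/* Return an entry to the pool for reuse. */
static void demo_pool_put(struct list_head *free_pool, struct demo_entry *e)
{
	list_add_tail(&e->list, free_pool);
}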
+ 25 - 31
drivers/net/ethernet/qlogic/qed/qed_sriov.c

@@ -699,7 +699,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
 				&qzone_id);
 
 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
-		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
 		qed_wr(p_hwfn, p_ptt, reg_addr, val);
 	}
 }
@@ -1090,13 +1090,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
 
 	/* Prepare response for all extended tlvs if they are found by PF */
 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
-		if (!(tlvs_mask & (1 << i)))
+		if (!(tlvs_mask & BIT(i)))
 			continue;
 
 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
 
-		if (tlvs_accepted & (1 << i))
+		if (tlvs_accepted & BIT(i))
 			resp->hdr.status = status;
 		else
 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1334,8 +1334,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	pfdev_info->fw_minor = FW_MINOR_VERSION;
 	pfdev_info->fw_rev = FW_REVISION_VERSION;
 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
-	pfdev_info->minor_fp_hsi = min_t(u8,
-					 ETH_HSI_VER_MINOR,
+	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
 					 req->vfdev_info.eth_fp_hsi_minor);
 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1437,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
 
 		filter.type = QED_FILTER_VLAN;
 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
-		DP_VERBOSE(p_hwfn,
-			   QED_MSG_IOV,
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
 			   filter.vlan, p_vf->relative_vf_id);
-		rc = qed_sp_eth_filter_ucast(p_hwfn,
-					     p_vf->opaque_fid,
-					     &filter,
-					     QED_SPQ_MODE_CB, NULL);
+		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+					     &filter, QED_SPQ_MODE_CB, NULL);
 		if (rc) {
 			DP_NOTICE(p_hwfn,
 				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1459,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
 {
 	int rc = 0;
 
-	if ((events & (1 << VLAN_ADDR_FORCED)) &&
+	if ((events & BIT(VLAN_ADDR_FORCED)) &&
 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
 		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
 
@@ -1479,7 +1475,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
 	if (!p_vf->vport_instance)
 		return -EINVAL;
 
-	if (events & (1 << MAC_ADDR_FORCED)) {
+	if (events & BIT(MAC_ADDR_FORCED)) {
 		/* Since there's no way [currently] of removing the MAC,
 		 * we can always assume this means we need to force it.
 		 */
@@ -1502,7 +1498,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
 		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
 	}
 
-	if (events & (1 << VLAN_ADDR_FORCED)) {
+	if (events & BIT(VLAN_ADDR_FORCED)) {
 		struct qed_sp_vport_update_params vport_update;
 		u8 removal;
 		int i;
@@ -1572,7 +1568,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
 		if (filter.vlan)
 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
 		else
-			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
 	}
 
 	/* If forced features are terminated, we need to configure the shadow
@@ -1619,8 +1615,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 
 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
 				    start->sb_addr[sb_id],
-				    vf->igu_sbs[sb_id],
-				    vf->abs_vf_id, 1);
+				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
 	}
 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
@@ -1632,7 +1627,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	 * vfs that would still be fine, since they passed '0' as padding].
 	 */
 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
-	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
 		u8 vf_req = start->only_untagged;
 
 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1652,7 +1647,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	params.mtu = vf->mtu;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
-	if (rc != 0) {
+	if (rc) {
 		DP_ERR(p_hwfn,
 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
 		status = PFVF_STATUS_FAILURE;
@@ -1679,7 +1674,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 	vf->spoof_chk = false;
 
 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
-	if (rc != 0) {
+	if (rc) {
 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
 		       rc);
 		status = PFVF_STATUS_FAILURE;
@@ -2045,7 +2040,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
 
 	/* Ignore the VF request if we're forcing a vlan */
-	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
 		p_data->update_inner_vlan_removal_flg = 1;
 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
 	}
@@ -2340,7 +2335,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
 	/* In forced mode, we're willing to remove entries - but we don't add
 	 * new ones.
 	 */
-	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;
 
 	if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2369,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
 	int i;
 
 	/* If we're in forced-mode, we don't allow any change */
-	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
 		return 0;
 
 	/* First remove entries and then add new ones */
@@ -2509,7 +2504,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Determine if the unicast filtering is acceptible by PF */
-	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
 	    (params.type == QED_FILTER_VLAN ||
 	     params.type == QED_FILTER_MAC_VLAN)) {
 		/* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2516,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
 		goto out;
 	}
 
-	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
 	    (params.type == QED_FILTER_MAC ||
 	     params.type == QED_FILTER_MAC_VLAN)) {
 		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2749,7 +2744,7 @@ cleanup:
 		/* Mark VF for ack and clean pending state */
 		if (p_vf->state == VF_RESET)
 			p_vf->state = VF_STOPPED;
-		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+		ack_vfs[vfid / 32] |= BIT((vfid % 32));
 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 		    ~(1ULL << (rel_vf_id % 64));
 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
@@ -2805,7 +2800,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 			continue;
 
 		vfid = p_vf->abs_vf_id;
-		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
 			u16 rel_vf_id = p_vf->relative_vf_id;
 
@@ -3064,8 +3059,7 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
 
 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
 	/* Forced MAC will disable MAC_ADDR */
-	vf_info->bulletin.p_virt->valid_bitmap &=
-				~(1 << VFPF_BULLETIN_MAC_ADDR);
+	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
 
 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 }
@@ -3163,7 +3157,7 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
 	if (!p_vf || !p_vf->bulletin.p_virt)
 		return NULL;
 
-	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
 		return NULL;
 
 	return p_vf->bulletin.p_virt->mac;
@@ -3177,7 +3171,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
 	if (!p_vf || !p_vf->bulletin.p_virt)
 		return 0;
 
-	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
 		return 0;
 
 	return p_vf->bulletin.p_virt->pvid;

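Most of the qed_sriov.c changes replace open-coded (1 << n) with the kernel's BIT() macro, which in this era lives in <linux/bitops.h> and expands to (1UL << (nr)). For the bit positions used here the value is unchanged; the only nuance is the promotion to unsigned long. A small sketch of the equivalence, with illustrative flag names:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative bit positions; not taken from the driver. */
#define DEMO_FLAG_A 3
#define DEMO_FLAG_B 8

static u32 demo_set_and_clear(u32 mask)
{
	mask |= BIT(DEMO_FLAG_A);	/* same value as (1 << 3) */
	mask &= ~BIT(DEMO_FLAG_B);	/* same u32 result as ~(1 << 8) */
	return mask;
}

static bool demo_flag_a_set(u32 mask)
{
	return mask & BIT(DEMO_FLAG_A);	/* bit test reads identically */
}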
+ 2 - 4
drivers/net/ethernet/qlogic/qede/qede_ethtool.c

@@ -440,8 +440,7 @@ static u32 qede_get_msglevel(struct net_device *ndev)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
 
-	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
-	       edev->dp_module;
+	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
 }
 
 static void qede_set_msglevel(struct net_device *ndev, u32 level)
@@ -465,8 +464,7 @@ static int qede_nway_reset(struct net_device *dev)
 	struct qed_link_params link_params;
 
 	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
-		DP_INFO(edev,
-			"Link settings are not allowed to be changed\n");
+		DP_INFO(edev, "Link settings are not allowed to be changed\n");
 		return -EOPNOTSUPP;
 	}
 

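qede_get_msglevel() above packs the debug level and the module mask into the single u32 that ethtool exposes. A sketch of that pack/unpack scheme — the shift constant here is illustrative, while the driver itself uses QED_LOG_LEVEL_SHIFT from the qed headers:

#include <linux/types.h>

#define DEMO_LOG_LEVEL_SHIFT 30	/* illustrative; qede uses QED_LOG_LEVEL_SHIFT */

static u32 demo_pack_msglevel(u8 level, u32 module_mask)
{
	return ((u32)level << DEMO_LOG_LEVEL_SHIFT) | module_mask;
}

static void demo_unpack_msglevel(u32 msglevel, u8 *level, u32 *module_mask)
{
	*level = msglevel >> DEMO_LOG_LEVEL_SHIFT;
	*module_mask = msglevel & ((1U << DEMO_LOG_LEVEL_SHIFT) - 1);
}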
+ 58 - 86
drivers/net/ethernet/qlogic/qede/qede_main.c

@@ -222,7 +222,7 @@ int __init qede_init(void)
 {
 	int ret;
 
-	pr_notice("qede_init: %s\n", version);
+	pr_info("qede_init: %s\n", version);
 
 	qed_ops = qed_get_eth_ops();
 	if (!qed_ops) {
@@ -253,7 +253,8 @@ int __init qede_init(void)
 
 static void __exit qede_cleanup(void)
 {
-	pr_notice("qede_cleanup called\n");
+	if (debug & QED_LOG_INFO_MASK)
+		pr_info("qede_cleanup called\n");
 
 	unregister_netdevice_notifier(&qede_netdev_notifier);
 	pci_unregister_driver(&qede_pci_driver);
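
The qede_cleanup() change above stops printing unconditionally at notice level and instead gates an info-level print on the module's debug parameter. A minimal sketch of that pattern (the mask value and demo_* names are illustrative; qede tests QED_LOG_INFO_MASK against its existing debug module parameter):

#include <linux/module.h>
#include <linux/printk.h>

#define DEMO_LOG_INFO_MASK 0x1	/* illustrative stand-in for QED_LOG_INFO_MASK */

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Default debug msglevel");

static void demo_exit_path(void)
{
	/* Only chatty when the user asked for info-level output. */
	if (debug & DEMO_LOG_INFO_MASK)
		pr_info("demo: cleanup called\n");
}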
@@ -270,8 +271,7 @@ module_exit(qede_cleanup);
 
 /* Unmap the data and free skb */
 static int qede_free_tx_pkt(struct qede_dev *edev,
-			    struct qede_tx_queue *txq,
-			    int *len)
+			    struct qede_tx_queue *txq, int *len)
 {
 	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
 	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -329,8 +329,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 				    struct qede_tx_queue *txq,
 				    struct eth_tx_1st_bd *first_bd,
-				    int nbd,
-				    bool data_split)
+				    int nbd, bool data_split)
 {
 	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
 	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -339,8 +338,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
 	/* Return prod to its position before this skb was handled */
 	qed_chain_set_prod(&txq->tx_pbl,
-			   le16_to_cpu(txq->tx_db.data.bd_prod),
-			   first_bd);
+			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
 
 	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
 
@@ -366,8 +364,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
 	/* Return again prod to its position before this skb was handled */
 	qed_chain_set_prod(&txq->tx_pbl,
-			   le16_to_cpu(txq->tx_db.data.bd_prod),
-			   first_bd);
+			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
 
 	/* Free skb */
 	dev_kfree_skb_any(skb);
@@ -376,8 +373,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 }
 
 static u32 qede_xmit_type(struct qede_dev *edev,
-			  struct sk_buff *skb,
-			  int *ipv6_ext)
+			  struct sk_buff *skb, int *ipv6_ext)
 {
 	u32 rc = XMIT_L4_CSUM;
 	__be16 l3_proto;
@@ -434,15 +430,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
 }
 
 static int map_frag_to_bd(struct qede_dev *edev,
-			  skb_frag_t *frag,
-			  struct eth_tx_bd *bd)
+			  skb_frag_t *frag, struct eth_tx_bd *bd)
 {
 	dma_addr_t mapping;
 
 	/* Map skb non-linear frag data for DMA */
 	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
-				   skb_frag_size(frag),
-				   DMA_TO_DEVICE);
+				   skb_frag_size(frag), DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
 		return -ENOMEM;
@@ -504,9 +498,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
 }
 
 /* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
-			    struct net_device *ndev)
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+				   struct net_device *ndev)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
 	struct netdev_queue *netdev_txq;
@@ -530,8 +523,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	txq = QEDE_TX_QUEUE(edev, txq_index);
 	netdev_txq = netdev_get_tx_queue(ndev, txq_index);
 
-	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
-			       (MAX_SKB_FRAGS + 1));
+	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
 
 	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
 
@@ -761,8 +753,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
 }
 
-static int qede_tx_int(struct qede_dev *edev,
-		       struct qede_tx_queue *txq)
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
 	struct netdev_queue *netdev_txq;
 	u16 hw_bd_cons;
@@ -960,8 +951,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
 
 static u32 qede_get_rxhash(struct qede_dev *edev,
 			   u8 bitfields,
-			   __le32 rss_hash,
-			   enum pkt_hash_types *rxhash_type)
+			   __le32 rss_hash, enum pkt_hash_types *rxhash_type)
 {
 	enum rss_hash_type htype;
 
@@ -990,12 +980,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 static inline void qede_skb_receive(struct qede_dev *edev,
 				    struct qede_fastpath *fp,
-				    struct sk_buff *skb,
-				    u16 vlan_tag)
+				    struct sk_buff *skb, u16 vlan_tag)
 {
 	if (vlan_tag)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-				       vlan_tag);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
 	napi_gro_receive(&fp->napi, skb);
 }
@@ -1018,8 +1006,7 @@ static void qede_set_gro_params(struct qede_dev *edev,
 
 static int qede_fill_frag_skb(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq,
-			      u8 tpa_agg_index,
-			      u16 len_on_bd)
+			      u8 tpa_agg_index, u16 len_on_bd)
 {
 	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
 							 NUM_RX_BDS_MAX];
@@ -1467,7 +1454,7 @@ alloc_skb:
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
-				  "Build_skb failed, dropping incoming packet\n");
+				  "skb allocation failed, dropping incoming packet\n");
 			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
 			goto next_cqe;
@@ -1575,8 +1562,7 @@ alloc_skb:
 		skb->protocol = eth_type_trans(skb, edev->ndev);
 
 		rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
-					  fp_cqe->rss_hash,
-					  &rxhash_type);
+					  fp_cqe->rss_hash, &rxhash_type);
 
 		skb_set_hash(skb, rx_hash, rxhash_type);
 
@@ -1787,9 +1773,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
 }
 
-static struct rtnl_link_stats64 *qede_get_stats64(
-			    struct net_device *dev,
-			    struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+					   struct rtnl_link_stats64 *stats)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 
@@ -2103,8 +2089,7 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
 		}
 
 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-			   "marked vlan %d as non-configured\n",
-			   vlan->vid);
+			   "marked vlan %d as non-configured\n", vlan->vid);
 	}
 
 	edev->accept_any_vlan = false;
@@ -2146,7 +2131,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
 
 		edev->vxlan_dst_port = t_port;
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
 			   t_port);
 
 		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2157,7 +2142,7 @@
 
 		edev->geneve_dst_port = t_port;
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
 			   t_port);
 		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
 		break;
@@ -2181,7 +2166,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
 
 		edev->vxlan_dst_port = 0;
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
 			   t_port);
 
 		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2192,7 +2177,7 @@
 
 		edev->geneve_dst_port = 0;
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
 			   t_port);
 		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
 		break;
@@ -2237,15 +2222,13 @@ static const struct net_device_ops qede_netdev_ops = {
 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 					    struct pci_dev *pdev,
 					    struct qed_dev_eth_info *info,
-					    u32 dp_module,
-					    u8 dp_level)
+					    u32 dp_module, u8 dp_level)
 {
 	struct net_device *ndev;
 	struct qede_dev *edev;
 
 	ndev = alloc_etherdev_mqs(sizeof(*edev),
-				  info->num_queues,
-				  info->num_queues);
+				  info->num_queues, info->num_queues);
 	if (!ndev) {
 		pr_err("etherdev allocation failed\n");
 		return NULL;
@@ -2261,6 +2244,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
+	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+		info->num_queues, info->num_queues);
+
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2453,7 +2439,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 			bool is_vf, enum qede_probe_mode mode)
 {
 	struct qed_probe_params probe_params;
-	struct qed_slowpath_params params;
+	struct qed_slowpath_params sp_params;
 	struct qed_dev_eth_info dev_info;
 	struct qede_dev *edev;
 	struct qed_dev *cdev;
@@ -2476,14 +2462,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	qede_update_pf_params(cdev);
 
 	/* Start the Slowpath-process */
-	memset(&params, 0, sizeof(struct qed_slowpath_params));
-	params.int_mode = QED_INT_MODE_MSIX;
-	params.drv_major = QEDE_MAJOR_VERSION;
-	params.drv_minor = QEDE_MINOR_VERSION;
-	params.drv_rev = QEDE_REVISION_VERSION;
-	params.drv_eng = QEDE_ENGINEERING_VERSION;
-	strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
-	rc = qed_ops->common->slowpath_start(cdev, &params);
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.int_mode = QED_INT_MODE_MSIX;
+	sp_params.drv_major = QEDE_MAJOR_VERSION;
+	sp_params.drv_minor = QEDE_MINOR_VERSION;
+	sp_params.drv_rev = QEDE_REVISION_VERSION;
+	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
 	if (rc) {
 		pr_notice("Cannot start slowpath\n");
 		goto err1;
@@ -2586,7 +2572,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 	qed_ops->common->slowpath_stop(cdev);
 	qed_ops->common->remove(cdev);
 
-	pr_notice("Ending successfully qede_remove\n");
+	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
 }
 
 static void qede_remove(struct pci_dev *pdev)
@@ -2634,16 +2620,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
 
 /* This function allocates fast-path status block memory */
 static int qede_alloc_mem_sb(struct qede_dev *edev,
-			     struct qed_sb_info *sb_info,
-			     u16 sb_id)
+			     struct qed_sb_info *sb_info, u16 sb_id)
 {
 	struct status_block *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
 	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
-				     sizeof(*sb_virt),
-				     &sb_phys, GFP_KERNEL);
+				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
 	if (!sb_virt) {
 		DP_ERR(edev, "Status block allocation failed\n");
 		return -ENOMEM;
@@ -2675,16 +2659,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
 		data = rx_buf->data;
 
 		dma_unmap_page(&edev->pdev->dev,
-			       rx_buf->mapping,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+			       rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
 
 		rx_buf->data = NULL;
 		__free_page(data);
 	}
 }
 
-static void qede_free_sge_mem(struct qede_dev *edev,
-			      struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
 	int i;
 
 	if (edev->gro_disable)
@@ -2703,8 +2686,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 	}
 }
 
-static void qede_free_mem_rxq(struct qede_dev *edev,
-			      struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
 	qede_free_sge_mem(edev, rxq);
 
@@ -2726,9 +2708,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
 	struct eth_rx_bd *rx_bd;
 	dma_addr_t mapping;
 	struct page *data;
-	u16 rx_buf_size;
-
-	rx_buf_size = rxq->rx_buf_size;
 
 	data = alloc_pages(GFP_ATOMIC, 0);
 	if (unlikely(!data)) {
@@ -2763,8 +2742,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
 	return 0;
 }
 
-static int qede_alloc_sge_mem(struct qede_dev *edev,
-			      struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
 	dma_addr_t mapping;
 	int i;
@@ -2811,15 +2789,14 @@ err:
 }
 
 /* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
-			      struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
 	int i, rc, size;
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
-	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
-			   edev->ndev->mtu;
+	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
 	if (rxq->rx_buf_size > PAGE_SIZE)
 		rxq->rx_buf_size = PAGE_SIZE;
 
@@ -2873,8 +2850,7 @@ err:
 	return rc;
 }
 
-static void qede_free_mem_txq(struct qede_dev *edev,
-			      struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
 	/* Free the parallel SW ring */
 	kfree(txq->sw_tx_ring);
@@ -2884,8 +2860,7 @@ static void qede_free_mem_txq(struct qede_dev *edev,
 }
 
 /* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
-			      struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
 	int size, rc;
 	union eth_tx_bd_types *p_virt;
@@ -2917,8 +2892,7 @@ err:
 }
 
 /* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
-			     struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
 	int tc;
 
@@ -2933,8 +2907,7 @@ static void qede_free_mem_fp(struct qede_dev *edev,
 /* This function allocates all memory needed for a single fp (i.e. an entity
  * which contains status block, one rx queue and multiple per-TC tx queues.
  */
-static int qede_alloc_mem_fp(struct qede_dev *edev,
-			     struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
 	int rc, tc;
 
@@ -3146,8 +3119,7 @@ static int qede_setup_irqs(struct qede_dev *edev)
 }
 
 static int qede_drain_txq(struct qede_dev *edev,
-			  struct qede_tx_queue *txq,
-			  bool allow_drain)
+			  struct qede_tx_queue *txq, bool allow_drain)
 {
 	int rc, cnt = 1000;