
Merge branch 'qed-IOV-cleanups'

Yuval Mintz says:

====================
qed: IOV related cleanups

This patch series targets IOV functionality [on both PF and VF].

Patches #2, #3 and #5 fix flows relating to malicious VFs, either by
upgrading and aligning the current safeguards or by correcting racy flows.

Patches #1 and #8 make some malicious/dysfunctional VF logging appear
in the logs by default.

The rest of the patches either clean up the existing code or correct
some possible [yet fairly insignificant] issues in VF behavior.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 8 years ago
commit 592d42ac7f

+ 190 - 96
drivers/net/ethernet/qlogic/qed/qed_sriov.c

@@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
 	return vf;
 }
 
+enum qed_iov_validate_q_mode {
+	QED_IOV_VALIDATE_Q_NA,
+	QED_IOV_VALIDATE_Q_ENABLE,
+	QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+					struct qed_vf_info *p_vf,
+					u16 qid,
+					enum qed_iov_validate_q_mode mode,
+					bool b_is_tx)
+{
+	if (mode == QED_IOV_VALIDATE_Q_NA)
+		return true;
+
+	if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+	    (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+		return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+	/* In case we haven't found any valid cid, then its disabled */
+	return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
+
 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
-				 struct qed_vf_info *p_vf, u16 rx_qid)
+				 struct qed_vf_info *p_vf,
+				 u16 rx_qid,
+				 enum qed_iov_validate_q_mode mode)
 {
-	if (rx_qid >= p_vf->num_rxqs)
+	if (rx_qid >= p_vf->num_rxqs) {
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_IOV,
 			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
-	return rx_qid < p_vf->num_rxqs;
+		return false;
+	}
+
+	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
 }
 
 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
-				 struct qed_vf_info *p_vf, u16 tx_qid)
+				 struct qed_vf_info *p_vf,
+				 u16 tx_qid,
+				 enum qed_iov_validate_q_mode mode)
 {
-	if (tx_qid >= p_vf->num_txqs)
+	if (tx_qid >= p_vf->num_txqs) {
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_IOV,
 			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
-	return tx_qid < p_vf->num_txqs;
+		return false;
+	}
+
+	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
 }
 
 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
@@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
 	return false;
 }
 
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+					struct qed_vf_info *p_vf)
+{
+	u8 i;
+
+	for (i = 0; i < p_vf->num_rxqs; i++)
+		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+						QED_IOV_VALIDATE_Q_ENABLE,
+						false))
+			return true;
+
+	return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+					struct qed_vf_info *p_vf)
+{
+	u8 i;
+
+	for (i = 0; i < p_vf->num_txqs; i++)
+		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+						QED_IOV_VALIDATE_Q_ENABLE,
+						true))
+			return true;
+
+	return false;
+}
+
 static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
 				    int vfid, struct qed_ptt *p_ptt)
 {
@@ -693,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
 	int rc;
 
+	/* It's possible VF was previously considered malicious -
+	 * clear the indication even if we're only going to disable VF.
+	 */
+	vf->b_malicious = false;
+
 	if (vf->to_disable)
 		return 0;
 
@@ -705,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
 
 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
 
-	/* It's possible VF was previously considered malicious */
-	vf->b_malicious = false;
-
 	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
 	if (rc)
 		return rc;
@@ -1134,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
 			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
 			   &params);
 
-	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
-			   mbx->req_virt->first_tlv.reply_address,
-			   sizeof(u64) / 4, &params);
-
+	/* Once PF copies the rc to the VF, the latter can continue
+	 * and send an additional message. So we have to make sure the
+	 * channel would be re-set to ready prior to that.
+	 */
 	REG_WR(p_hwfn,
 	       GTT_BAR0_MAP_REG_USDM_RAM +
 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+			   mbx->req_virt->first_tlv.reply_address,
+			   sizeof(u64) / 4, &params);
 }
 
 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
@@ -1749,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	vf->state = VF_ENABLED;
 	start = &mbx->req_virt->start_vport;
 
+	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
 	/* Initialize Status block in CAU */
 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
 		if (!start->sb_addr[sb_id]) {
@@ -1762,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 				    start->sb_addr[sb_id],
 				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
 	}
-	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
 	vf->mtu = start->mtu;
 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1819,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 	vf->vport_instance--;
 	vf->spoof_chk = false;
 
+	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
+		vf->b_malicious = true;
+		DP_NOTICE(p_hwfn,
+			  "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+			  vf->abs_vf_id);
+		status = PFVF_STATUS_MALICIOUS;
+		goto out;
+	}
+
 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
 	if (rc) {
 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
@@ -1830,6 +1908,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 	vf->configured_features = 0;
 	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 
+out:
 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
 			     sizeof(struct pfvf_def_resp_tlv), status);
 }
@@ -1886,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 
 	req = &mbx->req_virt->start_rxq;
 
-	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+				  QED_IOV_VALIDATE_Q_DISABLE) ||
 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
@@ -2000,7 +2080,8 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
 	memset(&params, 0, sizeof(params));
 	req = &mbx->req_virt->start_txq;
 
-	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+				  QED_IOV_VALIDATE_Q_DISABLE) ||
 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
@@ -2037,57 +2118,53 @@ out:
 
 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
 				struct qed_vf_info *vf,
-				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+				u16 rxq_id, bool cqe_completion)
 {
 	struct qed_vf_q_info *p_queue;
 	int rc = 0;
-	int qid;
 
-	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+				  QED_IOV_VALIDATE_Q_ENABLE)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+			   vf->relative_vf_id, rxq_id);
 		return -EINVAL;
+	}
 
-	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-		p_queue = &vf->vf_queues[qid];
+	p_queue = &vf->vf_queues[rxq_id];
 
-		if (!p_queue->p_rx_cid)
-			continue;
-
-		rc = qed_eth_rx_queue_stop(p_hwfn,
-					   p_queue->p_rx_cid,
-					   false, cqe_completion);
-		if (rc)
-			return rc;
+	rc = qed_eth_rx_queue_stop(p_hwfn,
+				   p_queue->p_rx_cid,
+				   false, cqe_completion);
+	if (rc)
+		return rc;
 
-		vf->vf_queues[qid].p_rx_cid = NULL;
-		vf->num_active_rxqs--;
-	}
+	p_queue->p_rx_cid = NULL;
+	vf->num_active_rxqs--;
 
-	return rc;
+	return 0;
 }
 
 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
-				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+				struct qed_vf_info *vf, u16 txq_id)
 {
-	int rc = 0;
 	struct qed_vf_q_info *p_queue;
-	int qid;
+	int rc = 0;
 
-	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+				  QED_IOV_VALIDATE_Q_ENABLE))
 		return -EINVAL;
 
-	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-		p_queue = &vf->vf_queues[qid];
-		if (!p_queue->p_tx_cid)
-			continue;
+	p_queue = &vf->vf_queues[txq_id];
 
-		rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
-		if (rc)
-			return rc;
+	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+	if (rc)
+		return rc;
 
-		p_queue->p_tx_cid = NULL;
-	}
+	p_queue->p_tx_cid = NULL;
 
-	return rc;
+	return 0;
 }
 
 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2096,20 +2173,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
 {
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	struct vfpf_stop_rxqs_tlv *req;
 	int rc;
 
-	/* We give the option of starting from qid != 0, in this case we
-	 * need to make sure that qid + num_qs doesn't exceed the actual
-	 * amount of queues that exist.
+	/* There has never been an official driver that used this interface
+	 * for stopping multiple queues, and it is now considered deprecated.
+	 * Validate this isn't used here.
 	 */
 	req = &mbx->req_virt->stop_rxqs;
-	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-				  req->num_rxqs, req->cqe_completion);
-	if (rc)
-		status = PFVF_STATUS_FAILURE;
+	if (req->num_rxqs != 1) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
+			   vf->relative_vf_id);
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
 
+	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+				  req->cqe_completion);
+	if (!rc)
+		status = PFVF_STATUS_SUCCESS;
+out:
 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
 			     length, status);
 }
@@ -2120,19 +2205,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
 {
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	struct vfpf_stop_txqs_tlv *req;
 	int rc;
 
-	/* We give the option of starting from qid != 0, in this case we
-	 * need to make sure that qid + num_qs doesn't exceed the actual
-	 * amount of queues that exist.
+	/* There has never been an official driver that used this interface
+	 * for stopping multiple queues, and it is now considered deprecated.
+	 * Validate this isn't used here.
 	 */
 	req = &mbx->req_virt->stop_txqs;
-	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-	if (rc)
-		status = PFVF_STATUS_FAILURE;
+	if (req->num_txqs != 1) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
+			   vf->relative_vf_id);
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+	if (!rc)
+		status = PFVF_STATUS_SUCCESS;
 
+out:
 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
 			     length, status);
 }
@@ -2157,22 +2250,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
 	/* Validate inputs */
-	if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
-	    !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
-		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
-		goto out;
-	}
-
-	for (i = 0; i < req->num_rxqs; i++) {
-		qid = req->rx_qid + i;
-		if (!vf->vf_queues[qid].p_rx_cid) {
-			DP_INFO(p_hwfn,
-				"VF[%d] rx_qid = %d isn`t active!\n",
-				vf->relative_vf_id, qid);
+	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+					  QED_IOV_VALIDATE_Q_ENABLE)) {
+			DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+				vf->relative_vf_id, req->rx_qid, req->num_rxqs);
 			goto out;
 		}
 
+	/* Prepare the handlers */
+	for (i = 0; i < req->num_rxqs; i++) {
+		qid = req->rx_qid + i;
 		handlers[i] = vf->vf_queues[qid].p_rx_cid;
 	}
 
 
 
 	for (i = 0; i < table_size; i++) {
 	for (i = 0; i < table_size; i++) {
 		q_idx = p_rss_tlv->rss_ind_table[i];
 		q_idx = p_rss_tlv->rss_ind_table[i];
-		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+					  QED_IOV_VALIDATE_Q_ENABLE)) {
 			DP_VERBOSE(p_hwfn,
 				   QED_MSG_IOV,
 				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
@@ -2397,15 +2486,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
 			goto out;
 		}
 
-		if (!vf->vf_queues[q_idx].p_rx_cid) {
-			DP_VERBOSE(p_hwfn,
-				   QED_MSG_IOV,
-				   "VF[%d]: Omitting RSS due to inactive queue %08x\n",
-				   vf->relative_vf_id, q_idx);
-			b_reject = true;
-			goto out;
-		}
-
 		p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
 	}
 
@@ -3058,9 +3138,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	return rc;
 }
 
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 {
-	u16 i, found = 0;
+	bool found = false;
+	u16 i;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3070,7 +3151,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 
 	if (!p_hwfn->cdev->p_iov_info) {
 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
-		return 0;
+		return false;
 	}
 
 	/* Mark VFs */
@@ -3099,7 +3180,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 			 * VF flr until ACKs, we're safe.
 			 */
 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
-			found = 1;
+			found = true;
 		}
 	}
 
@@ -3305,11 +3386,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
 	if (!p_vf)
 		return;
 
-	DP_INFO(p_hwfn,
-		"VF [%d] - Malicious behavior [%02x]\n",
-		p_vf->abs_vf_id, p_data->err_id);
+	if (!p_vf->b_malicious) {
+		DP_NOTICE(p_hwfn,
+			  "VF [%d] - Malicious behavior [%02x]\n",
+			  p_vf->abs_vf_id, p_data->err_id);
 
-	p_vf->b_malicious = true;
+		p_vf->b_malicious = true;
+	} else {
+		DP_INFO(p_hwfn,
+			"VF [%d] - Malicious behavior [%02x]\n",
+			p_vf->abs_vf_id, p_data->err_id);
+	}
 }
 
 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3858,6 +3945,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 {
+	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
 	struct qed_mcp_link_capabilities caps;
 	struct qed_mcp_link_params params;
 	struct qed_mcp_link_state link;
@@ -3874,9 +3962,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 		if (!vf_info)
 			continue;
 
-		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+		/* Only hwfn0 is actually interested in the link speed.
+		 * But since only it would receive an MFW indication of link,
+		 * need to take configuration from it - otherwise things like
+		 * rate limiting for hwfn1 VF would not work.
+		 */
+		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+		       sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
 		       sizeof(caps));
 
 		/* Modify link according to the VF's configured link state */

+ 5 - 5
drivers/net/ethernet/qlogic/qed/qed_sriov.h

@@ -348,9 +348,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param disabled_vfs - bitmask of all VFs on path that were FLRed
  *
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
  */
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
 
 /**
  * @brief Search extended TLVs in request/reply buffer.
@@ -407,10 +407,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
 	return -EINVAL;
 }
 
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
-				      u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+				       u32 *disabled_vfs)
 {
-	return 0;
+	return false;
 }
 
 static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)

+ 12 - 6
drivers/net/ethernet/qlogic/qed/qed_vf.c

@@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 	}
 
 	if (!*done) {
-		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-			   "VF <-- PF Timeout [Type %d]\n",
-			   p_req->first_tlv.tl.type);
+		DP_NOTICE(p_hwfn,
+			  "VF <-- PF Timeout [Type %d]\n",
+			  p_req->first_tlv.tl.type);
 		rc = -EBUSY;
 		rc = -EBUSY;
 	} else {
-			   "PF response: %d [Type %d]\n",
-			   *done, p_req->first_tlv.tl.type);
+		if ((*done != PFVF_STATUS_SUCCESS) &&
+		    (*done != PFVF_STATUS_NO_RESOURCE))
+			DP_NOTICE(p_hwfn,
+				  "PF response: %d [Type %d]\n",
+				  *done, p_req->first_tlv.tl.type);
+		else
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "PF response: %d [Type %d]\n",
+				   *done, p_req->first_tlv.tl.type);
 	}
 
 	return rc;

+ 4 - 0
drivers/net/ethernet/qlogic/qed/qed_vf.h

@@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
 	struct vfpf_first_tlv first_tlv;
 
 	u16 rx_qid;
+
+	/* this field is deprecated and should *always* be set to '1' */
 	u8 num_rxqs;
 	u8 cqe_completion;
 	u8 padding[4];
@@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
 	struct vfpf_first_tlv first_tlv;
 
 	u16 tx_qid;
+
+	/* this field is deprecated and should *always* be set to '1' */
 	u8 num_txqs;
 	u8 padding[5];
 };