@@ -522,6 +522,46 @@ err_out1:
 	return rc;
 }
 
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+					   struct bnxt_vf_info *vf,
+					   u16 event_id)
+{
+	int rc = 0;
+	struct hwrm_fwd_async_event_cmpl_input req = {0};
+	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_async_event_cmpl *async_cmpl;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+	if (vf)
+		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+	else
+		/* broadcast this async event to all VFs */
+		req.encap_async_event_target_id = cpu_to_le16(0xffff);
+	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+	async_cmpl->type =
+		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+	async_cmpl->event_id = cpu_to_le16(event_id);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+			   rc);
+		goto fwd_async_event_cmpl_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_async_event_cmpl_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 void bnxt_sriov_disable(struct bnxt *bp)
 {
 	u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
 		return;
 
 	if (pci_vfs_assigned(bp->pdev)) {
+		bnxt_hwrm_fwd_async_event_cmpl(
+			bp, NULL,
+			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
 		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
 			    num_vfs);
 	} else {
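
For illustration, a minimal usage sketch of the new helper under the patch's own semantics: passing a specific bnxt_vf_info forwards the encapsulated async event completion to that VF's firmware function ID, while passing NULL sets the target ID to 0xffff so the event is broadcast to every VF. The wrapper below is hypothetical and not part of this patch.

	/* Hypothetical caller, not part of this patch: forward a PF driver
	 * unload notification either to one VF or to all VFs.
	 */
	static void bnxt_notify_pf_unload(struct bnxt *bp, struct bnxt_vf_info *vf)
	{
		/* vf != NULL: target_id = vf->fw_fid, only that VF is notified.
		 * vf == NULL: target_id = 0xffff, the event is broadcast.
		 */
		bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
	}

bnxt_sriov_disable() uses the broadcast form so that VFs still assigned to VMs, whose resources cannot be freed, are at least told that the PF driver is unloading.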