
nvme-rdma: fix possible double free condition when failing to create a controller

Once nvme_init_ctrl has succeeded, failures defer resource cleanup to
.free_ctrl, which runs when the controller reference is released; hence
we must not free the controller queues directly on those failure paths,
or they end up freed twice.

Fix that by moving the controller queues allocation before controller
initialization, freeing the queues on failures that occur before
initialization, and skipping the free on failures that occur after
initialization.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
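
For illustration only, below is a minimal, self-contained userspace C sketch
of the unwind ordering the patch establishes. The demo_ctrl, demo_init_ctrl,
demo_put_ctrl and demo_create_ctrl names are hypothetical stand-ins for the
refcounted controller and its .free_ctrl release callback, not the kernel
API; the point is only that once initialization has handed ownership of the
queues to the release callback, later error paths must drop the reference
instead of freeing the queues directly.

/*
 * Minimal userspace sketch of the refcount + release-callback pattern whose
 * error paths this patch fixes.  All names here are hypothetical stand-ins,
 * not the kernel API.
 */
#include <stdlib.h>

struct demo_ctrl {
	int refcount;
	int *queues;	/* owned by the release path once initialized */
};

/* Stand-in for nvme_init_ctrl(): after this succeeds, cleanup of the
 * controller (including its queues) is deferred to demo_put_ctrl(). */
static int demo_init_ctrl(struct demo_ctrl *ctrl)
{
	ctrl->refcount = 1;
	return 0;
}

/* Stand-in for nvme_put_ctrl()/.free_ctrl: drops the reference and, on the
 * last put, frees the queues and the controller itself. */
static void demo_put_ctrl(struct demo_ctrl *ctrl)
{
	if (--ctrl->refcount == 0) {
		free(ctrl->queues);
		free(ctrl);
	}
}

static struct demo_ctrl *demo_create_ctrl(int nr_queues, int fail_late)
{
	struct demo_ctrl *ctrl;

	ctrl = calloc(1, sizeof(*ctrl));
	if (!ctrl)
		return NULL;

	/* Allocate the queues *before* initialization ... */
	ctrl->queues = calloc(nr_queues, sizeof(*ctrl->queues));
	if (!ctrl->queues)
		goto out_free_ctrl;

	if (demo_init_ctrl(ctrl))
		goto out_kfree_queues;

	/*
	 * ... so that a failure after initialization only drops the
	 * reference.  Freeing ctrl->queues here as well would be the
	 * double free the patch title refers to.
	 */
	if (fail_late) {
		demo_put_ctrl(ctrl);
		return NULL;
	}
	return ctrl;

out_kfree_queues:
	free(ctrl->queues);
out_free_ctrl:
	free(ctrl);
	return NULL;
}

int main(void)
{
	struct demo_ctrl *ctrl = demo_create_ctrl(4, 0);

	if (ctrl)
		demo_put_ctrl(ctrl);
	return 0;
}

On the pre-initialization paths the sketch frees the queues itself via its
out_kfree_queues label, mirroring the new label ordering in the diff below;
after initialization it only drops the reference.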
commit 3d0641015b
Author: Sagi Grimberg <sagi@grimberg.me>

 drivers/nvme/host/rdma.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

@@ -888,9 +888,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 	list_del(&ctrl->list);
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	kfree(ctrl->queues);
 	nvmf_free_options(nctrl->opts);
 free_ctrl:
+	kfree(ctrl->queues);
 	kfree(ctrl);
 }
 
@@ -1932,11 +1932,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
-	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-				0 /* no quirks, we're perfect! */);
-	if (ret)
-		goto out_free_ctrl;
-
 	INIT_DELAYED_WORK(&ctrl->reconnect_work,
 			nvme_rdma_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1945,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
 				GFP_KERNEL);
 	if (!ctrl->queues)
-		goto out_uninit_ctrl;
+		goto out_free_ctrl;
+
+	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+				0 /* no quirks, we're perfect! */);
+	if (ret)
+		goto out_kfree_queues;
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
 	WARN_ON_ONCE(!changed);
 
 	ret = nvme_rdma_configure_admin_queue(ctrl, true);
 	if (ret)
-		goto out_kfree_queues;
+		goto out_uninit_ctrl;
 
 	/* sanity check icdoff */
 	if (ctrl->ctrl.icdoff) {
@@ -2014,14 +2014,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 
 out_remove_admin_queue:
 	nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-	kfree(ctrl->queues);
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&ctrl->ctrl);
 	nvme_put_ctrl(&ctrl->ctrl);
 	if (ret > 0)
 		ret = -EIO;
 	return ERR_PTR(ret);
+out_kfree_queues:
+	kfree(ctrl->queues);
out_free_ctrl:
 	kfree(ctrl);
 	return ERR_PTR(ret);