@@ -867,6 +867,14 @@ out_free_io_queues:
 	return ret;
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -1718,9 +1726,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
@@ -1798,6 +1803,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 static inline bool