@@ -83,6 +83,7 @@ struct nvme_rdma_request {
 
 enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_CONNECTED = (1 << 0),
 	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+	NVME_RDMA_Q_DELETING = (1 << 2),
 };
 
 struct nvme_rdma_queue {
@@ -559,6 +560,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
 	queue = &ctrl->queues[idx];
 	queue->ctrl = ctrl;
+	queue->flags = 0;
 	init_completion(&queue->cm_done);
 
 	if (idx > 0)
@@ -616,7 +618,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
 {
-	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+	if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
 		return;
 	nvme_rdma_stop_queue(queue);
 	nvme_rdma_free_queue(queue);
@@ -769,8 +771,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(work,
 			struct nvme_rdma_ctrl, err_work);
+	int i;
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
+
+	for (i = 0; i < ctrl->queue_count; i++)
+		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
 	if (ctrl->queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1350,7 +1357,7 @@ static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
 	/* Disable the queue so ctrl delete won't free it */
-	if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
+	if (!test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) {
 		/* Free this queue ourselves */
 		nvme_rdma_stop_queue(queue);
 		nvme_rdma_destroy_queue_ib(queue);
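
Note (not part of the patch): the change guards queue teardown with test_and_set_bit() on a dedicated NVME_RDMA_Q_DELETING bit, so that when two paths (e.g. error recovery and device unplug) race, only the first one performs stop-and-free, and CONNECTED can be cleared independently without re-arming the free path. Below is a minimal userspace sketch of that one-shot guard pattern; the names (fake_queue, stop_and_free_queue, the thread entry points) are hypothetical and atomic_fetch_or stands in for the kernel's test_and_set_bit.

/* Standalone illustration of a one-shot teardown guard using an atomic
 * DELETING bit: only the first caller proceeds, the loser backs off. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define Q_CONNECTED	(1u << 0)
#define Q_DELETING	(1u << 2)

struct fake_queue {
	atomic_uint flags;		/* stands in for queue->flags */
};

static void stop_and_free_queue(struct fake_queue *q, const char *who)
{
	/* atomic_fetch_or returns the previous value: if DELETING was
	 * already set, another path owns the teardown, so return early. */
	if (atomic_fetch_or(&q->flags, Q_DELETING) & Q_DELETING) {
		printf("%s: queue already being deleted, skipping\n", who);
		return;
	}
	printf("%s: stopping and freeing queue\n", who);
}

static void *error_recovery(void *arg)
{
	stop_and_free_queue(arg, "error-recovery");
	return NULL;
}

static void *device_unplug(void *arg)
{
	stop_and_free_queue(arg, "device-unplug");
	return NULL;
}

int main(void)
{
	struct fake_queue q = { .flags = Q_CONNECTED };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, error_recovery, &q);
	pthread_create(&t2, NULL, device_unplug, &q);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Whichever thread sets the bit first prints the "stopping and freeing" line; the other always skips, regardless of what happens to the CONNECTED bit in the meantime.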