@@ -12,13 +12,11 @@
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/jiffies.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/types.h>
@@ -26,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/nvme.h>
-#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 
 #include <rdma/ib_verbs.h>
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 	list_del(&ctrl->list);
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	if (ctrl->ctrl.tagset) {
-		blk_cleanup_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-		nvme_rdma_dev_put(ctrl->device);
-	}
 	kfree(ctrl->queues);
 	nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	if (ctrl->queue_count > 1)
+	if (ctrl->queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
+		nvme_queue_scan(&ctrl->ctrl);
+		nvme_queue_async_events(&ctrl->ctrl);
+	}
 
 	dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 	struct rdma_conn_param param = { };
-	struct nvme_rdma_cm_req priv;
+	struct nvme_rdma_cm_req priv = { };
 	int ret;
 
 	param.qp_num = queue->qp->qp_num;
@@ -1318,37 +1312,39 @@ out_destroy_queue_ib:
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue beforehand, destroy the queue resources,
+ * then queue the controller deletion, which won't destroy this queue, and
+ * destroy the cm_id implicitly by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret, ctrl_deleted = 0;
+	int ret = 0;
 
-	/* First disable the queue so ctrl delete won't free it */
-	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-		goto out;
+	/* Own the controller deletion */
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+		return 0;
 
-	/* delete the controller */
-	ret = __nvme_rdma_del_ctrl(ctrl);
-	if (!ret) {
-		dev_warn(ctrl->ctrl.device,
-			"Got rdma device removal event, deleting ctrl\n");
-		flush_work(&ctrl->delete_work);
+	dev_warn(ctrl->ctrl.device,
+		"Got rdma device removal event, deleting ctrl\n");
 
-		/* Return non-zero so the cm_id will destroy implicitly */
-		ctrl_deleted = 1;
+	/* Get rid of reconnect work if it's running */
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
+	/* Disable the queue so ctrl delete won't free it */
+	if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
 		/* Free this queue ourselves */
-		rdma_disconnect(queue->cm_id);
-		ib_drain_qp(queue->qp);
+		nvme_rdma_stop_queue(queue);
 		nvme_rdma_destroy_queue_ib(queue);
+
+		/* Return non-zero so the cm_id will destroy implicitly */
+		ret = 1;
 	}
 
-out:
-	return ctrl_deleted;
+	/* Queue controller deletion */
+	queue_work(nvme_rdma_wq, &ctrl->delete_work);
+	flush_work(&ctrl->delete_work);
+	return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 		nvme_rdma_free_io_queues(ctrl);
 	}
 
-	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+	if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
 		nvme_shutdown_ctrl(&ctrl->ctrl);
 
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 	nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+	nvme_uninit_ctrl(&ctrl->ctrl);
+	if (shutdown)
+		nvme_rdma_shutdown_ctrl(ctrl);
+
+	if (ctrl->ctrl.tagset) {
+		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_free_tag_set(&ctrl->tag_set);
+		nvme_rdma_dev_put(ctrl->device);
+	}
+
+	nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
 	struct nvme_rdma_ctrl *ctrl = container_of(work,
 			struct nvme_rdma_ctrl, delete_work);
 
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_rdma_shutdown_ctrl(ctrl);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	__nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
 	struct nvme_rdma_ctrl *ctrl = container_of(work,
 			struct nvme_rdma_ctrl, delete_work);
 
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	__nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	if (ctrl->queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
 		nvme_queue_scan(&ctrl->ctrl);
+		nvme_queue_async_events(&ctrl->ctrl);
 	}
 
 	return;