@@ -69,6 +69,8 @@
 /*
  * internal functions
  */
+static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
+static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

 static struct workqueue_struct *rpcrdma_receive_wq;

@@ -262,6 +264,21 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 			__func__, ep);
 		complete(&ia->ri_done);
 		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+		pr_info("rpcrdma: removing device for %pIS:%u\n",
+			sap, rpc_get_port(sap));
+#endif
+		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
+		ep->rep_connected = -ENODEV;
+		xprt_force_disconnect(&xprt->rx_xprt);
+		wait_for_completion(&ia->ri_remove_done);
+
+		ia->ri_id = NULL;
+		ia->ri_pd = NULL;
+		ia->ri_device = NULL;
+		/* Return 1 to ensure the core destroys the id. */
+		return 1;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		connstate = 1;
 		ib_query_qp(ia->ri_id->qp, attr,
@@ -291,9 +308,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		goto connected;
 	case RDMA_CM_EVENT_DISCONNECTED:
 		connstate = -ECONNABORTED;
-		goto connected;
-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		connstate = -ENODEV;
 connected:
 		dprintk("RPC:       %s: %sconnected\n",
 			__func__, connstate > 0 ? "" : "dis");
@@ -346,6 +360,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 	int rc;

 	init_completion(&ia->ri_done);
+	init_completion(&ia->ri_remove_done);

 	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
 			    IB_QPT_RC);
@@ -468,6 +483,56 @@ out_err:
 	return rc;
 }

+/**
+ * rpcrdma_ia_remove - Handle device driver unload
+ * @ia: interface adapter being removed
+ *
+ * Divest transport H/W resources associated with this adapter,
+ * but allow it to be restored later.
+ */
+void
+rpcrdma_ia_remove(struct rpcrdma_ia *ia)
+{
+	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
+						   rx_ia);
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	struct rpcrdma_req *req;
+	struct rpcrdma_rep *rep;
+
+	cancel_delayed_work_sync(&buf->rb_refresh_worker);
+
+	/* This is similar to rpcrdma_ep_destroy, but:
+	 * - Don't cancel the connect worker.
+	 * - Don't call rpcrdma_ep_disconnect, which waits
+	 *   for another conn upcall, which will deadlock.
+	 * - rdma_disconnect is unneeded, the underlying
+	 *   connection is already gone.
+	 */
+	if (ia->ri_id->qp) {
+		ib_drain_qp(ia->ri_id->qp);
+		rdma_destroy_qp(ia->ri_id);
+		ia->ri_id->qp = NULL;
+	}
+	ib_free_cq(ep->rep_attr.recv_cq);
+	ib_free_cq(ep->rep_attr.send_cq);
+
+	/* The ULP is responsible for ensuring all DMA
+	 * mappings and MRs are gone.
+	 */
+	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
+		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
+	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
+		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
+		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
+		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
+	}
+	rpcrdma_destroy_mrs(buf);
+
+	/* Allow waiters to continue */
+	complete(&ia->ri_remove_done);
+}
+
 /**
  * rpcrdma_ia_close - Clean up/close an IA.
  * @ia: interface adapter to close
@@ -1080,7 +1145,8 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)

 out_nomws:
 	dprintk("RPC:       %s: no MWs available\n", __func__);
-	schedule_delayed_work(&buf->rb_refresh_worker, 0);
+	if (r_xprt->rx_ep.rep_connected != -ENODEV)
+		schedule_delayed_work(&buf->rb_refresh_worker, 0);

 	/* Allow the reply handler and refresh worker to run */
 	cond_resched();