@@ -43,10 +43,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR	512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY	20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
 	u32			num_sge;
 	int			nents;
 	bool			inline_data;
-	bool			need_inval;
 	struct ib_reg_wr	reg_wr;
 	struct ib_cqe		reg_cqe;
 	struct nvme_rdma_queue	*queue;
@@ -286,7 +281,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int ret = 0;
 
-	if (!req->need_inval)
+	if (!req->mr->need_inval)
 		goto out;
 
 	ib_dereg_mr(req->mr);
@@ -298,7 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 		req->mr = NULL;
 	}
 
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 out:
 	return ret;
@@ -645,7 +640,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		ret = nvme_rdma_init_queue(ctrl, i,
+					   ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
 				"failed to initialize i/o queue: %d\n", ret);
@@ -849,7 +845,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->need_inval) {
+	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
 		if (res < 0) {
 			dev_err(ctrl->ctrl.device,
@@ -935,7 +931,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			IB_ACCESS_REMOTE_READ |
 			IB_ACCESS_REMOTE_WRITE;
 
-	req->need_inval = true;
+	req->mr->need_inval = true;
 
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +954,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1141,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
 	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
 	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->need_inval = false;
+		req->mr->need_inval = false;
 
 	blk_mq_complete_request(rq, status);
 
@@ -1278,8 +1274,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-	priv.hrqsize = cpu_to_le16(queue->queue_size);
-	priv.hsqsize = cpu_to_le16(queue->queue_size);
+	/*
+	 * set the admin queue depth to the minimum size
+	 * specified by the Fabrics standard.
+	 */
+	if (priv.qid == 0) {
+		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	} else {
+		/*
+		 * current interpretation of the fabrics spec
+		 * is at minimum you make hrqsize sqsize+1, or a
+		 * 1's based representation of sqsize.
+		 */
+		priv.hrqsize = cpu_to_le16(queue->queue_size);
+		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+	}
 
 	ret = rdma_connect(queue->cm_id, &param);
 	if (ret) {
@@ -1319,7 +1329,7 @@ out_destroy_queue_ib:
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret;
+	int ret = 0;
 
 	/* Own the controller deletion */
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
@@ -1461,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
 	if (ret) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
@@ -1816,7 +1826,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	spin_lock_init(&ctrl->lock);
 
 	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-	ctrl->ctrl.sqsize = opts->queue_size;
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
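
Note on the queue sizing math above: NVMe-oF sqsize is a zero-based value, while the queue_size connect option and the blk-mq tag-set depth are one-based, which is why the patch stores opts->queue_size - 1 in ctrl->ctrl.sqsize but keeps opts->queue_size for the tag set and the queue allocation. The snippet below is an illustrative, standalone sketch of that mapping, not part of the patch; it assumes NVMF_AQ_DEPTH is 32, matching the fabrics headers of this period, and uses an arbitrary example queue_size of 128.

/*
 * Illustrative sketch only -- not part of the patch.  Shows how a
 * one-based queue_size maps onto the zero-based sqsize and the
 * hrqsize/hsqsize values carried in the RDMA CM private data.
 */
#include <stdio.h>

#define NVMF_AQ_DEPTH	32	/* assumed admin queue depth */

int main(void)
{
	unsigned int queue_size = 128;		/* one-based, from the connect options */
	unsigned int sqsize = queue_size - 1;	/* zero-based, as stored in ctrl->ctrl.sqsize */

	/* I/O queues: hrqsize is one-based (sqsize + 1), hsqsize is zero-based */
	printf("io    : hrqsize=%u hsqsize=%u\n", queue_size, sqsize);

	/* Admin queue: fixed at the minimum the Fabrics spec requires */
	printf("admin : hrqsize=%u hsqsize=%u\n", NVMF_AQ_DEPTH, NVMF_AQ_DEPTH - 1);

	return 0;
}

With queue_size = 128 this prints hrqsize=128, hsqsize=127 for I/O queues and hrqsize=32, hsqsize=31 for the admin queue, matching the hrqsize = sqsize + 1 interpretation stated in the patch comment.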