@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
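
For context (not part of the patch): the forward declarations in the first hunk suggest nvmet_rdma_alloc_rsp() and nvmet_rdma_free_rsp() are already defined later in the file, where they service the pre-allocated rsp pool; the hunks above only call them on the pool-exhaustion fallback path as well, so a dynamically allocated rsp also gets its response buffer set up before it is used, and torn down again in nvmet_rdma_put_rsp(). The kmalloc -> kzalloc switch presumably just leaves the fallback rsp's fields in a known state before the helper fills them in. Below is a minimal sketch of a plausible shape for the two helpers, assuming each rsp carries a DMA-mapped NVMe completion for the RDMA SEND; field names such as req.rsp, send_sge, and ndev->pd are assumptions taken from the surrounding driver, not from this diff.

/*
 * Sketch only, not part of the patch: plausible pairing of the two
 * helpers forward-declared above.
 */
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe completion entry carried back to the host by the RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		return -ENOMEM;

	/* DMA-map it; this is what the kmalloc-only fallback path lacked */
	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) {
		kfree(r->req.rsp);
		return -ENOMEM;
	}

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;
	return 0;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* Undo exactly what nvmet_rdma_alloc_rsp() set up */
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}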