@@ -39,68 +39,173 @@
 
 #include "iscsi_iser.h"
 
-#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+static void
+iser_free_bounce_sg(struct iser_data_buf *data)
+{
+	struct scatterlist *sg;
+	int count;
 
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
-					struct iser_data_buf *data,
-					struct iser_data_buf *data_copy,
-					enum iser_data_dir cmd_dir)
+	for_each_sg(data->sg, sg, data->size, count)
+		__free_page(sg_page(sg));
+
+	kfree(data->sg);
+
+	data->sg = data->orig_sg;
+	data->size = data->orig_size;
+	data->orig_sg = NULL;
+	data->orig_size = 0;
+}
+
+static int
+iser_alloc_bounce_sg(struct iser_data_buf *data)
 {
-	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 	struct scatterlist *sg;
-	char *mem = NULL;
-	unsigned long cmd_data_len = 0;
-	int dma_nents, i;
+	struct page *page;
+	unsigned long length = data->data_len;
+	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
 
-	for_each_sg(sgl, sg, data->size, i)
-		cmd_data_len += ib_sg_dma_len(dev, sg);
+	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
+	if (!sg)
+		goto err;
 
-	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
-		mem = (void *)__get_free_pages(GFP_ATOMIC,
-		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
-	else
-		mem = kmalloc(cmd_data_len, GFP_ATOMIC);
+	sg_init_table(sg, nents);
+	while (length) {
+		u32 page_len = min_t(u32, length, PAGE_SIZE);
 
-	if (mem == NULL) {
-		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
-			 data->size, (int)cmd_data_len);
-		return -ENOMEM;
+		page = alloc_page(GFP_ATOMIC);
+		if (!page)
+			goto err;
+
+		sg_set_page(&sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
 	}
 
-	if (cmd_dir == ISER_DIR_OUT) {
-		/* copy the unaligned sg the buffer which is used for RDMA */
-		char *p, *from;
-
-		sgl = (struct scatterlist *)data->buf;
-		p = mem;
-		for_each_sg(sgl, sg, data->size, i) {
-			from = kmap_atomic(sg_page(sg));
-			memcpy(p,
-			       from + sg->offset,
-			       sg->length);
-			kunmap_atomic(from);
-			p += sg->length;
+	data->orig_sg = data->sg;
+	data->orig_size = data->size;
+	data->sg = sg;
+	data->size = nents;
+
+	return 0;
+
+err:
+	for (; i > 0; i--)
+		__free_page(sg_page(&sg[i - 1]));
+	kfree(sg);
+
+	return -ENOMEM;
+}
+
+static void
+iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
+{
+	struct scatterlist *osg, *bsg = data->sg;
+	void *oaddr, *baddr;
+	unsigned int left = data->data_len;
+	unsigned int bsg_off = 0;
+	int i;
+
+	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
+		unsigned int copy_len, osg_off = 0;
+
+		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
+		copy_len = min(left, osg->length);
+		while (copy_len) {
+			unsigned int len = min(copy_len, bsg->length - bsg_off);
+
+			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
+			if (to_buffer)
+				memcpy(baddr + bsg_off, oaddr + osg_off, len);
+			else
+				memcpy(oaddr + osg_off, baddr + bsg_off, len);
+
+			kunmap_atomic(baddr - bsg->offset);
+			osg_off += len;
+			bsg_off += len;
+			copy_len -= len;
+
+			if (bsg_off >= bsg->length) {
+				bsg = sg_next(bsg);
+				bsg_off = 0;
+			}
 		}
+		kunmap_atomic(oaddr - osg->offset);
+		left -= osg_off;
 	}
+}
+
+static inline void
+iser_copy_from_bounce(struct iser_data_buf *data)
+{
+	iser_copy_bounce(data, false);
+}
+
+static inline void
+iser_copy_to_bounce(struct iser_data_buf *data)
+{
+	iser_copy_bounce(data, true);
+}
+
+struct fast_reg_descriptor *
+iser_reg_desc_get(struct ib_conn *ib_conn)
+{
+	struct fast_reg_descriptor *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ib_conn->lock, flags);
+	desc = list_first_entry(&ib_conn->fastreg.pool,
+				struct fast_reg_descriptor, list);
+	list_del(&desc->list);
+	spin_unlock_irqrestore(&ib_conn->lock, flags);
+
+	return desc;
+}
+
+void
+iser_reg_desc_put(struct ib_conn *ib_conn,
+		  struct fast_reg_descriptor *desc)
+{
+	unsigned long flags;
 
-	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
-	data_copy->buf = &data_copy->sg_single;
-	data_copy->size = 1;
-	data_copy->copy_buf = mem;
+	spin_lock_irqsave(&ib_conn->lock, flags);
+	list_add(&desc->list, &ib_conn->fastreg.pool);
+	spin_unlock_irqrestore(&ib_conn->lock, flags);
+}
 
-	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
-				  (cmd_dir == ISER_DIR_OUT) ?
-				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	BUG_ON(dma_nents == 0);
+/**
+ * iser_start_rdma_unaligned_sg - allocate a bounce sg list and DMA map it
+ */
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+					struct iser_data_buf *data,
+					enum iser_data_dir cmd_dir)
+{
+	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
+	int rc;
+
+	rc = iser_alloc_bounce_sg(data);
+	if (rc) {
+		iser_err("Failed to allocate bounce for data len %lu\n",
+			 data->data_len);
+		return rc;
+	}
+
+	if (cmd_dir == ISER_DIR_OUT)
+		iser_copy_to_bounce(data);
 
-	data_copy->dma_nents = dma_nents;
-	data_copy->data_len = cmd_data_len;
+	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
+					(cmd_dir == ISER_DIR_OUT) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (!data->dma_nents) {
+		iser_err("Got dma_nents %d, something went wrong...\n",
+			 data->dma_nents);
+		rc = -ENOMEM;
+		goto err;
+	}
 
 	return 0;
+err:
+	iser_free_bounce_sg(data);
+	return rc;
 }
 
 /**
@@ -109,51 +214,18 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 				     struct iser_data_buf *data,
-				     struct iser_data_buf *data_copy,
 				     enum iser_data_dir cmd_dir)
 {
-	struct ib_device *dev;
-	unsigned long cmd_data_len;
-
-	dev = iser_task->iser_conn->ib_conn.device->ib_device;
+	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
-	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
+	ib_dma_unmap_sg(dev, data->sg, data->size,
 			(cmd_dir == ISER_DIR_OUT) ?
 			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-	if (cmd_dir == ISER_DIR_IN) {
-		char *mem;
-		struct scatterlist *sgl, *sg;
-		unsigned char *p, *to;
-		unsigned int sg_size;
-		int i;
-
-		/* copy back read RDMA to unaligned sg */
-		mem = data_copy->copy_buf;
-
-		sgl = (struct scatterlist *)data->buf;
-		sg_size = data->size;
-
-		p = mem;
-		for_each_sg(sgl, sg, sg_size, i) {
-			to = kmap_atomic(sg_page(sg));
-			memcpy(to + sg->offset,
-			       p,
-			       sg->length);
-			kunmap_atomic(to);
-			p += sg->length;
-		}
-	}
+	if (cmd_dir == ISER_DIR_IN)
+		iser_copy_from_bounce(data);
 
-	cmd_data_len = data->data_len;
-
-	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
-		free_pages((unsigned long)data_copy->copy_buf,
-			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
-	else
-		kfree(data_copy->copy_buf);
-
-	data_copy->copy_buf = NULL;
+	iser_free_bounce_sg(data);
 }
 
 #define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -175,7 +247,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct ib_device *ibdev, u64 *pages,
 			       int *offset, int *data_size)
 {
-	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg, *sgl = data->sg;
 	u64 start_addr, end_addr, page, chunk_start = 0;
 	unsigned long total_sz = 0;
 	unsigned int dma_len;
@@ -227,14 +299,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static int iser_data_buf_aligned_len(struct iser_data_buf *data,
 				     struct ib_device *ibdev)
 {
-	struct scatterlist *sgl, *sg, *next_sg = NULL;
+	struct scatterlist *sg, *sgl, *next_sg = NULL;
 	u64 start_addr, end_addr;
 	int i, ret_len, start_check = 0;
 
 	if (data->dma_nents == 1)
 		return 1;
 
-	sgl = (struct scatterlist *)data->buf;
+	sgl = data->sg;
 	start_addr = ib_sg_dma_address(ibdev, sgl);
 
 	for_each_sg(sgl, sg, data->dma_nents, i) {
@@ -266,11 +338,10 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sgl = (struct scatterlist *)data->buf;
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, data->dma_nents, i)
+	for_each_sg(data->sg, sg, data->dma_nents, i)
 		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
 			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
@@ -288,31 +359,6 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
 		iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
 }
 
-static void iser_page_vec_build(struct iser_data_buf *data,
-				struct iser_page_vec *page_vec,
-				struct ib_device *ibdev)
-{
-	int page_vec_len = 0;
-
-	page_vec->length = 0;
-	page_vec->offset = 0;
-
-	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
-					   &page_vec->offset,
-					   &page_vec->data_size);
-	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
-
-	page_vec->length = page_vec_len;
-
-	if (page_vec_len * SIZE_4K < page_vec->data_size) {
-		iser_err("page_vec too short to hold this SG\n");
-		iser_data_buf_dump(data, ibdev);
-		iser_dump_page_vec(page_vec);
-		BUG();
-	}
-}
-
 int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   struct iser_data_buf *data,
 			   enum iser_data_dir iser_dir,
@@ -323,7 +369,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 	iser_task->dir[iser_dir] = 1;
 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
 
-	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
 	if (data->dma_nents == 0) {
 		iser_err("dma_map_sg failed!!!\n");
 		return -EINVAL;
@@ -338,24 +384,41 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
 	struct ib_device *dev;
 
 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
+}
+
+static int
+iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+	     struct iser_mem_reg *reg)
+{
+	struct scatterlist *sg = mem->sg;
+
+	reg->sge.lkey = device->mr->lkey;
+	reg->rkey = device->mr->rkey;
+	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
+	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+
+	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
+		 reg->sge.addr, reg->sge.length);
+
+	return 0;
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
-			      struct ib_device *ibdev,
 			      struct iser_data_buf *mem,
-			      struct iser_data_buf *mem_copy,
 			      enum iser_data_dir cmd_dir,
 			      int aligned_len)
 {
-	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
 
 	iscsi_conn->fmr_unalign_cnt++;
 	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
 		  aligned_len, mem->size);
 
 	if (iser_debug_level > 0)
-		iser_data_buf_dump(mem, ibdev);
+		iser_data_buf_dump(mem, device->ib_device);
 
 	/* unmap the command data before accessing it */
 	iser_dma_unmap_task_data(iser_task, mem,
@@ -364,12 +427,94 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 
 	/* allocate copy buf, if we are writing, copy the */
 	/* unaligned scatterlist, dma map the copy */
-	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
 		return -ENOMEM;
 
 	return 0;
 }
 
+/**
+ * iser_reg_page_vec - Register physical memory
+ *
+ * returns: 0 on success, errno code on failure
+ */
+static
+int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
+		      struct iser_data_buf *mem,
+		      struct iser_page_vec *page_vec,
+		      struct iser_mem_reg *mem_reg)
+{
+	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
+	struct ib_pool_fmr *fmr;
+	int ret, plen;
+
+	plen = iser_sg_to_page_vec(mem, device->ib_device,
+				   page_vec->pages,
+				   &page_vec->offset,
+				   &page_vec->data_size);
+	page_vec->length = plen;
+	if (plen * SIZE_4K < page_vec->data_size) {
+		iser_err("page vec too short to hold this SG\n");
+		iser_data_buf_dump(mem, device->ib_device);
+		iser_dump_page_vec(page_vec);
+		return -EINVAL;
+	}
+
+	fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+				   page_vec->pages,
+				   page_vec->length,
+				   page_vec->pages[0]);
+	if (IS_ERR(fmr)) {
+		ret = PTR_ERR(fmr);
+		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
+		return ret;
+	}
+
+	mem_reg->sge.lkey = fmr->fmr->lkey;
+	mem_reg->rkey = fmr->fmr->rkey;
+	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
+	mem_reg->sge.length = page_vec->data_size;
+	mem_reg->mem_h = fmr;
+
+	return 0;
+}
+
+/**
+ * Unregister (previously registered using FMR) memory.
+ * If memory is non-FMR does nothing.
+ */
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+			enum iser_data_dir cmd_dir)
+{
+	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+	int ret;
+
+	if (!reg->mem_h)
+		return;
+
+	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
+
+	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
+	if (ret)
+		iser_err("ib_fmr_pool_unmap failed %d\n", ret);
+
+	reg->mem_h = NULL;
+}
+
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+			    enum iser_data_dir cmd_dir)
+{
+	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+
+	if (!reg->mem_h)
+		return;
+
+	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
+			  reg->mem_h);
+	reg->mem_h = NULL;
+}
+
 /**
  * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
  * using FMR (if possible) obtaining rkey and va
@@ -383,45 +528,29 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_regd_buf *regd_buf;
+	struct iser_mem_reg *mem_reg;
 	int aligned_len;
 	int err;
 	int i;
-	struct scatterlist *sg;
 
-	regd_buf = &iser_task->rdma_regd[cmd_dir];
+	mem_reg = &iser_task->rdma_reg[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, ibdev, mem,
-					 &iser_task->data_copy[cmd_dir],
+		err = fall_to_bounce_buf(iser_task, mem,
 					 cmd_dir, aligned_len);
 		if (err) {
 			iser_err("failed to allocate bounce buffer\n");
 			return err;
 		}
-		mem = &iser_task->data_copy[cmd_dir];
 	}
 
 	/* if there a single dma entry, FMR is not needed */
 	if (mem->dma_nents == 1) {
-		sg = (struct scatterlist *)mem->buf;
-
-		regd_buf->reg.lkey = device->mr->lkey;
-		regd_buf->reg.rkey = device->mr->rkey;
-		regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
-		regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
-
-		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
-			 "va: 0x%08lX sz: %ld]\n",
-			 (unsigned int)regd_buf->reg.lkey,
-			 (unsigned int)regd_buf->reg.rkey,
-			 (unsigned long)regd_buf->reg.va,
-			 (unsigned long)regd_buf->reg.len);
+		return iser_reg_dma(device, mem, mem_reg);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
-		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
-					&regd_buf->reg);
+		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
+					mem_reg);
 		if (err && err != -EAGAIN) {
 			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
@@ -519,8 +648,10 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
 
 static int
 iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
-		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
-		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
+		struct fast_reg_descriptor *desc,
+		struct iser_mem_reg *data_reg,
+		struct iser_mem_reg *prot_reg,
+		struct iser_mem_reg *sig_reg)
 {
 	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_pi_context *pi_ctx = desc->pi_ctx;
@@ -544,12 +675,12 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 	memset(&sig_wr, 0, sizeof(sig_wr));
 	sig_wr.opcode = IB_WR_REG_SIG_MR;
 	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
-	sig_wr.sg_list = data_sge;
+	sig_wr.sg_list = &data_reg->sge;
 	sig_wr.num_sge = 1;
 	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
 	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
 	if (scsi_prot_sg_count(iser_task->sc))
-		sig_wr.wr.sig_handover.prot = prot_sge;
+		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
 	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
 					      IB_ACCESS_REMOTE_READ |
 					      IB_ACCESS_REMOTE_WRITE;
@@ -566,27 +697,26 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
 	}
 	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
 
-	sig_sge->lkey = pi_ctx->sig_mr->lkey;
-	sig_sge->addr = 0;
-	sig_sge->length = scsi_transfer_length(iser_task->sc);
+	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
+	sig_reg->rkey = pi_ctx->sig_mr->rkey;
+	sig_reg->sge.addr = 0;
+	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
 
-	iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
-		 sig_sge->addr, sig_sge->length,
-		 sig_sge->lkey);
+	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
+		 sig_reg->sge.length);
 err:
 	return ret;
 }
 
 static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
-			    struct iser_regd_buf *regd_buf,
 			    struct iser_data_buf *mem,
+			    struct fast_reg_descriptor *desc,
 			    enum iser_reg_indicator ind,
-			    struct ib_sge *sge)
+			    struct iser_mem_reg *reg)
 {
-	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
 	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
-	struct ib_device *ibdev = device->ib_device;
 	struct ib_mr *mr;
 	struct ib_fast_reg_page_list *frpl;
 	struct ib_send_wr fastreg_wr, inv_wr;
@@ -594,17 +724,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	int ret, offset, size, plen;
 
 	/* if there a single dma entry, dma mr suffices */
-	if (mem->dma_nents == 1) {
-		struct scatterlist *sg = (struct scatterlist *)mem->buf;
-
-		sge->lkey = device->mr->lkey;
-		sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
-		sge->length = ib_sg_dma_len(ibdev, &sg[0]);
-
-		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
-			 sge->lkey, sge->addr, sge->length);
-		return 0;
-	}
+	if (mem->dma_nents == 1)
+		return iser_reg_dma(device, mem, reg);
 
 	if (ind == ISER_DATA_KEY_VALID) {
 		mr = desc->data_mr;
@@ -652,9 +773,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	}
 	desc->reg_indicators &= ~ind;
 
-	sge->lkey = mr->lkey;
-	sge->addr = frpl->page_list[0] + offset;
-	sge->length = size;
+	reg->sge.lkey = mr->lkey;
+	reg->rkey = mr->rkey;
+	reg->sge.addr = frpl->page_list[0] + offset;
+	reg->sge.length = size;
 
 	return ret;
 }
@@ -672,93 +794,66 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
 	struct iser_device *device = ib_conn->device;
 	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
 	struct fast_reg_descriptor *desc = NULL;
-	struct ib_sge data_sge;
 	int err, aligned_len;
-	unsigned long flags;
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, ibdev, mem,
-					 &iser_task->data_copy[cmd_dir],
+		err = fall_to_bounce_buf(iser_task, mem,
 					 cmd_dir, aligned_len);
 		if (err) {
 			iser_err("failed to allocate bounce buffer\n");
 			return err;
 		}
-		mem = &iser_task->data_copy[cmd_dir];
 	}
 
 	if (mem->dma_nents != 1 ||
 	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		spin_lock_irqsave(&ib_conn->lock, flags);
-		desc = list_first_entry(&ib_conn->fastreg.pool,
-					struct fast_reg_descriptor, list);
-		list_del(&desc->list);
-		spin_unlock_irqrestore(&ib_conn->lock, flags);
-		regd_buf->reg.mem_h = desc;
+		desc = iser_reg_desc_get(ib_conn);
+		mem_reg->mem_h = desc;
 	}
 
-	err = iser_fast_reg_mr(iser_task, regd_buf, mem,
-			       ISER_DATA_KEY_VALID, &data_sge);
+	err = iser_fast_reg_mr(iser_task, mem, desc,
+			       ISER_DATA_KEY_VALID, mem_reg);
 	if (err)
 		goto err_reg;
 
 	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
-		struct ib_sge prot_sge, sig_sge;
+		struct iser_mem_reg prot_reg;
 
-		memset(&prot_sge, 0, sizeof(prot_sge));
+		memset(&prot_reg, 0, sizeof(prot_reg));
 		if (scsi_prot_sg_count(iser_task->sc)) {
 			mem = &iser_task->prot[cmd_dir];
 			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 			if (aligned_len != mem->dma_nents) {
-				err = fall_to_bounce_buf(iser_task, ibdev, mem,
-							 &iser_task->prot_copy[cmd_dir],
+				err = fall_to_bounce_buf(iser_task, mem,
 							 cmd_dir, aligned_len);
 				if (err) {
 					iser_err("failed to allocate bounce buffer\n");
 					return err;
 				}
-				mem = &iser_task->prot_copy[cmd_dir];
 			}
 
-			err = iser_fast_reg_mr(iser_task, regd_buf, mem,
-					       ISER_PROT_KEY_VALID, &prot_sge);
+			err = iser_fast_reg_mr(iser_task, mem, desc,
					       ISER_PROT_KEY_VALID, &prot_reg);
 			if (err)
 				goto err_reg;
 		}
 
-		err = iser_reg_sig_mr(iser_task, desc, &data_sge,
-				      &prot_sge, &sig_sge);
+		err = iser_reg_sig_mr(iser_task, desc, mem_reg,
+				      &prot_reg, mem_reg);
 		if (err) {
 			iser_err("Failed to register signature mr\n");
 			return err;
 		}
 		desc->reg_indicators |= ISER_FASTREG_PROTECTED;
-
-		regd_buf->reg.lkey = sig_sge.lkey;
-		regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
-		regd_buf->reg.va = sig_sge.addr;
-		regd_buf->reg.len = sig_sge.length;
-	} else {
-		if (desc)
-			regd_buf->reg.rkey = desc->data_mr->rkey;
-		else
-			regd_buf->reg.rkey = device->mr->rkey;
-
-		regd_buf->reg.lkey = data_sge.lkey;
-		regd_buf->reg.va = data_sge.addr;
-		regd_buf->reg.len = data_sge.length;
 	}
 
 	return 0;
 err_reg:
-	if (desc) {
-		spin_lock_irqsave(&ib_conn->lock, flags);
-		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
-		spin_unlock_irqrestore(&ib_conn->lock, flags);
-	}
+	if (desc)
+		iser_reg_desc_put(ib_conn, desc);
 
 	return err;
 }