@@ -88,113 +88,6 @@ int iser_assign_reg_ops(struct iser_device *device)
 	return 0;
 }
 
-static void
-iser_free_bounce_sg(struct iser_data_buf *data)
-{
-	struct scatterlist *sg;
-	int count;
-
-	for_each_sg(data->sg, sg, data->size, count)
-		__free_page(sg_page(sg));
-
-	kfree(data->sg);
-
-	data->sg = data->orig_sg;
-	data->size = data->orig_size;
-	data->orig_sg = NULL;
-	data->orig_size = 0;
-}
-
-static int
-iser_alloc_bounce_sg(struct iser_data_buf *data)
-{
-	struct scatterlist *sg;
-	struct page *page;
-	unsigned long length = data->data_len;
-	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
-
-	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
-	if (!sg)
-		goto err;
-
-	sg_init_table(sg, nents);
-	while (length) {
-		u32 page_len = min_t(u32, length, PAGE_SIZE);
-
-		page = alloc_page(GFP_ATOMIC);
-		if (!page)
-			goto err;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
-	}
-
-	data->orig_sg = data->sg;
-	data->orig_size = data->size;
-	data->sg = sg;
-	data->size = nents;
-
-	return 0;
-
-err:
-	for (; i > 0; i--)
-		__free_page(sg_page(&sg[i - 1]));
-	kfree(sg);
-
-	return -ENOMEM;
-}
-
-static void
-iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
-{
-	struct scatterlist *osg, *bsg = data->sg;
-	void *oaddr, *baddr;
-	unsigned int left = data->data_len;
-	unsigned int bsg_off = 0;
-	int i;
-
-	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
-		unsigned int copy_len, osg_off = 0;
-
-		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
-		copy_len = min(left, osg->length);
-		while (copy_len) {
-			unsigned int len = min(copy_len, bsg->length - bsg_off);
-
-			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
-			if (to_buffer)
-				memcpy(baddr + bsg_off, oaddr + osg_off, len);
-			else
-				memcpy(oaddr + osg_off, baddr + bsg_off, len);
-
-			kunmap_atomic(baddr - bsg->offset);
-			osg_off += len;
-			bsg_off += len;
-			copy_len -= len;
-
-			if (bsg_off >= bsg->length) {
-				bsg = sg_next(bsg);
-				bsg_off = 0;
-			}
-		}
-		kunmap_atomic(oaddr - osg->offset);
-		left -= osg_off;
-	}
-}
-
-static inline void
-iser_copy_from_bounce(struct iser_data_buf *data)
-{
-	iser_copy_bounce(data, false);
-}
-
-static inline void
-iser_copy_to_bounce(struct iser_data_buf *data)
-{
-	iser_copy_bounce(data, true);
-}
-
 struct iser_fr_desc *
 iser_reg_desc_get_fr(struct ib_conn *ib_conn)
 {
@@ -238,62 +131,6 @@ iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
 {
 }
 
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
-					struct iser_data_buf *data,
-					enum iser_data_dir cmd_dir)
-{
-	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	int rc;
-
-	rc = iser_alloc_bounce_sg(data);
-	if (rc) {
-		iser_err("Failed to allocate bounce for data len %lu\n",
-			 data->data_len);
-		return rc;
-	}
-
-	if (cmd_dir == ISER_DIR_OUT)
-		iser_copy_to_bounce(data);
-
-	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
-					(cmd_dir == ISER_DIR_OUT) ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (!data->dma_nents) {
-		iser_err("Got dma_nents %d, something went wrong...\n",
-			 data->dma_nents);
-		rc = -ENOMEM;
-		goto err;
-	}
-
-	return 0;
-err:
-	iser_free_bounce_sg(data);
-	return rc;
-}
-
-/**
- * iser_finalize_rdma_unaligned_sg
- */
-
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
-				     struct iser_data_buf *data,
-				     enum iser_data_dir cmd_dir)
-{
-	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
-
-	ib_dma_unmap_sg(dev, data->sg, data->size,
-			(cmd_dir == ISER_DIR_OUT) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	if (cmd_dir == ISER_DIR_IN)
-		iser_copy_from_bounce(data);
-
-	iser_free_bounce_sg(data);
-}
-
 #define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
 
 /**
@@ -355,64 +192,6 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	return cur_page;
 }
 
-
-/**
- * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
- * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
- * the number of entries which are aligned correctly. Supports the case where
- * consecutive SG elements are actually fragments of the same physcial page.
- */
-static int iser_data_buf_aligned_len(struct iser_data_buf *data,
-				     struct ib_device *ibdev,
-				     unsigned sg_tablesize)
-{
-	struct scatterlist *sg, *sgl, *next_sg = NULL;
-	u64 start_addr, end_addr;
-	int i, ret_len, start_check = 0;
-
-	if (data->dma_nents == 1)
-		return 1;
-
-	sgl = data->sg;
-	start_addr = ib_sg_dma_address(ibdev, sgl);
-
-	if (unlikely(sgl[0].offset &&
-		     data->data_len >= sg_tablesize * PAGE_SIZE)) {
-		iser_dbg("can't register length %lx with offset %x "
-			 "fall to bounce buffer\n", data->data_len,
-			 sgl[0].offset);
-		return 0;
-	}
-
-	for_each_sg(sgl, sg, data->dma_nents, i) {
-		if (start_check && !IS_4K_ALIGNED(start_addr))
-			break;
-
-		next_sg = sg_next(sg);
-		if (!next_sg)
-			break;
-
-		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
-		start_addr = ib_sg_dma_address(ibdev, next_sg);
-
-		if (end_addr == start_addr) {
-			start_check = 0;
-			continue;
-		} else
-			start_check = 1;
-
-		if (!IS_4K_ALIGNED(end_addr))
-			break;
-	}
-	ret_len = (next_sg) ? i : i+1;
-
-	if (unlikely(ret_len != data->dma_nents))
-		iser_warn("rdma alignment violation (%d/%d aligned)\n",
-			  ret_len, data->dma_nents);
-
-	return ret_len;
-}
-
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
@@ -483,31 +262,6 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
 	return 0;
 }
 
-static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *mem,
-			      enum iser_data_dir cmd_dir)
-{
-	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
-	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
-
-	iscsi_conn->fmr_unalign_cnt++;
-
-	if (iser_debug_level > 0)
-		iser_data_buf_dump(mem, device->ib_device);
-
-	/* unmap the command data before accessing it */
-	iser_dma_unmap_task_data(iser_task, mem,
-				 (cmd_dir == ISER_DIR_OUT) ?
-				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	/* allocate copy buf, if we are writing, copy the */
-	/* unaligned scatterlist, dma map the copy        */
-	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
 /**
  * iser_reg_page_vec - Register physical memory
  *
@@ -779,26 +533,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	return 0;
 }
 
-static int
-iser_handle_unaligned_buf(struct iscsi_iser_task *task,
-			  struct iser_data_buf *mem,
-			  enum iser_data_dir dir)
-{
-	struct iser_conn *iser_conn = task->iser_conn;
-	struct iser_device *device = iser_conn->ib_conn.device;
-	int err, aligned_len;
-
-	aligned_len = iser_data_buf_aligned_len(mem, device->ib_device,
-						iser_conn->scsi_sg_tablesize);
-	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(task, mem, dir);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
 static int
 iser_reg_prot_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
@@ -841,10 +575,6 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
 	bool use_dma_key;
 	int err;
 
-	err = iser_handle_unaligned_buf(task, mem, dir);
-	if (unlikely(err))
-		return err;
-
 	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
 		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
 
@@ -867,10 +597,6 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
 
 	if (scsi_prot_sg_count(task->sc)) {
 		mem = &task->prot[dir];
-		err = iser_handle_unaligned_buf(task, mem, dir);
-		if (unlikely(err))
-			goto err_reg;
-
 		err = iser_reg_prot_sg(task, mem, desc,
 				       use_dma_key, prot_reg);
 		if (unlikely(err))