@@ -1697,31 +1697,12 @@ static int
 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
 	struct scatterlist *sg;
-	struct page *page;
 	unsigned int nent;
-	u32 page_len, length;
-	int i = 0;
 
-	length = fod->req.transfer_len;
-	nent = DIV_ROUND_UP(length, PAGE_SIZE);
-	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
 	if (!sg)
 		goto out;
 
-	sg_init_table(sg, nent);
-
-	while (length) {
-		page_len = min_t(u32, length, PAGE_SIZE);
-
-		page = alloc_page(GFP_KERNEL);
-		if (!page)
-			goto out_free_pages;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
-	}
-
 	fod->data_sg = sg;
 	fod->data_sg_cnt = nent;
 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 
 	return 0;
 
-out_free_pages:
-	while (i > 0) {
-		i--;
-		__free_page(sg_page(&sg[i]));
-	}
-	kfree(sg);
-	fod->data_sg = NULL;
-	fod->data_sg_cnt = 0;
 out:
 	return NVME_SC_INTERNAL;
 }
@@ -1746,18 +1719,13 @@ out:
 static void
 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 {
-	struct scatterlist *sg;
-	int count;
-
 	if (!fod->data_sg || !fod->data_sg_cnt)
 		return;
 
 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
-	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
-		__free_page(sg_page(sg));
-	kfree(fod->data_sg);
+	sgl_free(fod->data_sg);
 	fod->data_sg = NULL;
 	fod->data_sg_cnt = 0;
 }
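
Note: sgl_alloc() and sgl_free() are declared in <linux/scatterlist.h> and are available when CONFIG_SGL_ALLOC is selected. As a rough sketch of the allocate/free pattern the hunks above switch to (the helper names below are illustrative, not part of the driver):

	#include <linux/scatterlist.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: build a scatterlist covering "len" bytes,
	 * backed by freshly allocated order-0 pages. */
	static struct scatterlist *example_alloc_sgl(u32 len, unsigned int *nents)
	{
		/* sgl_alloc() allocates both the scatterlist array and the
		 * pages in one call; it returns NULL on failure and stores
		 * the number of entries in *nents. */
		return sgl_alloc(len, GFP_KERNEL, nents);
	}

	/* Hypothetical helper: undo example_alloc_sgl(). */
	static void example_free_sgl(struct scatterlist *sg)
	{
		/* sgl_free() releases the pages and the scatterlist itself,
		 * replacing the open-coded for_each_sg()/__free_page()/kfree()
		 * sequence and the out_free_pages unwind path. */
		sgl_free(sg);
	}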