@@ -112,29 +112,39 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
 		if (dma_addr & ~PAGE_MASK) {
-			if (i > 0)
+			if (i > 0) {
+				ib_dma_unmap_sg(dev, sg, nents,
+						DMA_BIDIRECTIONAL);
 				return -EINVAL;
-			else
+			} else {
 				++page_cnt;
+			}
 		}
 		if ((dma_addr + dma_len) & ~PAGE_MASK) {
-			if (i < sg_dma_len - 1)
+			if (i < sg_dma_len - 1) {
+				ib_dma_unmap_sg(dev, sg, nents,
+						DMA_BIDIRECTIONAL);
 				return -EINVAL;
-			else
+			} else {
 				++page_cnt;
+			}
 		}
 
 		len += dma_len;
 	}
 
 	page_cnt += len >> PAGE_SHIFT;
-	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		return -EINVAL;
+	}
 
 	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
 				 rdsibdev_to_node(rds_ibdev));
-	if (!dma_pages)
+	if (!dma_pages) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		return -ENOMEM;
+	}
 
 	page_cnt = 0;
 	for (i = 0; i < sg_dma_len; ++i) {
@@ -147,8 +157,10 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 	}
 
 	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
-	if (ret)
+	if (ret) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		goto out;
+	}
 
 	/* Success - we successfully remapped the MR, so we can
 	 * safely tear down the old mapping.
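
The change applies the same rule at every early exit: rds_ib_map_fmr() starts by
DMA-mapping the scatter/gather list with ib_dma_map_sg(), and before this patch
each failure path taken after that point returned without undoing the mapping,
leaking it. A minimal sketch of the invariant being enforced, outside the RDS
code (my_map_and_register() and do_registration() are hypothetical stand-ins,
not RDS functions):

	/*
	 * Sketch only, not the RDS code: every exit taken after a
	 * successful ib_dma_map_sg() must call ib_dma_unmap_sg() with
	 * the same device, scatterlist, nents and direction.
	 */
	static int my_map_and_register(struct ib_device *dev,
				       struct scatterlist *sg, int nents)
	{
		int sg_dma_len, ret;

		sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		if (unlikely(!sg_dma_len))
			return -EBUSY;	/* nothing mapped, nothing to undo */

		ret = do_registration(dev, sg, sg_dma_len);	/* hypothetical */
		if (ret) {
			/* failure after the map succeeded: unmap first */
			ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
			return ret;
		}
		return 0;
	}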