@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
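
A quick sanity check on the new comment's claim that page_map_size can never cross PAGE_SIZE. This is a minimal userspace sketch, not driver code; the constant values are assumptions taken from my reading of drivers/infiniband/hw/mlx4/mlx4_ib.h (MLX4_MAX_FAST_REG_PAGES == 511, MLX4_MR_PAGES_ALIGN == 0x40) together with a 4 KiB page, so verify them against your tree:

/*
 * Sketch only: mirrors the size computation in the patch above.
 * The three constants are assumptions, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MLX4_MAX_FAST_REG_PAGES	511	/* assumed driver limit */
#define MLX4_MR_PAGES_ALIGN	0x40	/* assumed DMA cacheline alignment */
#define PAGE_SIZE		4096	/* assumed 4 KiB pages */

/* Userspace stand-in for the kernel's roundup() macro. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	size_t page_map_size = roundup(MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t),
				       MLX4_MR_PAGES_ALIGN);

	/* 511 * 8 = 4088, rounded up to 64 -> 4096: exactly one page. */
	printf("worst-case page_map_size = %zu, fits in one page: %s\n",
	       page_map_size,
	       page_map_size <= PAGE_SIZE ? "yes" : "no");
	return 0;
}

With those assumed values the worst case lands exactly on one page, which is why a single get_zeroed_page() buffer can replace the old kzalloc() + PTR_ALIGN() scheme: the returned buffer is page-aligned by construction, so it satisfies MLX4_MR_PAGES_ALIGN for free and the descriptor array can never straddle a page boundary.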