@@ -292,10 +292,10 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->page_map = dma_map_single(device->dma_device, mr->pages,
+	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
 				      mr->page_map_size, DMA_TO_DEVICE);
 
-	if (dma_mapping_error(device->dma_device, mr->page_map)) {
+	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
 		ret = -ENOMEM;
 		goto err;
 	}
@@ -313,7 +313,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
 
-		dma_unmap_single(device->dma_device, mr->page_map,
+		dma_unmap_single(device->dev.parent, mr->page_map,
 				 mr->page_map_size, DMA_TO_DEVICE);
 		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;