@@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
-				      phys_addr_t address)
-{
-	return __sme_clr(phys_to_dma(hwdev, address));
-}
-
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -622,7 +615,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 		return SWIOTLB_MAP_ERROR;
 	}
 
-	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
+	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -726,12 +719,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		goto out_warn;
 
 	phys_addr = swiotlb_tbl_map_single(dev,
-			swiotlb_phys_to_dma(dev, io_tlb_start),
+			__phys_to_dma(dev, io_tlb_start),
 			0, size, DMA_FROM_DEVICE, 0);
 	if (phys_addr == SWIOTLB_MAP_ERROR)
 		goto out_warn;
 
-	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+	*dma_handle = __phys_to_dma(dev, phys_addr);
 	if (dma_coherent_ok(dev, *dma_handle, size))
 		goto out_unmap;
 
@@ -867,10 +860,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+		return __phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_phys_to_dma(dev, map);
+	dev_addr = __phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
 	if (dma_capable(dev, dev_addr, size))
@@ -879,7 +872,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+	return __phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 
 /*
@@ -1009,7 +1002,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
+			sg->dma_address = __phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg_dma_len(sg) = sg->length;
@@ -1073,7 +1066,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 
 /*
@@ -1085,7 +1078,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
 #ifdef CONFIG_DMA_DIRECT_OPS