|
@@ -4,6 +4,7 @@
|
|
|
*
|
|
|
* DMA operations that map physical memory directly without using an IOMMU.
|
|
|
*/
|
|
|
+#include <linux/bootmem.h> /* for max_pfn */
|
|
|
#include <linux/export.h>
|
|
|
#include <linux/mm.h>
|
|
|
#include <linux/dma-direct.h>
|
|
@@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
+static inline dma_addr_t phys_to_dma_direct(struct device *dev,
|
|
|
+ phys_addr_t phys)
|
|
|
+{
|
|
|
+ if (force_dma_unencrypted())
|
|
|
+ return __phys_to_dma(dev, phys);
|
|
|
+ return phys_to_dma(dev, phys);
|
|
|
+}
|
|
|
+
|
|
|
+u64 dma_direct_get_required_mask(struct device *dev)
|
|
|
+{
|
|
|
+ u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
|
|
|
+
|
|
|
+ return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Check whether a physical buffer of @size bytes at @phys is entirely
 * reachable under the device's coherent DMA mask once translated to a
 * bus address.
 */
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t end = phys_to_dma_direct(dev, phys) + size - 1;

	return end <= dev->coherent_dma_mask;
}
|
|
|
|
|
|
void *dma_direct_alloc_pages(struct device *dev, size_t size,
|
|
@@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = {
|
|
|
.unmap_page = dma_direct_unmap_page,
|
|
|
.unmap_sg = dma_direct_unmap_sg,
|
|
|
#endif
|
|
|
+ .get_required_mask = dma_direct_get_required_mask,
|
|
|
.dma_supported = dma_direct_supported,
|
|
|
.mapping_error = dma_direct_mapping_error,
|
|
|
.cache_sync = arch_dma_cache_sync,
|