|
@@ -284,21 +284,25 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
|
|
|
return nents;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Because 32-bit DMA masks are so common we expect every architecture to be
|
|
|
+ * able to satisfy them - either by not supporting more physical memory, or by
|
|
|
+ * providing a ZONE_DMA32. If neither is the case, the architecture needs to
|
|
|
+ * use an IOMMU instead of the direct mapping.
|
|
|
+ */
|
|
|
int dma_direct_supported(struct device *dev, u64 mask)
|
|
|
{
|
|
|
-#ifdef CONFIG_ZONE_DMA
|
|
|
- if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
|
|
|
- return 0;
|
|
|
-#else
|
|
|
- /*
|
|
|
- * Because 32-bit DMA masks are so common we expect every architecture
|
|
|
- * to be able to satisfy them - either by not supporting more physical
|
|
|
- * memory, or by providing a ZONE_DMA32. If neither is the case, the
|
|
|
- * architecture needs to use an IOMMU instead of the direct mapping.
|
|
|
- */
|
|
|
- if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
|
|
|
+ u64 min_mask;
|
|
|
+
|
|
|
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
|
|
|
+ min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
|
|
|
+ else
|
|
|
+ min_mask = DMA_BIT_MASK(32);
|
|
|
+
|
|
|
+ min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
|
|
|
+
|
|
|
+ if (mask < phys_to_dma(dev, min_mask))
|
|
|
return 0;
|
|
|
-#endif
|
|
|
return 1;
|
|
|
}
|
|
|
|