|
@@ -122,6 +122,24 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
|
|
|
return nents;
|
|
|
}
|
|
|
|
|
|
+/*
+ * dma_direct_supported - report whether the direct mapping can satisfy @mask.
+ *
+ * Returns 1 when a device with DMA mask @mask can use the direct
+ * (non-IOMMU) mapping, 0 otherwise.  @dev is not consulted here.
+ */
+int dma_direct_supported(struct device *dev, u64 mask)
|
|
|
+{
|
|
|
+#ifdef CONFIG_ZONE_DMA
|
|
|
+	/* ZONE_DMA bounds the smallest mask direct allocations can honor. */
+	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
|
|
|
+		return 0;
|
|
|
+#else
|
|
|
+	/*
|
|
|
+	 * Because 32-bit DMA masks are so common we expect every architecture
|
|
|
+	 * to be able to satisfy them - either by not supporting more physical
|
|
|
+	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
|
|
|
+	 * architecture needs to use an IOMMU instead of the direct mapping.
|
|
|
+	 */
|
|
|
+	if (mask < DMA_BIT_MASK(32))
|
|
|
+		return 0;
|
|
|
+#endif
|
|
|
+	return 1;
|
|
|
+}
|
|
|
+
|
|
|
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
|
{
|
|
|
return dma_addr == DIRECT_MAPPING_ERROR;
|
|
@@ -132,6 +150,7 @@ const struct dma_map_ops dma_direct_ops = {
|
|
|
.free = dma_direct_free,
|
|
|
.map_page = dma_direct_map_page,
|
|
|
.map_sg = dma_direct_map_sg,
|
|
|
+ .dma_supported = dma_direct_supported,
|
|
|
.mapping_error = dma_direct_mapping_error,
|
|
|
};
|
|
|
EXPORT_SYMBOL(dma_direct_ops);
|