@@ -1087,3 +1087,46 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *vaddr;
+
+ /*
+ * Don't print a warning when the first allocation attempt fails.
+ * swiotlb_alloc_coherent() will print a warning when the DMA memory
+ * allocation ultimately fails.
+ */
+ gfp |= __GFP_NOWARN;
+
+ vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!vaddr)
+ vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+ else
+ dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */
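
The new swiotlb_alloc() tries dma_direct_alloc() first and only falls back to swiotlb_alloc_coherent() when that fails, while swiotlb_free() inspects the DMA address to pick the matching release path. As a minimal illustrative sketch (not part of the patch above), an architecture that selects CONFIG_DMA_DIRECT_OPS could point a device at these common ops roughly as below, assuming a kernel of this era where struct device carries a dma_ops pointer; the helper name example_arch_setup_dma() is hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

/*
 * Hypothetical arch hook, for illustration only: route this device's DMA
 * mapping, sync and allocation calls through the common swiotlb_dma_ops
 * added by the patch above.
 */
static void example_arch_setup_dma(struct device *dev)
{
	dev->dma_ops = &swiotlb_dma_ops;
}

The fallback order in swiotlb_alloc() is deliberate: the direct allocator is tried first so the limited bounce-buffer pool is only consumed when the device's DMA mask cannot be satisfied otherwise, and __GFP_NOWARN keeps that expected first failure quiet until swiotlb_alloc_coherent() reports the final outcome.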