@@ -67,6 +67,8 @@ static unsigned long dma_alloc_coherent_mask(struct device *dev,
 }
 #endif
 
+#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -410,7 +412,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return DMA_ERROR_CODE;
+		return XEN_SWIOTLB_ERROR_CODE;
 
 	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -425,7 +427,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return DMA_ERROR_CODE;
+	return XEN_SWIOTLB_ERROR_CODE;
 }
 
 /*
@@ -715,6 +717,11 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 }
 
+static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
+}
+
 const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
@@ -730,4 +737,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.set_dma_mask = xen_swiotlb_set_dma_mask,
 	.mmap = xen_swiotlb_dma_mmap,
 	.get_sgtable = xen_swiotlb_get_sgtable,
+	.mapping_error = xen_swiotlb_mapping_error,
 };
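
For context on how the new callback gets used: drivers never compare against
XEN_SWIOTLB_ERROR_CODE themselves. They call dma_mapping_error(), which
dispatches to the bus's .mapping_error hook, so each dma_map_ops
implementation can pick its own sentinel value instead of depending on a
single global DMA_ERROR_CODE. A minimal caller sketch follows; the helper
name and error handling are hypothetical, not part of this patch:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper, not from this patch: map one page for
 * DMA to the device and report failure the portable way. */
static int example_map_one(struct device *dev, struct page *page,
			   size_t size, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	/*
	 * On a Xen guest this ends up in xen_swiotlb_mapping_error(),
	 * so the XEN_SWIOTLB_ERROR_CODE sentinel stays private to
	 * xen-swiotlb.
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}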