@@ -661,17 +661,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 
-static int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 /*
  * Create userspace mapping for the DMA-coherent memory.
  * This function should be called with the pages from the current domain only,
@@ -734,7 +723,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.map_page = xen_swiotlb_map_page,
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
-	.set_dma_mask = xen_swiotlb_set_dma_mask,
 	.mmap = xen_swiotlb_dma_mmap,
 	.get_sgtable = xen_swiotlb_get_sgtable,
 	.mapping_error = xen_swiotlb_mapping_error,