@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
 					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-	dev_addr = xen_phys_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
+			dev_addr = xen_phys_to_bus(map);
 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
 						attrs);
-			sg->dma_address = xen_phys_to_bus(map);
+			sg->dma_address = dev_addr;
 		} else {
 			/* we are not interested in the dma_addr returned by
 			 * xen_dma_map_page, only in the potential cache flushes executed
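
What the two hunks above do (a hedged reading of just these lines, not of the full patch): the bounce buffer's bus address xen_phys_to_bus(map) is now computed into dev_addr before xen_dma_map_page() runs, so the hook receives that value as its dev_addr argument instead of whatever dev_addr held beforehand; in the scatter-gather hunk the same precomputed dev_addr is then reused for sg->dma_address rather than calling xen_phys_to_bus(map) a second time. The standalone sketch below only illustrates that compute-before-use ordering; the two helpers in it are hypothetical userspace stand-ins with simplified signatures, not the real swiotlb-xen functions.

/*
 * Minimal userspace sketch of the ordering fixed above: derive the bus
 * address from the bounce-buffer physical address *before* handing it to
 * the mapping hook.  xen_phys_to_bus()/xen_dma_map_page() here are
 * hypothetical stand-ins, not the kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Stand-in: translate a (bounce-buffer) physical address to a bus address. */
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return paddr;	/* identity translation is enough for the sketch */
}

/* Stand-in: the hook only cares that dev_addr already holds the bounce
 * buffer's bus address when it is called. */
static void xen_dma_map_page(dma_addr_t dev_addr, size_t offset, size_t size)
{
	printf("map dev_addr=0x%llx offset=%zu size=%zu\n",
	       (unsigned long long)dev_addr, offset, size);
}

int main(void)
{
	phys_addr_t map = 0x1000;	/* pretend swiotlb bounce-buffer address */
	dma_addr_t dev_addr;

	dev_addr = xen_phys_to_bus(map);	/* compute first ...           */
	xen_dma_map_page(dev_addr,		/* ... then use the same value */
			 map & 0xfff,		/* offset in page, i.e. map & ~PAGE_MASK */
			 4096);
	return 0;
}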