@@ -845,37 +845,27 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems,
 		     enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
-	BUG_ON(dir == DMA_NONE);
-
 	for_each_sg(sgl, sg, nelems, i) {
-		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
-		if (swiotlb_force == SWIOTLB_FORCE ||
-		    !dma_capable(hwdev, dev_addr, sg->length)) {
-			phys_addr_t map = map_single(hwdev, sg_phys(sg),
-						     sg->length, dir, attrs);
-			if (map == SWIOTLB_MAP_ERROR) {
-				/* Don't panic here, we expect map_sg users
-				   to do proper error handling. */
-				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
-						       attrs);
-				sg_dma_len(sgl) = 0;
-				return 0;
-			}
-			sg->dma_address = __phys_to_dma(hwdev, map);
-		} else
-			sg->dma_address = dev_addr;
+		sg->dma_address = swiotlb_map_page(dev, sg_page(sg), sg->offset,
+				sg->length, dir, attrs);
+		if (sg->dma_address == DIRECT_MAPPING_ERROR)
+			goto out_error;
 		sg_dma_len(sg) = sg->length;
 	}
+
 	return nelems;
+
+out_error:
+	swiotlb_unmap_sg_attrs(dev, sgl, i, dir,
+			attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	sg_dma_len(sgl) = 0;
+	return 0;
 }
 
 /*