@@ -21,6 +21,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
-	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-		return dev_addr;
+	if (!dma_capable(dev, dev_addr, size) ||
+	    swiotlb_force == SWIOTLB_FORCE) {
+		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+		dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	}
+
+	if (!dev_is_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_device(dev, phys, size, dir);
 
-	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	return dev_addr;
 }
 
 /*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (!dev_is_dma_coherent(hwdev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
 	if (is_swiotlb_buffer(paddr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
@@ -730,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(paddr)) {
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+	if (is_swiotlb_buffer(paddr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
 
-	if (dir != DMA_FROM_DEVICE)
-		return;
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+		arch_sync_dma_for_device(hwdev, paddr, size, dir);
 
-	dma_mark_clean(phys_to_virt(paddr), size);
+	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+		dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
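
For reference, a sketch of how the most heavily restructured function, swiotlb_sync_single(), reads once the hunks above are applied. It is reconstructed from the final hunk plus its unchanged context; the paddr declaration is assumed from the surrounding file rather than shown in the diff, so treat this as illustrative, not the verbatim post-patch source:

static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	/* Assumed unchanged context: translate the DMA address back. */
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* Non-coherent device: sync caches before the CPU touches the data. */
	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);

	/* Copy to/from the bounce buffer if this address was bounced. */
	if (is_swiotlb_buffer(paddr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	/* Non-coherent device: sync caches before handing back to the device. */
	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
		arch_sync_dma_for_device(hwdev, paddr, size, dir);

	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
		dma_mark_clean(phys_to_virt(paddr), size);
}

The ordering mirrors swiotlb_map_page() and swiotlb_unmap_page(): the arch cache maintenance always brackets the swiotlb bounce copy, so a non-coherent device sees consistent data whether or not the buffer was bounced.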