@@ -48,7 +48,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!__generic_dma_ops(hwdev)->unmap_page)
+	if (is_device_dma_coherent(hwdev))
 		return;
 	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		return;
@@ -59,7 +59,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+	if (is_device_dma_coherent(hwdev))
 		return;
 	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
 }
@@ -67,7 +67,7 @@ void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 void __xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+	if (is_device_dma_coherent(hwdev))
 		return;
 	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
 }