@@ -1606,6 +1606,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 	iommu_flush_dev_iotlb(domain, addr, mask);
 }
 
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+					struct dmar_domain *domain,
+					unsigned long pfn, unsigned int pages)
+{
+	/* It's a non-present to present mapping. Only flush if caching mode */
+	if (cap_caching_mode(iommu->cap))
+		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+	else
+		iommu_flush_write_buffer(iommu);
+}
+
 static void iommu_flush_iova(struct iova_domain *iovad)
 {
 	struct dmar_domain *domain;
@@ -3625,13 +3637,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	if (ret)
 		goto error;
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova_pfn),
-				      size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
+	__mapping_notify_one(iommu, domain, mm_to_dma_pfn(iova_pfn), size);
 
 	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
@@ -3819,11 +3825,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 		return 0;
 	}
 
-	/* it's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
-	else
-		iommu_flush_write_buffer(iommu);
+	__mapping_notify_one(iommu, domain, start_vpfn, size);
 
 	return nelems;
 }