@@ -1471,11 +1471,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+				  struct dmar_domain *domain,
+				  unsigned long pfn, unsigned int pages,
+				  int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+	u16 did = domain->iommu_did[iommu->seq_id];
 
 	BUG_ON(pages == 0);
 
@@ -3422,7 +3425,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain,
+				      mm_to_dma_pfn(iova->pfn_lo),
+				      size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -3473,7 +3478,7 @@ static void flush_unmaps(void)
 
 		/* On real hardware multiple invalidations are expensive */
 		if (cap_caching_mode(iommu->cap))
-			iommu_flush_iotlb_psi(iommu, domain->id,
+			iommu_flush_iotlb_psi(iommu, domain,
 				iova->pfn_lo, iova_size(iova),
 				!deferred_flush[i].freelist[j], 0);
 		else {
@@ -3557,7 +3562,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      last_pfn - start_pfn + 1, !freelist, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -3715,7 +3720,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -4421,7 +4426,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain->id,
+				iommu_flush_iotlb_psi(iommu, si_domain,
 					iova->pfn_lo, iova_size(iova),
 					!freelist, 0);
 			rcu_read_unlock();
@@ -4872,17 +4877,18 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	npages = last_pfn - start_pfn + 1;
 
 	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
-               iommu = g_iommus[iommu_id];
-
-               /*
-                * find bit position of dmar_domain
-                */
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (get_iommu_domain(iommu, num) == dmar_domain)
-                               iommu_flush_iotlb_psi(iommu, num, start_pfn,
-                                                     npages, !freelist, 0);
-               }
+		iommu = g_iommus[iommu_id];
+
+		/*
+		 * find bit position of dmar_domain
+		 */
+		ndomains = cap_ndoms(iommu->cap);
+		for_each_set_bit(num, iommu->domain_ids, ndomains) {
+			if (get_iommu_domain(iommu, num) == dmar_domain)
+				iommu_flush_iotlb_psi(iommu, dmar_domain,
+						      start_pfn, npages,
+						      !freelist, 0);
+		}
 
 	}
 
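The shape of the refactor, for reference: iommu_flush_iotlb_psi() used to take a raw domain id (u16 did), so every caller had to resolve the id before calling; it now takes the struct dmar_domain itself and derives the per-IOMMU id internally via domain->iommu_did[iommu->seq_id]. A minimal standalone sketch of that pattern follows; the toy_iommu/toy_domain types and names are hypothetical stand-ins, not the kernel's structures.

/*
 * Sketch of the refactor above, using toy types: the caller passes the
 * domain object, and the flush helper looks up the domain id that this
 * domain carries on the specific IOMMU unit being flushed.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_IOMMUS 4

struct toy_iommu {
	int seq_id;                     /* index of this IOMMU unit */
};

struct toy_domain {
	uint16_t iommu_did[MAX_IOMMUS]; /* per-IOMMU domain id */
};

/* After the refactor: the helper derives the id itself. */
static void toy_flush_iotlb_psi(struct toy_iommu *iommu,
				struct toy_domain *domain,
				unsigned long pfn, unsigned int pages)
{
	uint16_t did = domain->iommu_did[iommu->seq_id];

	printf("flush iommu %d: did=%u pfn=0x%lx pages=%u\n",
	       iommu->seq_id, did, pfn, pages);
}

int main(void)
{
	struct toy_iommu iommu = { .seq_id = 1 };
	struct toy_domain domain = { .iommu_did = { 0, 7, 0, 0 } };

	/* Call sites no longer mention domain ids at all. */
	toy_flush_iotlb_psi(&iommu, &domain, 0x1000, 4);
	return 0;
}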