@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
 	};
 	struct list_head		msi_page_list;
 	spinlock_t			msi_lock;
+
+	/* Domain for flush queue callback; NULL if flush queue not in use */
+	struct iommu_domain		*fq_domain;
 };
 
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
 	return ret;
 }
 
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+	struct iommu_dma_cookie *cookie;
+	struct iommu_domain *domain;
+
+	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+	domain = cookie->fq_domain;
+	/*
+	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+	 * implies that ops->flush_iotlb_all must be non-NULL.
+	 */
+	domain->ops->flush_iotlb_all(domain);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
+	int attr;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
+
+	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+		cookie->fq_domain = domain;
+		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+	}
+
 	if (!dev)
 		return 0;
 
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
+	else if (cookie->fq_domain)	/* non-strict mode */
+		queue_iova(iovad, iova_pfn(iovad, iova),
+				size >> iova_shift(iovad), 0);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 
-	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+	if (!cookie->fq_domain)
+		iommu_tlb_sync(domain);
 	iommu_dma_free_iova(cookie, dma_addr, size);
 }
 