@@ -167,6 +167,10 @@ struct iommu_resv_region {
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
* to an iommu domain
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
@@ -199,6 +203,10 @@ struct iommu_ops {
size_t size);
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_range_add)(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
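The three new callbacks split TLB maintenance into a synchronous full flush, a cheap queueing step, and a single synchronous drain of the queue. A minimal sketch of how a driver might wire them up (the my_iommu_* names are hypothetical, not part of this patch):

static void my_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	/* invalidate the whole TLB for this domain and wait for completion */
}

static void my_iommu_iotlb_range_add(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	/* record [iova, iova + size) in a driver-private flush queue */
}

static void my_iommu_iotlb_sync(struct iommu_domain *domain)
{
	/* invalidate every queued range, wait once, then empty the queue */
}

static const struct iommu_ops my_iommu_ops = {
	/* ... map, unmap, map_sg and the other callbacks ... */
	.flush_iotlb_all = my_iommu_flush_iotlb_all,
	.iotlb_range_add = my_iommu_iotlb_range_add,
	.iotlb_sync	 = my_iommu_iotlb_sync,
};

All three callbacks are optional: the iommu_* wrappers added later in this patch check for NULL before calling them.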
@@ -286,7 +294,9 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
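iommu_unmap() keeps its synchronous semantics, while iommu_unmap_fast() returns without flushing the IOMMU TLBs and leaves the invalidation to the caller. Conceptually (a sketch with a hypothetical helper, not the in-tree implementation), a synchronous unmap is a fast unmap plus a flush of the unmapped range:

static size_t my_unmap_sync(struct iommu_domain *domain,
			    unsigned long iova, size_t size)
{
	size_t unmapped = iommu_unmap_fast(domain, iova, size);

	/* queue the unmapped range, then flush it synchronously */
	iommu_tlb_range_add(domain, iova, unmapped);
	iommu_tlb_sync(domain);
	return unmapped;
}

The caller must not reuse the IOVA range, or free the pages that were mapped there, before the sync has completed.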
@@ -343,6 +353,25 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);

+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ if (domain->ops->iotlb_range_add)
+ domain->ops->iotlb_range_add(domain, iova, size);
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain);
+}
+
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
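These wrappers let generic code use the deferred-flush interface unconditionally; a driver that implements none of the callbacks simply keeps its old synchronous unmap behaviour. A hypothetical caller that batches several unmaps into a single hardware flush (my_unmap_batch is illustrative only):

static void my_unmap_batch(struct iommu_domain *domain,
			   const unsigned long *iovas,
			   const size_t *sizes, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		size_t unmapped = iommu_unmap_fast(domain, iovas[i],
						   sizes[i]);

		/* queueing is cheap; no hardware access happens here */
		iommu_tlb_range_add(domain, iovas[i], unmapped);
	}

	/* one synchronous flush covers all queued ranges */
	iommu_tlb_sync(domain);
}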
@@ -436,6 +465,12 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}

+static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
@@ -443,6 +478,19 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return -ENODEV;
}

+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)
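Note that in the !CONFIG_IOMMU_API case above, the three flush helpers compile to empty inline functions and iommu_unmap_fast() simply returns -ENODEV, so callers of the deferred-flush sequence need no #ifdef guards.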