@@ -250,6 +250,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	const struct iommu_gather_ops	*tlb_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
@@ -735,7 +736,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	const struct iommu_gather_ops *tlb_ops;
 
 	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
@@ -813,7 +813,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			ias = min(ias, 32UL);
 			oas = min(oas, 32UL);
 		}
-		tlb_ops = &arm_smmu_s1_tlb_ops;
+		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -833,9 +833,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			oas = min(oas, 40UL);
 		}
 		if (smmu->version == ARM_SMMU_V2)
-			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
 		else
-			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
 		break;
 	default:
 		ret = -EINVAL;
@@ -863,7 +863,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
-		.tlb		= tlb_ops,
+		.tlb		= smmu_domain->tlb_ops,
 		.iommu_dev	= smmu->dev,
 	};
 
@@ -1259,6 +1259,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
@@ -1562,6 +1570,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= default_iommu_map_sg,
+	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
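
For context, a caller-side sketch (not part of the patch) of how the v4.14-era core API is expected to drive the new callbacks: iommu_unmap_fast() tears down mappings without the implicit per-page TLB maintenance, and iommu_tlb_sync() reaches arm_smmu_iotlb_sync() through the new ->iotlb_sync hook. The wrapper function below and its arguments are purely illustrative.

#include <linux/iommu.h>

/* Illustrative helper, not part of the patch or of arm-smmu.c. */
static void example_unmap_then_sync(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	/* Unmap without waiting for TLB invalidation on each page. */
	size_t unmapped = iommu_unmap_fast(domain, iova, size);

	/* One sync covers the whole batch; it lands in ->iotlb_sync(). */
	if (unmapped)
		iommu_tlb_sync(domain);
}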