@@ -246,6 +246,7 @@ struct arm_smmu_domain {
 	const struct iommu_gather_ops *tlb_ops;
 	struct arm_smmu_cfg cfg;
 	enum arm_smmu_domain_stage stage;
+	bool non_strict;
 	struct mutex init_mutex; /* Protects smmu pointer */
 	spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain domain;
@@ -447,7 +448,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
-	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	/*
+	 * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+	 * cleared by the current CPU are visible to the SMMU before the TLBI.
+	 */
+	writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
 	arm_smmu_tlb_sync_context(cookie);
 }
 
@@ -457,7 +462,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_GR0(smmu);
 
-	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+	/* NOTE: see above */
+	writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 	arm_smmu_tlb_sync_global(smmu);
 }
 
@@ -869,6 +875,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
 
+	if (smmu_domain->non_strict)
+		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
 	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
@@ -1258,6 +1267,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1476,15 +1493,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-		return 0;
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			*(int *)data = smmu_domain->non_strict;
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
 	default:
-		return -ENODEV;
+		return -EINVAL;
 	}
 }
 
@@ -1494,28 +1523,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	int ret = 0;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
 	mutex_lock(&smmu_domain->init_mutex);
 
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		if (smmu_domain->smmu) {
-			ret = -EPERM;
-			goto out_unlock;
+	switch(domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			if (smmu_domain->smmu) {
+				ret = -EPERM;
+				goto out_unlock;
+			}
+
+			if (*(int *)data)
+				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+			else
+				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			smmu_domain->non_strict = *(int *)data;
+			break;
+		default:
+			ret = -ENODEV;
 		}
-
-		if (*(int *)data)
-			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-		else
-			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
 		break;
 	default:
-		ret = -ENODEV;
+		ret = -EINVAL;
 	}
-
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
@@ -1568,7 +1607,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev = arm_smmu_attach_dev,
 	.map = arm_smmu_map,
 	.unmap = arm_smmu_unmap,
-	.flush_iotlb_all = arm_smmu_iotlb_sync,
+	.flush_iotlb_all = arm_smmu_flush_iotlb_all,
 	.iotlb_sync = arm_smmu_iotlb_sync,
 	.iova_to_phys = arm_smmu_iova_to_phys,
 	.add_device = arm_smmu_add_device,
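
Note (illustrative, not part of the patch): together with the companion core change that defines DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, a caller could in principle request the non-strict behaviour on a DMA domain through the existing attribute interface, as sketched below. In this series the core code normally drives the attribute itself (based on the iommu.strict= option), and it only takes effect if set before the domain context is initialised, since IO_PGTABLE_QUIRK_NON_STRICT is applied in arm_smmu_init_domain_context(). The helper name example_enable_non_strict is hypothetical.

#include <linux/iommu.h>

/* Hypothetical helper: request lazy (non-strict) TLB invalidation. */
static int example_enable_non_strict(struct iommu_domain *domain)
{
	int non_strict = 1;	/* non-zero selects the flush-queue path */

	/* Must happen before the first attach initialises the context. */
	return iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				     &non_strict);
}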