@@ -612,6 +612,7 @@ struct arm_smmu_domain {
 	struct mutex			init_mutex; /* Protects smmu pointer */
 
 	struct io_pgtable_ops		*pgtbl_ops;
+	bool				non_strict;
 
 	enum arm_smmu_domain_stage	stage;
 	union {
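
The new flag lives in the driver-private arm_smmu_domain, not in the core iommu_domain. For orientation, the attribute handlers below recover the private structure with the driver's existing to_smmu_domain() helper, which (assuming the usual container_of() pattern used throughout this driver) looks roughly like:

	static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct arm_smmu_domain, domain);
	}
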
@@ -1407,6 +1408,12 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
 	}
 
+	/*
+	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
+	 * PTEs previously cleared by unmaps on the current CPU not yet visible
+	 * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
+	 * to guarantee those are observed before the TLBI. Do be careful, 007.
+	 */
 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 	__arm_smmu_tlb_sync(smmu);
 }
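
For context on the NOTE above: the DSB it relies on is the write barrier implied by writel() when queue_inc_prod() publishes the new producer index. On arm64, writel() (unlike writel_relaxed()) issues a DSB before the MMIO store, so PTE clears made on this CPU are observable by the SMMU before it can consume the TLBI command. A rough, from-memory sketch of that helper in the same driver (details may differ between kernel versions):

	static void queue_inc_prod(struct arm_smmu_queue *q)
	{
		u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

		q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
		/* writel(), not writel_relaxed(): the implicit DSB is load-bearing */
		writel(q->prod, q->prod_reg);
	}
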
@@ -1633,6 +1640,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
 		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
 
+	if (smmu_domain->non_strict)
+		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops)
 		return -ENOMEM;
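
The quirk set here is consumed by the companion io-pgtable-arm patch: with IO_PGTABLE_QUIRK_NON_STRICT, the unmap path skips per-leaf TLB maintenance and merely orders the PTE clear against queueing the IOVA for a deferred flush. A simplified sketch of the shape of that logic (not the exact upstream hunk):

	/* in the leaf-unmap path, after clearing the PTE */
	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT)
		smp_wmb();	/* PTE clear visible before the IOVA enters the flush queue */
	else
		io_pgtable_tlb_add_flush(iop, iova, size, size, true);
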
@@ -1934,15 +1944,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-		return 0;
+	switch (domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			*(int *)data = smmu_domain->non_strict;
+			return 0;
+		default:
+			return -ENODEV;
+		}
+		break;
 	default:
-		return -ENODEV;
+		return -EINVAL;
 	}
 }
 
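
From the caller's side, a user of a DMA domain can now query whether it operates in flush-queue (non-strict) mode. A minimal sketch, assuming the iommu_domain_get_attr() interface of this kernel generation:

	int non_strict = 0;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				   &non_strict) && non_strict)
		pr_info("domain uses deferred (non-strict) TLB invalidation\n");
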
@@ -1952,26 +1974,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	int ret = 0;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-
 	mutex_lock(&smmu_domain->init_mutex);
 
-	switch (attr) {
-	case DOMAIN_ATTR_NESTING:
-		if (smmu_domain->smmu) {
-			ret = -EPERM;
-			goto out_unlock;
+	switch (domain->type) {
+	case IOMMU_DOMAIN_UNMANAGED:
+		switch (attr) {
+		case DOMAIN_ATTR_NESTING:
+			if (smmu_domain->smmu) {
+				ret = -EPERM;
+				goto out_unlock;
+			}
+
+			if (*(int *)data)
+				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+			else
+				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		break;
+	case IOMMU_DOMAIN_DMA:
+		switch (attr) {
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+			smmu_domain->non_strict = *(int *)data;
+			break;
+		default:
+			ret = -ENODEV;
 		}
-
-		if (*(int *)data)
-			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-		else
-			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
 		break;
 	default:
-		ret = -ENODEV;
+		ret = -EINVAL;
 	}
 
 out_unlock:
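
The setter is what lets core code opt a default DMA domain into non-strict mode; note that arm_smmu_domain_finalise() reads non_strict, so the attribute only takes effect if it is set before the domain is attached and finalised. A minimal caller-side sketch, again assuming the iommu_domain_set_attr() interface:

	int attr = 1;

	/* must happen before the domain is finalised for the quirk to apply */
	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr))
		pr_warn("failed to enable non-strict mode\n");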