@@ -611,24 +611,13 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
 
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+	/*
+	 * Ensure new page tables are visible to a coherent hardware walker.
+	 * The page table code deals with flushing for the non-coherent case.
+	 */
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		dsb(ishst);
-	} else {
-		/*
-		 * If the SMMU can't walk tables in the CPU caches, treat them
-		 * like non-coherent DMA since we need to flush the new entries
-		 * all the way out to memory. There's no possibility of
-		 * recursion here as the SMMU table walker will not be wired
-		 * through another SMMU.
-		 */
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-			     DMA_TO_DEVICE);
-	}
 }
 
 static struct iommu_gather_ops arm_smmu_gather_ops = {
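
For reference (not part of the patch text), arm_smmu_flush_pgtable() reads as
follows once this hunk is applied:

static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	/*
	 * Ensure new page tables are visible to a coherent hardware walker.
	 * The page table code deals with flushing for the non-coherent case.
	 */
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb(ishst);
}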

@@ -899,6 +888,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
+		.iommu_dev	= smmu->dev,
 	};
 
 	smmu_domain->smmu = smmu;
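
The new .iommu_dev member hands the SMMU's struct device to the page-table
code, which is what lets the first hunk drop the driver-side dma_map_page()
call: flushing for a non-coherent walker now happens behind the DMA API in
the page-table layer. A rough sketch of that pattern, using hypothetical
helper names (the real io-pgtable code differs in detail):

#include <linux/dma-mapping.h>

/*
 * Sketch only: publish page tables to a walker that may not snoop the
 * CPU caches, using the device passed in via cfg->iommu_dev. On a
 * cache-coherent device the streaming DMA API does no cache maintenance,
 * so the same path serves both kinds of walker.
 */
static dma_addr_t example_map_table(struct device *iommu_dev, void *table,
				    size_t size)
{
	/*
	 * Clean the CPU cache lines covering the newly allocated table;
	 * callers should check the result with dma_mapping_error().
	 */
	return dma_map_single(iommu_dev, table, size, DMA_TO_DEVICE);
}

static void example_sync_pte(struct device *iommu_dev, dma_addr_t base,
			     unsigned long offset, size_t len)
{
	/* Push an updated PTE out to memory for the hardware walker. */
	dma_sync_single_for_device(iommu_dev, base + offset, len,
				   DMA_TO_DEVICE);
}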