@@ -393,7 +393,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	struct mutex			lock;
+	spinlock_t			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
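This first hunk is the heart of the change: the per-domain lock becomes a spinlock_t so it can be taken in atomic context. A mutex may sleep, and the map/unmap paths that take this lock can be reached from callers that are not allowed to sleep. A minimal sketch of such a caller follows; the driver, structure, and lock names are hypothetical, not from this patch:

	#include <linux/iommu.h>
	#include <linux/spinlock.h>

	struct my_dev {				/* hypothetical device state */
		spinlock_t ring_lock;
		struct iommu_domain *domain;
	};

	/*
	 * Hypothetical caller: iommu_map() invoked while the driver's own
	 * spinlock is held. In this atomic context a mutex inside the SMMU
	 * driver would trigger "BUG: sleeping function called from invalid
	 * context".
	 */
	static int queue_dma_buffer(struct my_dev *mydev, unsigned long iova,
				    phys_addr_t paddr)
	{
		int ret;

		spin_lock(&mydev->ring_lock);	/* atomic from here on */
		ret = iommu_map(mydev->domain, iova, paddr, PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		spin_unlock(&mydev->ring_lock);
		return ret;
	}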
@@ -901,7 +901,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	mutex_init(&smmu_domain->lock);
+	spin_lock_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
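Unlike the static arm_smmu_devices_lock above, which is set up at compile time with DEFINE_SPINLOCK(), a spinlock embedded in a dynamically allocated structure must be initialised at runtime before first use, hence spin_lock_init() in domain init. The general pattern, sketched with an illustrative structure standing in for arm_smmu_domain:

	struct foo {				/* illustrative only */
		spinlock_t lock;
	};

	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f)
		spin_lock_init(&f->lock);	/* must precede any spin_lock(&f->lock) */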
@@ -1138,7 +1138,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1153,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1163,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 	return ret;
 }
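The three hunks above convert arm_smmu_attach_dev() without changing the shape of its critical section: the lock is taken before the domain sanity checks, dropped on the success path before the master is added, and dropped on the error path through the err_unlock label. Reduced to its control flow, the locking now looks like this (a sketch of the structure shown above, with the checks and error reporting elided):

	spin_lock(&smmu_domain->lock);
	if (!smmu_domain->leaf_smmu) {
		/* finalise the domain; on failure, goto err_unlock */
	}
	if (device_smmu != smmu_domain->leaf_smmu)	/* crosses SMMU chains */
		goto err_unlock;
	spin_unlock(&smmu_domain->lock);

	/* add the master with the lock released */
	return arm_smmu_domain_add_master(smmu_domain, master);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	return ret;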
@@ -1210,7 +1210,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
-		pgtable_t table = alloc_page(PGALLOC_GFP);
+		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
 		if (!table)
 			return -ENOMEM;
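The allocation previously used PGALLOC_GFP, which is built on GFP_KERNEL (plus __GFP_ZERO) and may therefore sleep in reclaim; that is no longer permissible now that this path runs under the domain spinlock. GFP_ATOMIC forbids sleeping, and __GFP_ZERO preserves the zeroed-table property that PGALLOC_GFP provided. In isolation, the technique looks like this (helper name illustrative, not from the patch):

	/* Allocate a zeroed page-table page without sleeping. */
	static pte_t *smmu_alloc_pte_page(void)
	{
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

		/* NULL when the atomic reserves are exhausted */
		return page ? page_address(page) : NULL;
	}

The trade-off is that GFP_ATOMIC draws on limited reserves and can fail under memory pressure where GFP_KERNEL would have succeeded after reclaim, so the -ENOMEM paths matter more after this change.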
@@ -1317,7 +1317,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	if (pud_none(*pud)) {
-		pmd = pmd_alloc_one(NULL, addr);
+		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pmd)
 			return -ENOMEM;
@@ -1349,7 +1349,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 
 #ifndef __PAGETABLE_PUD_FOLDED
 	if (pgd_none(*pgd)) {
-		pud = pud_alloc_one(NULL, addr);
+		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
 		if (!pud)
 			return -ENOMEM;
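The same no-sleeping rule drives the two hunks above: pmd_alloc_one() and pud_alloc_one() allocate with a GFP_KERNEL-based mask internally (their mm argument is unused here, hence the NULL), so they are swapped for get_zeroed_page(GFP_ATOMIC), which returns the virtual address of a zeroed page without sleeping. A page obtained this way must eventually be released with free_page() rather than pmd_free()/pud_free(). Sketched with illustrative helper names:

	/* Non-sleeping allocation of a mid-level table. */
	static pmd_t *smmu_alloc_pmd_page(void)
	{
		return (pmd_t *)get_zeroed_page(GFP_ATOMIC);
	}

	/* Matching release on table teardown. */
	static void smmu_free_pmd_page(pmd_t *pmd)
	{
		free_page((unsigned long)pmd);
	}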
@@ -1403,7 +1403,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	mutex_lock(&smmu_domain->lock);
+	spin_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1419,7 +1419,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	mutex_unlock(&smmu_domain->lock);
+	spin_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
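Finally, arm_smmu_handle_mapping() takes the same lock around the whole page-table walk. Note what stays outside the critical section: the step that makes the new tables visible to the hardware walker runs after the unlock, keeping the hold time short. In outline (a sketch of the flow above; assuming the barrier this era of the driver issues at that point):

	spin_lock(&smmu_domain->lock);
	/* ... walk and extend pgd/pud/pmd/pte, allocating with GFP_ATOMIC ... */
out_unlock:
	spin_unlock(&smmu_domain->lock);

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb();	/* barrier only; non-coherent walkers rely on tables
			 * being cleaned to memory as they are built */

One caveat worth noting: plain spin_lock()/spin_unlock() is only safe if these paths are never entered from hardirq context; callers in interrupt context would require the spin_lock_irqsave() variants instead.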