@@ -80,7 +80,7 @@
  */

 #define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 static DEFINE_SPINLOCK(iommu_table_lock);
@@ -2097,9 +2097,9 @@ static int attach_device(struct device *dev,
 	}

 skip_ats_check:
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev_data, domain);
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

 	/*
 	 * We might boot into a crash-kernel here. The crashed kernel
@@ -2149,9 +2149,9 @@ static void detach_device(struct device *dev)
 	domain = dev_data->domain;

 	/* lock device table */
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	__detach_device(dev_data);
-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

 	if (!dev_is_pci(dev))
 		return;
@@ -2814,7 +2814,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	struct iommu_dev_data *entry;
 	unsigned long flags;

-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);

 	while (!list_empty(&domain->dev_list)) {
 		entry = list_first_entry(&domain->dev_list,
@@ -2822,7 +2822,7 @@ static void cleanup_domain(struct protection_domain *domain)
 		__detach_device(entry);
 	}

-	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }

 static void protection_domain_free(struct protection_domain *domain)
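For context: every site touched by this diff takes amd_iommu_devtable_lock in write mode (write_lock_irqsave/write_unlock_irqrestore), and no read-side users appear, so the rwlock offers no reader concurrency and a plain spinlock gives the same mutual exclusion with a simpler primitive. Below is a minimal kernel-style sketch of the before/after pattern; the names example_rwlock, example_lock, and writer_only_critical_section are illustrative and not part of the patch.

#include <linux/spinlock.h>

/* Before: a rwlock, even though every user took it as a writer. */
static DEFINE_RWLOCK(example_rwlock);

/* After: a plain spinlock; equivalent when there are no readers. */
static DEFINE_SPINLOCK(example_lock);

static void writer_only_critical_section(void)
{
	unsigned long flags;

	/* Old pattern: write-side rwlock with interrupts disabled. */
	write_lock_irqsave(&example_rwlock, flags);
	/* ... mutate shared state ... */
	write_unlock_irqrestore(&example_rwlock, flags);

	/* New pattern: same semantics, simpler lock. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... mutate shared state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

The _irqsave/_irqrestore variants are kept in the patch because the lock can be taken from contexts where interrupts must stay disabled across the critical section; only the lock type changes, not the IRQ discipline.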