|
@@ -3164,6 +3164,7 @@ static bool amd_iommu_capable(enum iommu_cap cap)
|
|
|
static void amd_iommu_get_resv_regions(struct device *dev,
|
|
|
struct list_head *head)
|
|
|
{
|
|
|
+ struct iommu_resv_region *region;
|
|
|
struct unity_map_entry *entry;
|
|
|
int devid;
|
|
|
|
|
@@ -3172,28 +3173,42 @@ static void amd_iommu_get_resv_regions(struct device *dev,
|
|
|
return;
|
|
|
|
|
|
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
|
|
|
- struct iommu_resv_region *region;
|
|
|
+ size_t length;
|
|
|
+ int prot = 0;
|
|
|
|
|
|
if (devid < entry->devid_start || devid > entry->devid_end)
|
|
|
continue;
|
|
|
|
|
|
- region = kzalloc(sizeof(*region), GFP_KERNEL);
|
|
|
+ length = entry->address_end - entry->address_start;
|
|
|
+ if (entry->prot & IOMMU_PROT_IR)
|
|
|
+ prot |= IOMMU_READ;
|
|
|
+ if (entry->prot & IOMMU_PROT_IW)
|
|
|
+ prot |= IOMMU_WRITE;
|
|
|
+
|
|
|
+ region = iommu_alloc_resv_region(entry->address_start,
|
|
|
+ length, prot,
|
|
|
+ IOMMU_RESV_DIRECT);
|
|
|
if (!region) {
|
|
|
pr_err("Out of memory allocating dm-regions for %s\n",
|
|
|
dev_name(dev));
|
|
|
return;
|
|
|
}
|
|
|
-
|
|
|
- region->start = entry->address_start;
|
|
|
- region->length = entry->address_end - entry->address_start;
|
|
|
- region->type = IOMMU_RESV_DIRECT;
|
|
|
- if (entry->prot & IOMMU_PROT_IR)
|
|
|
- region->prot |= IOMMU_READ;
|
|
|
- if (entry->prot & IOMMU_PROT_IW)
|
|
|
- region->prot |= IOMMU_WRITE;
|
|
|
-
|
|
|
list_add_tail(&region->list, head);
|
|
|
}
|
|
|
+
|
|
|
+ region = iommu_alloc_resv_region(MSI_RANGE_START,
|
|
|
+ MSI_RANGE_END - MSI_RANGE_START + 1,
|
|
|
+ 0, IOMMU_RESV_RESERVED);
|
|
|
+ if (!region)
|
|
|
+ return;
|
|
|
+ list_add_tail(&region->list, head);
|
|
|
+
|
|
|
+ region = iommu_alloc_resv_region(HT_RANGE_START,
|
|
|
+ HT_RANGE_END - HT_RANGE_START + 1,
|
|
|
+ 0, IOMMU_RESV_RESERVED);
|
|
|
+ if (!region)
|
|
|
+ return;
|
|
|
+ list_add_tail(&region->list, head);
|
|
|
}
|
|
|
|
|
|
static void amd_iommu_put_resv_regions(struct device *dev,
|