@@ -3412,6 +3412,47 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 	return false;
 }
 
+static void amd_iommu_get_dm_regions(struct device *dev,
+				     struct list_head *head)
+{
+	struct unity_map_entry *entry;
+	u16 devid;
+
+	devid = get_device_id(dev);
+
+	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+		struct iommu_dm_region *region;
+
+		if (devid < entry->devid_start || devid > entry->devid_end)
+			continue;
+
+		region = kzalloc(sizeof(*region), GFP_KERNEL);
+		if (!region) {
+			pr_err("Out of memory allocating dm-regions for %s\n",
+			       dev_name(dev));
+			return;
+		}
+
+		region->start = entry->address_start;
+		region->length = entry->address_end - entry->address_start;
+		if (entry->prot & IOMMU_PROT_IR)
+			region->prot |= IOMMU_READ;
+		if (entry->prot & IOMMU_PROT_IW)
+			region->prot |= IOMMU_WRITE;
+
+		list_add_tail(&region->list, head);
+	}
+}
+
+static void amd_iommu_put_dm_regions(struct device *dev,
+				     struct list_head *head)
+{
+	struct iommu_dm_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
@@ -3422,6 +3463,8 @@ static const struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.get_dm_regions = amd_iommu_get_dm_regions,
+	.put_dm_regions = amd_iommu_put_dm_regions,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
 
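
For context (not part of the patch): below is a minimal sketch of how a consumer such as the IOMMU core might use the two new callbacks to identity-map a device's direct-mapped regions into a domain. Only ->get_dm_regions()/->put_dm_regions() and the struct iommu_dm_region fields used above (start, length, prot, list) come from the hunk; the helper name example_map_dm_regions() and the choice of iommu_map() for a 1:1 mapping are illustrative assumptions, not necessarily what the core does.

/*
 * Illustrative sketch only: identity-map every dm-region a driver
 * reports for @dev into @domain, using the callbacks added above.
 */
#include <linux/iommu.h>
#include <linux/list.h>

static int example_map_dm_regions(struct iommu_domain *domain,
				  struct device *dev)
{
	const struct iommu_ops *ops = domain->ops;
	struct iommu_dm_region *region;
	LIST_HEAD(mappings);
	int ret = 0;

	if (!ops->get_dm_regions || !ops->put_dm_regions)
		return 0;

	/* The driver (e.g. amd_iommu_get_dm_regions) fills 'mappings'. */
	ops->get_dm_regions(dev, &mappings);

	/* Map each region 1:1 with the protection the driver reported. */
	list_for_each_entry(region, &mappings, list) {
		ret = iommu_map(domain, region->start, region->start,
				region->length, region->prot);
		if (ret)
			break;
	}

	/* The driver frees whatever it allocated in get_dm_regions(). */
	ops->put_dm_regions(dev, &mappings);

	return ret;
}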