@@ -68,6 +68,12 @@ struct iommu_group_attribute {
                          const char *buf, size_t count);
 };
 
+static const char * const iommu_group_resv_type_string[] = {
+        [IOMMU_RESV_DIRECT]     = "direct",
+        [IOMMU_RESV_RESERVED]   = "reserved",
+        [IOMMU_RESV_MSI]        = "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
 struct iommu_group_attribute iommu_group_attr_##_name =        \
         __ATTR(_name, _mode, _show, _store)
@@ -133,8 +139,135 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
         return sprintf(buf, "%s\n", group->name);
 }
 
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+                                    struct list_head *regions)
+{
+        struct iommu_resv_region *region;
+        phys_addr_t start = new->start;
+        phys_addr_t end = new->start + new->length - 1;
+        struct list_head *pos = regions->next;
+
+        while (pos != regions) {
+                struct iommu_resv_region *entry =
+                        list_entry(pos, struct iommu_resv_region, list);
+                phys_addr_t a = entry->start;
+                phys_addr_t b = entry->start + entry->length - 1;
+                int type = entry->type;
+
+                if (end < a) {
+                        goto insert;
+                } else if (start > b) {
+                        pos = pos->next;
+                } else if ((start >= a) && (end <= b)) {
+                        if (new->type == type)
+                                goto done;
+                        else
+                                pos = pos->next;
+                } else {
+                        if (new->type == type) {
+                                phys_addr_t new_start = min(a, start);
+                                phys_addr_t new_end = max(b, end);
+                                int ret;
+
+                                list_del(&entry->list);
+                                entry->start = new_start;
+                                entry->length = new_end - new_start + 1;
+                                /* the recursion allocates a fresh node */
+                                ret = iommu_insert_resv_region(entry, regions);
+                                kfree(entry);
+                                return ret;
+                        } else {
+                                pos = pos->next;
+                        }
+                }
+        }
+insert:
+        region = iommu_alloc_resv_region(new->start, new->length,
+                                         new->prot, new->type);
+        if (!region)
+                return -ENOMEM;
+
+        list_add_tail(&region->list, pos);
+done:
+        return 0;
+}
+
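To sanity-check the sort-and-merge semantics described in the kernel-doc above, the following standalone userspace model can be compiled and run. It is an illustration only: struct region, insert() and the sample addresses are invented for this sketch, with plain malloc/free standing in for iommu_alloc_resv_region() and list_head.

/*
 * Userspace model of the iommu_insert_resv_region() semantics.
 * Illustrative sketch only: types, helpers and addresses are
 * invented; the kernel code uses struct list_head and kmalloc.
 */
#include <stdio.h>
#include <stdlib.h>

struct region {
        unsigned long long start, length;
        int type;                       /* index into name[] below */
        struct region *next;            /* list kept sorted by start */
};

static struct region *insert(struct region *head, unsigned long long start,
                             unsigned long long length, int type)
{
        struct region **pos = &head, *r;
        unsigned long long end = start + length - 1;

        while (*pos) {
                unsigned long long a = (*pos)->start;
                unsigned long long b = (*pos)->start + (*pos)->length - 1;

                if (end < a)
                        break;                  /* insert before *pos */
                if (start > b || (*pos)->type != type) {
                        pos = &(*pos)->next;    /* disjoint or other type */
                        continue;
                }
                if (start >= a && end <= b)
                        return head;            /* fully contained: no-op */
                /* same type, partial overlap: merge and re-insert */
                r = *pos;
                *pos = r->next;
                start = a < start ? a : start;
                end = b > end ? b : end;
                free(r);
                return insert(head, start, end - start + 1, type);
        }
        r = malloc(sizeof(*r));
        if (!r)
                exit(1);
        r->start = start;
        r->length = length;
        r->type = type;
        r->next = *pos;
        *pos = r;
        return head;
}

int main(void)
{
        static const char * const name[] = { "direct", "reserved", "msi" };
        struct region *head = NULL, *r;

        head = insert(head, 0xfee00000, 0x100000, 2);   /* msi */
        head = insert(head, 0x1000, 0x1000, 0);         /* direct */
        head = insert(head, 0x1800, 0x1000, 0);         /* overlaps: merged */

        for (r = head; r; r = r->next)
                printf("0x%016llx 0x%016llx %s\n", r->start,
                       r->start + r->length - 1, name[r->type]);
        return 0;
}

Running it prints the merged direct range 0x1000-0x27ff followed by the msi window, exercising the contained, disjoint and overlap cases handled above.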
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+                                 struct list_head *group_resv_regions)
+{
+        struct iommu_resv_region *entry;
+        int ret = 0;
+
+        list_for_each_entry(entry, dev_resv_regions, list) {
+                ret = iommu_insert_resv_region(entry, group_resv_regions);
+                if (ret)
+                        break;
+        }
+        return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+                                 struct list_head *head)
+{
+        struct iommu_device *device;
+        int ret = 0;
+
+        mutex_lock(&group->mutex);
+        list_for_each_entry(device, &group->devices, list) {
+                struct list_head dev_resv_regions;
+
+                INIT_LIST_HEAD(&dev_resv_regions);
+                iommu_get_resv_regions(device->dev, &dev_resv_regions);
+                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+                iommu_put_resv_regions(device->dev, &dev_resv_regions);
+                if (ret)
+                        break;
+        }
+        mutex_unlock(&group->mutex);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
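The exported helper hands ownership of the list entries to the caller, which must kfree() them when done, as the sysfs show routine below demonstrates. A hypothetical in-kernel consumer (the dump_group_msi_windows() name is invented for this sketch) would drain the list the same way:

/* Hypothetical consumer: log any MSI window reserved for @group. */
static void dump_group_msi_windows(struct iommu_group *group)
{
        struct iommu_resv_region *region, *next;
        LIST_HEAD(resv_regions);

        if (iommu_get_group_resv_regions(group, &resv_regions))
                pr_warn("reserved region list may be incomplete\n");

        list_for_each_entry_safe(region, next, &resv_regions, list) {
                if (region->type == IOMMU_RESV_MSI)
                        pr_info("MSI window: %pa..+%zx\n",
                                &region->start, region->length);
                kfree(region);          /* entries are owned by the caller */
        }
}

Note that the list is drained even on error, since iommu_insert_device_resv_regions() may have populated it partially before failing.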
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+                                             char *buf)
+{
+        struct iommu_resv_region *region, *next;
+        struct list_head group_resv_regions;
+        char *str = buf;
+
+        INIT_LIST_HEAD(&group_resv_regions);
+        iommu_get_group_resv_regions(group, &group_resv_regions);
+
+        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+                               (long long int)region->start,
+                               (long long int)(region->start +
+                                                region->length - 1),
+                               iommu_group_resv_type_string[region->type]);
+                kfree(region);
+        }
+
+        return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+                        iommu_group_show_resv_regions, NULL);
+
 static void iommu_group_release(struct kobject *kobj)
 {
         struct iommu_group *group = to_iommu_group(kobj);
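Reading the new file yields one line per reserved region — inclusive start and end addresses, then the type string — sorted by start address. Illustrative output (example values, not from a real system):

$ cat /sys/kernel/iommu_groups/1/reserved_regions
0x0000000000001000 0x00000000000027ff direct
0x00000000fee00000 0x00000000feefffff msi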
@@ -212,6 +341,11 @@ struct iommu_group *iommu_group_alloc(void)
          */
         kobject_put(&group->kobj);
 
+        ret = iommu_group_create_file(group,
+                                      &iommu_group_attr_reserved_regions);
+        if (ret)
+                return ERR_PTR(ret);
+
         pr_debug("Allocated group %d\n", group->id);
 
         return group;
@@ -318,7 +452,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                               struct device *dev)
 {
         struct iommu_domain *domain = group->default_domain;
-        struct iommu_dm_region *entry;
+        struct iommu_resv_region *entry;
         struct list_head mappings;
         unsigned long pg_size;
         int ret = 0;
@@ -331,18 +465,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
         pg_size = 1UL << __ffs(domain->pgsize_bitmap);
         INIT_LIST_HEAD(&mappings);
 
-        iommu_get_dm_regions(dev, &mappings);
+        iommu_get_resv_regions(dev, &mappings);
 
         /* We need to consider overlapping regions for different devices */
         list_for_each_entry(entry, &mappings, list) {
                 dma_addr_t start, end, addr;
 
-                if (domain->ops->apply_dm_region)
-                        domain->ops->apply_dm_region(dev, domain, entry);
+                if (domain->ops->apply_resv_region)
+                        domain->ops->apply_resv_region(dev, domain, entry);
 
                 start = ALIGN(entry->start, pg_size);
                 end   = ALIGN(entry->start + entry->length, pg_size);
 
+                if (entry->type != IOMMU_RESV_DIRECT)
+                        continue;
+
                 for (addr = start; addr < end; addr += pg_size) {
                         phys_addr_t phys_addr;
 
@@ -358,7 +495,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
         }
 
 out:
-        iommu_put_dm_regions(dev, &mappings);
+        iommu_put_resv_regions(dev, &mappings);
 
         return ret;
 }
@@ -1559,20 +1696,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
         const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-        if (ops && ops->get_dm_regions)
-                ops->get_dm_regions(dev, list);
+        if (ops && ops->get_resv_regions)
+                ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
         const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-        if (ops && ops->put_dm_regions)
-                ops->put_dm_regions(dev, list);
+        if (ops && ops->put_resv_regions)
+                ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+                                                  size_t length,
+                                                  int prot, int type)
+{
+        struct iommu_resv_region *region;
+
+        region = kzalloc(sizeof(*region), GFP_KERNEL);
+        if (!region)
+                return NULL;
+
+        INIT_LIST_HEAD(&region->list);
+        region->start = start;
+        region->length = length;
+        region->prot = prot;
+        region->type = type;
+        return region;
 }
 
 /* Request that a device is direct mapped by the IOMMU */
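For completeness, this is how a driver typically feeds these hooks: get_resv_regions allocates entries with iommu_alloc_resv_region() onto the caller's list, and put_resv_regions releases them. The callback pair below is a hypothetical sketch — the my_iommu_* names and the doorbell window are invented, and the prot flags are only a plausible choice:

/* Hypothetical doorbell window advertised as an MSI reserved region. */
#define MY_MSI_BASE     0x08000000
#define MY_MSI_SIZE     0x00100000

static void my_iommu_get_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
                                         IOMMU_WRITE, IOMMU_RESV_MSI);
        if (region)
                list_add_tail(&region->list, head);
}

static void my_iommu_put_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        struct iommu_resv_region *region, *next;

        list_for_each_entry_safe(region, next, head, list)
                kfree(region);
}

Both callbacks would then be wired into the driver's struct iommu_ops as .get_resv_regions and .put_resv_regions.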