@@ -964,6 +964,32 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
 	return phys;
 }
 
+static int exynos_iommu_add_device(struct device *dev)
+{
+	struct iommu_group *group;
+	int ret;
+
+	group = iommu_group_get(dev);
+
+	if (!group) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group)) {
+			dev_err(dev, "Failed to allocate IOMMU group\n");
+			return PTR_ERR(group);
+		}
+	}
+
+	ret = iommu_group_add_device(group, dev);
+	iommu_group_put(group);
+
+	return ret;
+}
+
+static void exynos_iommu_remove_device(struct device *dev)
+{
+	iommu_group_remove_device(dev);
+}
+
 static struct iommu_ops exynos_iommu_ops = {
 	.domain_init = &exynos_iommu_domain_init,
 	.domain_destroy = &exynos_iommu_domain_destroy,
@@ -972,6 +998,8 @@ static struct iommu_ops exynos_iommu_ops = {
 	.map = &exynos_iommu_map,
 	.unmap = &exynos_iommu_unmap,
 	.iova_to_phys = &exynos_iommu_iova_to_phys,
+	.add_device = &exynos_iommu_add_device,
+	.remove_device = &exynos_iommu_remove_device,
 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
 };
 
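For context, the new callbacks are only reached after the driver hands exynos_iommu_ops to the IOMMU core. A minimal sketch of that registration is below; it is not part of this patch, and the init-function name shown is illustrative of the driver's existing init path rather than quoted from it.

	/*
	 * Illustrative sketch (not from this patch): once the ops are
	 * registered on the platform bus, the IOMMU core calls
	 * ->add_device() for each device it probes, which places the
	 * device into an iommu_group via the helpers added above, and
	 * ->remove_device() on teardown.
	 */
	static int __init exynos_iommu_init(void)
	{
		/* ... driver-specific setup omitted ... */
		return bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	}
	subsys_initcall(exynos_iommu_init);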