@@ -730,6 +730,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
+	if (smmu->version > ARM_SMMU_V1) {
+		/*
+		 * CBA2R.
+		 * *Must* be initialised before CBAR, thanks to a VMID16
+		 * architectural oversight that affects some implementations.
+		 */
+#ifdef CONFIG_64BIT
+		reg = CBA2R_RW64_64BIT;
+#else
+		reg = CBA2R_RW64_32BIT;
+#endif
+		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+	}
+
 	/* CBAR */
 	reg = cfg->cbar;
 	if (smmu->version == ARM_SMMU_V1)
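For reference, the CBA2R offset and the RW64 field encodings used in the hunk above come from the register definitions near the top of arm-smmu.c. A minimal sketch of those definitions, reproduced here for context (verify the exact values against the tree):

/* Global register space 1: context bank attribute registers (sketch) */
#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)	/* AArch32 (LPAE) translation table format */
#define CBA2R_RW64_64BIT		(1 << 0)	/* AArch64 translation table format */

Note that the CONFIG_64BIT switch ties the descriptor format to the kernel build as a whole rather than to an individual context bank.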
@@ -747,16 +761,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
-	if (smmu->version > ARM_SMMU_V1) {
-		/* CBA2R */
-#ifdef CONFIG_64BIT
-		reg = CBA2R_RW64_64BIT;
-#else
-		reg = CBA2R_RW64_32BIT;
-#endif
-		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-	}
-
 	/* TTBRs */
 	if (stage1) {
 		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@@ -1326,61 +1330,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
 	kfree(data);
 }
 
-static int arm_smmu_add_device(struct device *dev)
+static int arm_smmu_add_pci_device(struct pci_dev *pdev)
 {
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	int i, ret;
+	u16 sid;
 	struct iommu_group *group;
-	void (*releasefn)(void *) = NULL;
-	int ret;
-
-	smmu = find_smmu_for_device(dev);
-	if (!smmu)
-		return -ENODEV;
+	struct arm_smmu_master_cfg *cfg;
 
-	group = iommu_group_alloc();
-	if (IS_ERR(group)) {
-		dev_err(dev, "Failed to allocate IOMMU group\n");
+	group = iommu_group_get_for_dev(&pdev->dev);
+	if (IS_ERR(group))
 		return PTR_ERR(group);
-	}
-
-	if (dev_is_pci(dev)) {
-		struct pci_dev *pdev = to_pci_dev(dev);
 
+	cfg = iommu_group_get_iommudata(group);
+	if (!cfg) {
 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 		if (!cfg) {
 			ret = -ENOMEM;
 			goto out_put_group;
 		}
 
-		cfg->num_streamids = 1;
-		/*
-		 * Assume Stream ID == Requester ID for now.
-		 * We need a way to describe the ID mappings in FDT.
-		 */
-		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
-				       &cfg->streamids[0]);
-		releasefn = __arm_smmu_release_pci_iommudata;
-	} else {
-		struct arm_smmu_master *master;
-
-		master = find_smmu_master(smmu, dev->of_node);
-		if (!master) {
-			ret = -ENODEV;
-			goto out_put_group;
-		}
+		iommu_group_set_iommudata(group, cfg,
+					  __arm_smmu_release_pci_iommudata);
+	}
 
-		cfg = &master->cfg;
+	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
+		ret = -ENOSPC;
+		goto out_put_group;
 	}
 
-	iommu_group_set_iommudata(group, cfg, releasefn);
-	ret = iommu_group_add_device(group, dev);
+	/*
+	 * Assume Stream ID == Requester ID for now.
+	 * We need a way to describe the ID mappings in FDT.
+	 */
+	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+	for (i = 0; i < cfg->num_streamids; ++i)
+		if (cfg->streamids[i] == sid)
+			break;
+
+	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+	if (i == cfg->num_streamids)
+		cfg->streamids[cfg->num_streamids++] = sid;
 
+	return 0;
 out_put_group:
 	iommu_group_put(group);
 	return ret;
 }
 
+static int arm_smmu_add_platform_device(struct device *dev)
+{
+	struct iommu_group *group;
+	struct arm_smmu_master *master;
+	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+
+	if (!smmu)
+		return -ENODEV;
+
+	master = find_smmu_master(smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	/* No automatic group creation for platform devices */
+	group = iommu_group_alloc();
+	if (IS_ERR(group))
+		return PTR_ERR(group);
+
+	iommu_group_set_iommudata(group, &master->cfg, NULL);
+	return iommu_group_add_device(group, dev);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return arm_smmu_add_pci_device(to_pci_dev(dev));
+
+	return arm_smmu_add_platform_device(dev);
+}
+
 static void arm_smmu_remove_device(struct device *dev)
 {
 	iommu_group_remove_device(dev);
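The stream ID walk above relies on pci_for_each_dma_alias(), which calls its callback once for every requester ID the device can present on the bus. The driver's callback, defined earlier in the file, simply stores each alias into the u16 it is handed, so sid ends up holding the final alias. A sketch of that helper, shown here for context:

/* Callback for pci_for_each_dma_alias(): record the alias as the SID */
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* continue the walk */
}

Since iommu_group_get_for_dev() may hand back a group already populated by an aliasing device, the duplicate check keeps cfg->streamids[] free of repeated SIDs that would otherwise claim conflicting stream match registers.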
@@ -1630,6 +1656,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
 	smmu->pa_size = size;
 
+	/*
+	 * What the page table walker can address actually depends on which
+	 * descriptor format is in use, but since a) we don't know that yet,
+	 * and b) it can vary per context bank, this will have to do...
+	 */
+	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
+		dev_warn(smmu->dev,
+			 "failed to set DMA mask for table walker\n");
+
 	if (smmu->version == ARM_SMMU_V1) {
 		smmu->va_size = smmu->ipa_size;
 		size = SZ_4K | SZ_2M | SZ_1G;
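The mask set above bounds where the SMMU's page table walker may fetch descriptors, so tables allocated through the DMA API land within its output address range. DMA_BIT_MASK() is the standard helper from include/linux/dma-mapping.h:

/* A mask with the low n bits set, e.g. DMA_BIT_MASK(48) == (1ULL << 48) - 1 */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

With size == 48, for example, this requests a 48-bit streaming and coherent DMA mask for smmu->dev, and the driver merely warns if the platform cannot honour it.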