@@ -96,6 +96,19 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
 }
 EXPORT_SYMBOL_GPL(of_get_dma_window);
 
+static bool of_iommu_driver_present(struct device_node *np)
+{
+	/*
+	 * If the IOMMU still isn't ready by the time we reach init, assume
+	 * it never will be. We don't want to defer indefinitely, nor attempt
+	 * to dereference __iommu_of_table after it's been freed.
+	 */
+	if (system_state > SYSTEM_BOOTING)
+		return false;
+
+	return of_match_node(&__iommu_of_table, np);
+}
+
 static const struct iommu_ops
 *of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec)
 {
@@ -104,12 +117,20 @@ static const struct iommu_ops
 	int err;
 
 	ops = iommu_ops_from_fwnode(fwnode);
-	if (!ops || !ops->of_xlate)
+	if ((ops && !ops->of_xlate) ||
+	    (!ops && !of_iommu_driver_present(iommu_spec->np)))
 		return NULL;
 
 	err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
 	if (err)
 		return ERR_PTR(err);
+	/*
+	 * The otherwise-empty fwspec handily serves to indicate the specific
+	 * IOMMU device we're waiting for, which will be useful if we ever get
+	 * a proper probe-ordering dependency mechanism in future.
+	 */
+	if (!ops)
+		return ERR_PTR(-EPROBE_DEFER);
 
 	err = ops->of_xlate(dev, iommu_spec);
 	if (err)
@@ -186,14 +207,34 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 					   struct device_node *master_np)
 {
 	const struct iommu_ops *ops;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 
 	if (!master_np)
 		return NULL;
 
+	if (fwspec) {
+		if (fwspec->ops)
+			return fwspec->ops;
+
+		/* In the deferred case, start again from scratch */
+		iommu_fwspec_free(dev);
+	}
+
 	if (dev_is_pci(dev))
 		ops = of_pci_iommu_init(to_pci_dev(dev), master_np);
 	else
 		ops = of_platform_iommu_init(dev, master_np);
+	/*
+	 * If we have reason to believe the IOMMU driver missed the initial
+	 * add_device callback for dev, replay it to get things in order.
+	 */
+	if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
+	    dev->bus && !dev->iommu_group) {
+		int err = ops->add_device(dev);
+
+		if (err)
+			ops = ERR_PTR(err);
+	}
 
 	return IS_ERR(ops) ? NULL : ops;
 }
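
For illustration only, not part of the patch: a minimal, self-contained userspace sketch of the three-way decision the patched of_iommu_xlate() now makes. The names resolve(), toy_ops and driver_present are hypothetical stand-ins for the registered iommu_ops lookup and of_iommu_driver_present(); ERR_PTR()/PTR_ERR()/IS_ERR() are re-implemented here only so the example compiles outside the kernel.

#include <stdio.h>
#include <stdint.h>

#define EPROBE_DEFER 517	/* same numeric value the kernel uses */

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

struct toy_ops { const char *name; };	/* stand-in for struct iommu_ops */

/*
 * Mirrors the outcomes of the patched of_iommu_xlate():
 *  - NULL: no ops registered and no matching driver built in, so the
 *    master is treated as not being behind an IOMMU at all;
 *  - ERR_PTR(-EPROBE_DEFER): a driver is built in but has not registered
 *    its ops yet, so configuration should be retried later;
 *  - ops: translation can proceed.
 */
static const struct toy_ops *resolve(const struct toy_ops *ops,
				     int driver_present)
{
	if (!ops && !driver_present)
		return NULL;
	if (!ops)
		return ERR_PTR(-EPROBE_DEFER);
	return ops;
}

int main(void)
{
	static const struct toy_ops smmu = { "toy-smmu" };
	const struct toy_ops *r;

	r = resolve(NULL, 0);
	printf("no driver : %s\n", r ? "ops" : "NULL");

	r = resolve(NULL, 1);
	printf("deferred  : %ld\n", IS_ERR(r) ? PTR_ERR(r) : 0L);

	r = resolve(&smmu, 1);
	printf("ready     : %s\n", (r && !IS_ERR(r)) ? r->name : "?");

	return 0;
}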