@@ -336,16 +336,7 @@ static void amba_device_release(struct device *dev)
 	kfree(d);
 }
 
-/**
- * amba_device_add - add a previously allocated AMBA device structure
- * @dev: AMBA device allocated by amba_device_alloc
- * @parent: resource parent for this devices resources
- *
- * Claim the resource, and read the device cell ID if not already
- * initialized. Register the AMBA device with the Linux device
- * manager.
- */
-int amba_device_add(struct amba_device *dev, struct resource *parent)
+static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 {
 	u32 size;
 	void __iomem *tmp;
@@ -373,6 +364,12 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
 		goto err_release;
 	}
 
+	ret = dev_pm_domain_attach(&dev->dev, true);
+	if (ret == -EPROBE_DEFER) {
+		iounmap(tmp);
+		goto err_release;
+	}
+
 	ret = amba_get_enable_pclk(dev);
 	if (ret == 0) {
 		u32 pid, cid;
@@ -398,6 +395,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
 	}
 
 	iounmap(tmp);
+	dev_pm_domain_detach(&dev->dev, true);
 
 	if (ret)
 		goto err_release;
@@ -421,6 +419,88 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
  err_out:
 	return ret;
 }
+
+/*
+ * Registration of AMBA device require reading its pid and cid registers.
+ * To do this, the device must be turned on (if it is a part of power domain)
+ * and have clocks enabled. However in some cases those resources might not be
+ * yet available. Returning EPROBE_DEFER is not a solution in such case,
+ * because callers don't handle this special error code. Instead such devices
+ * are added to the special list and their registration is retried from
+ * periodic worker, until all resources are available and registration succeeds.
+ */
+struct deferred_device {
+	struct amba_device *dev;
+	struct resource *parent;
+	struct list_head node;
+};
+
+static LIST_HEAD(deferred_devices);
+static DEFINE_MUTEX(deferred_devices_lock);
+
+static void amba_deferred_retry_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
+
+#define DEFERRED_DEVICE_TIMEOUT	(msecs_to_jiffies(5 * 1000))
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+	struct deferred_device *ddev, *tmp;
+
+	mutex_lock(&deferred_devices_lock);
+
+	list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
+		int ret = amba_device_try_add(ddev->dev, ddev->parent);
+
+		if (ret == -EPROBE_DEFER)
+			continue;
+
+		list_del_init(&ddev->node);
+		kfree(ddev);
+	}
+
+	if (!list_empty(&deferred_devices))
+		schedule_delayed_work(&deferred_retry_work,
+				      DEFERRED_DEVICE_TIMEOUT);
+
+	mutex_unlock(&deferred_devices_lock);
+}
+
+/**
+ * amba_device_add - add a previously allocated AMBA device structure
+ * @dev: AMBA device allocated by amba_device_alloc
+ * @parent: resource parent for this devices resources
+ *
+ * Claim the resource, and read the device cell ID if not already
+ * initialized. Register the AMBA device with the Linux device
+ * manager.
+ */
+int amba_device_add(struct amba_device *dev, struct resource *parent)
+{
+	int ret = amba_device_try_add(dev, parent);
+
+	if (ret == -EPROBE_DEFER) {
+		struct deferred_device *ddev;
+
+		ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
+		if (!ddev)
+			return -ENOMEM;
+
+		ddev->dev = dev;
+		ddev->parent = parent;
+		ret = 0;
+
+		mutex_lock(&deferred_devices_lock);
+
+		if (list_empty(&deferred_devices))
+			schedule_delayed_work(&deferred_retry_work,
+					      DEFERRED_DEVICE_TIMEOUT);
+		list_add_tail(&ddev->node, &deferred_devices);
+
+		mutex_unlock(&deferred_devices_lock);
+	}
+	return ret;
+}
 EXPORT_SYMBOL_GPL(amba_device_add);
 
 static struct amba_device *