@@ -1969,6 +1969,7 @@ static struct musb *allocate_instance(struct device *dev,
 	INIT_LIST_HEAD(&musb->control);
 	INIT_LIST_HEAD(&musb->in_bulk);
 	INIT_LIST_HEAD(&musb->out_bulk);
+	INIT_LIST_HEAD(&musb->pending_list);
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
 	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -2018,6 +2019,84 @@ static void musb_free(struct musb *musb)
 	musb_host_free(musb);
 }
 
+struct musb_pending_work {
+	int (*callback)(struct musb *musb, void *data);
+	void *data;
+	struct list_head node;
+};
+
+/*
+ * Called from musb_runtime_resume(), musb_resume(), and
+ * musb_queue_resume_work(). Callers must take musb->lock.
+ */
+static int musb_run_resume_work(struct musb *musb)
+{
+	struct musb_pending_work *w, *_w;
+	unsigned long flags;
+	int error = 0;
+
+	spin_lock_irqsave(&musb->list_lock, flags);
+	list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
+		if (w->callback) {
+			error = w->callback(musb, w->data);
+			if (error < 0) {
+				dev_err(musb->controller,
+					"resume callback %p failed: %i\n",
+					w->callback, error);
+			}
+		}
+		list_del(&w->node);
+		devm_kfree(musb->controller, w);
+	}
+	spin_unlock_irqrestore(&musb->list_lock, flags);
+
+	return error;
+}
+
+/*
+ * Called to run work if device is active or else queue the work to happen
+ * on resume. Caller must take musb->lock and must hold an RPM reference.
+ *
+ * Note that we cowardly refuse queuing work after musb PM runtime
+ * resume is done calling musb_run_resume_work() and return -EINPROGRESS
+ * instead.
+ */
+int musb_queue_resume_work(struct musb *musb,
+			   int (*callback)(struct musb *musb, void *data),
+			   void *data)
+{
+	struct musb_pending_work *w;
+	unsigned long flags;
+	int error;
+
+	if (WARN_ON(!callback))
+		return -EINVAL;
+
+	if (pm_runtime_active(musb->controller))
+		return callback(musb, data);
+
+	w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return -ENOMEM;
+
+	w->callback = callback;
+	w->data = data;
+	spin_lock_irqsave(&musb->list_lock, flags);
+	if (musb->is_runtime_suspended) {
+		list_add_tail(&w->node, &musb->pending_list);
+		error = 0;
+	} else {
+		dev_err(musb->controller, "could not add resume work %p\n",
+			callback);
+		devm_kfree(musb->controller, w);
+		error = -EINPROGRESS;
+	}
+	spin_unlock_irqrestore(&musb->list_lock, flags);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(musb_queue_resume_work);
+
 static void musb_deassert_reset(struct work_struct *work)
 {
 	struct musb *musb;
@@ -2065,6 +2144,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	}
 
 	spin_lock_init(&musb->lock);
+	spin_lock_init(&musb->list_lock);
 	musb->board_set_power = plat->set_power;
 	musb->min_power = plat->min_power;
 	musb->ops = plat->platform_ops;
@@ -2558,6 +2638,7 @@ static int musb_suspend(struct device *dev)
 
 	musb_platform_disable(musb);
 	musb_generic_disable(musb);
+	WARN_ON(!list_empty(&musb->pending_list));
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -2579,9 +2660,11 @@ static int musb_suspend(struct device *dev)
 
 static int musb_resume(struct device *dev)
 {
-	struct musb	*musb = dev_to_musb(dev);
-	u8		devctl;
-	u8		mask;
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned long	flags;
+	int		error;
+	u8		devctl;
+	u8		mask;
 
 	/*
 	 * For static cmos like DaVinci, register values were preserved
@@ -2615,6 +2698,13 @@ static int musb_resume(struct device *dev)
 
 	musb_start(musb);
 
+	spin_lock_irqsave(&musb->lock, flags);
+	error = musb_run_resume_work(musb);
+	if (error)
+		dev_err(musb->controller, "resume work failed with %i\n",
+			error);
+	spin_unlock_irqrestore(&musb->lock, flags);
+
 	return 0;
 }
 
@@ -2623,13 +2713,16 @@ static int musb_runtime_suspend(struct device *dev)
 	struct musb	*musb = dev_to_musb(dev);
 
 	musb_save_context(musb);
+	musb->is_runtime_suspended = 1;
 
 	return 0;
 }
 
 static int musb_runtime_resume(struct device *dev)
 {
-	struct musb	*musb = dev_to_musb(dev);
+	struct musb	*musb = dev_to_musb(dev);
+	unsigned long	flags;
+	int		error;
 
 	/*
 	 * When pm_runtime_get_sync called for the first time in driver
@@ -2651,6 +2744,14 @@ static int musb_runtime_resume(struct device *dev)
 			msecs_to_jiffies(USB_RESUME_TIMEOUT));
 	}
 
+	spin_lock_irqsave(&musb->lock, flags);
+	error = musb_run_resume_work(musb);
+	if (error)
+		dev_err(musb->controller, "resume work failed with %i\n",
+			error);
+	musb->is_runtime_suspended = 0;
+	spin_unlock_irqrestore(&musb->lock, flags);
+
 	return 0;
 }
 
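
Editor's note (not part of the patch): below is a minimal sketch of how a glue layer might use the new musb_queue_resume_work() API, following the locking and RPM requirements stated in its comment. The struct my_glue, my_glue_vbus_work(), and the VBUS event itself are hypothetical names used only for illustration; only musb_queue_resume_work() comes from this patch.

/*
 * Illustrative sketch only. Assumes the usual musb glue-driver includes
 * and that "musb_core.h" declares musb_queue_resume_work().
 */
#include <linux/device.h>
#include <linux/spinlock.h>
#include "musb_core.h"

struct my_glue {				/* hypothetical glue state */
	struct device	*dev;
	struct musb	*musb;
};

/*
 * Runs immediately when the controller is PM runtime active; otherwise it
 * is queued and later invoked from musb_run_resume_work() during resume,
 * in both cases with musb->lock held by the caller.
 */
static int my_glue_vbus_work(struct musb *musb, void *data)
{
	struct my_glue *glue = data;

	dev_dbg(glue->dev, "handling deferred VBUS event\n");
	/* safe to touch musb registers here */
	return 0;
}

/*
 * Assumes the caller already holds an RPM reference on musb->controller,
 * as required by the musb_queue_resume_work() comment above.
 */
static void my_glue_vbus_event(struct my_glue *glue)
{
	struct musb *musb = glue->musb;
	unsigned long flags;
	int error;

	spin_lock_irqsave(&musb->lock, flags);
	error = musb_queue_resume_work(musb, my_glue_vbus_work, glue);
	spin_unlock_irqrestore(&musb->lock, flags);
	if (error)
		dev_err(glue->dev, "could not queue VBUS work: %i\n", error);
}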