@@ -241,7 +241,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
                 retval = -EACCES;
         else if (atomic_read(&dev->power.usage_count) > 0)
                 retval = -EAGAIN;
-        else if (!pm_children_suspended(dev))
+        else if (!dev->power.ignore_children &&
+                 atomic_read(&dev->power.child_count))
                 retval = -EBUSY;

         /* Pending resume requests take precedence over suspends. */
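
The open-coded check is the negation of a helper that this change removes from include/linux/pm_runtime.h. Reconstructed from the condition above (a sketch for reference, not part of the patch), the old helper was:

static inline bool pm_children_suspended(struct device *dev)
{
        /* Children count as "suspended" if ignored or none is active. */
        return dev->power.ignore_children
                || !atomic_read(&dev->power.child_count);
}

Once callers open-code the condition like this, the helper can be dropped from the public header.
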
@@ -515,7 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)

         callback = RPM_GET_CALLBACK(dev, runtime_suspend);

-        dev_pm_enable_wake_irq(dev);
+        dev_pm_enable_wake_irq_check(dev, true);
         retval = rpm_callback(callback, dev);
         if (retval)
                 goto fail;
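
The _check variant acts only on dedicated wake IRQs, i.e. those a driver registered with dev_pm_set_dedicated_wake_irq(); the boolean argument lets the runtime suspend path flag the IRQ as managed by runtime PM. For context, a minimal sketch of how a driver opts in to this machinery (the foo_ names and IRQ index are hypothetical; the wakeirq and PM calls are the real API):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int irq, ret;

        irq = platform_get_irq(pdev, 1);
        if (irq < 0)
                return irq;

        device_init_wakeup(dev, true);

        /* Register a dedicated wake IRQ; runtime PM arms and disarms it. */
        ret = dev_pm_set_dedicated_wake_irq(dev, irq);
        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}
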
@@ -554,7 +555,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
         return retval;

 fail:
-        dev_pm_disable_wake_irq(dev);
+        dev_pm_disable_wake_irq_check(dev);
         __update_runtime_status(dev, RPM_ACTIVE);
         dev->power.deferred_resume = false;
         wake_up_all(&dev->power.wait_queue);
@@ -712,8 +713,8 @@ static int rpm_resume(struct device *dev, int rpmflags)

                 spin_lock(&parent->power.lock);
                 /*
-                 * We can resume if the parent's runtime PM is disabled or it
-                 * is set to ignore children.
+                 * Resume the parent if it has runtime PM enabled and has
+                 * not been set to ignore its children.
                  */
                 if (!parent->power.disable_depth
                     && !parent->power.ignore_children) {
@@ -737,12 +738,12 @@ static int rpm_resume(struct device *dev, int rpmflags)

         callback = RPM_GET_CALLBACK(dev, runtime_resume);

-        dev_pm_disable_wake_irq(dev);
+        dev_pm_disable_wake_irq_check(dev);
         retval = rpm_callback(callback, dev);
         if (retval) {
                 __update_runtime_status(dev, RPM_SUSPENDED);
                 pm_runtime_cancel_pending(dev);
-                dev_pm_enable_wake_irq(dev);
+                dev_pm_enable_wake_irq_check(dev, false);
         } else {
  no_callback:
                 __update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1028,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
                 goto out_set;

         if (status == RPM_SUSPENDED) {
-                /* It always is possible to set the status to 'suspended'. */
+                /*
+                 * It is invalid to suspend a device with an active child,
+                 * unless it has been set to ignore its children.
+                 */
+                if (!dev->power.ignore_children &&
+                        atomic_read(&dev->power.child_count)) {
+                        dev_err(dev, "runtime PM trying to suspend device but active child\n");
+                        error = -EBUSY;
+                        goto out;
+                }
+
                 if (parent) {
                         atomic_add_unless(&parent->power.child_count, -1, 0);
                         notify_parent = !parent->power.ignore_children;
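
Since pm_runtime_set_suspended() is a thin wrapper around __pm_runtime_set_status(dev, RPM_SUSPENDED), that call can now fail with -EBUSY. A parent device that deliberately lets its children handle their own power management must declare that first; a minimal sketch, assuming a hypothetical foo_ parent driver (the PM helpers are the real API):

#include <linux/pm_runtime.h>

static int foo_parent_pm_init(struct device *dev)
{
        int ret;

        /*
         * Opt out of child accounting; otherwise the status change
         * below now fails with -EBUSY while any child is active.
         */
        pm_suspend_ignore_children(dev, true);

        ret = pm_runtime_set_suspended(dev);
        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}
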
@@ -1478,6 +1489,16 @@ int pm_runtime_force_suspend(struct device *dev)
         if (ret)
                 goto err;

+        /*
+         * Increase the runtime PM usage count for the device's parent, in
+         * case we find the device in use when system suspend was invoked.
+         * This informs pm_runtime_force_resume() to resume the parent
+         * immediately, which is needed to be able to resume its children,
+         * when not deferring the resume to be managed via runtime PM.
+         */
+        if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
+                pm_runtime_get_noresume(dev->parent);
+
         pm_runtime_set_suspended(dev);
         return 0;
 err:
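
The "> 1" test works because the PM core itself takes a runtime PM reference on every device during the prepare phase of system suspend (device_prepare() calls pm_runtime_get_noresume()), so the count is at least 1 by the time this runs; anything above that means a real user holds a reference. A hypothetical illustration (foo_ names are assumptions, not from the patch):

#include <linux/pm_runtime.h>

/* Hypothetical driver that keeps its device powered while in use. */
static int foo_open(struct device *dev)
{
        /*
         * usage_count becomes 1 here. device_prepare() raises it to 2
         * during system suspend, so pm_runtime_force_suspend() sees
         * "> 1" and pins the parent with pm_runtime_get_noresume().
         */
        return pm_runtime_get_sync(dev);
}

static void foo_release(struct device *dev)
{
        pm_runtime_put(dev);
}
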
@@ -1487,16 +1508,20 @@ err:
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

 /**
- * pm_runtime_force_resume - Force a device into resume state.
+ * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
  * Prior invoking this function we expect the user to have brought the device
  * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power. We update the runtime PM
- * status and re-enables runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. To distinguish that, we check whether the runtime PM
+ * usage count is greater than 1 (the PM core increases the usage count in the
+ * system PM prepare phase), as that indicates a real user (such as a subsystem,
+ * driver, userspace, etc.) is using it. If that is the case, the device is
+ * expected to be used on system resume as well, so we resume it. Otherwise,
+ * the resume is deferred to be managed via runtime PM.
  *
- * Typically this function may be invoked from a system resume callback to make
- * sure the device is put into full power state.
+ * Typically this function may be invoked from a system resume callback.
  */
 int pm_runtime_force_resume(struct device *dev)
 {
@@ -1513,6 +1538,17 @@ int pm_runtime_force_resume(struct device *dev)
         if (!pm_runtime_status_suspended(dev))
                 goto out;

+        /*
+         * Decrease the parent's runtime PM usage count, if we increased it
+         * during system suspend in pm_runtime_force_suspend().
+         */
+        if (atomic_read(&dev->power.usage_count) > 1) {
+                if (dev->parent)
+                        pm_runtime_put_noidle(dev->parent);
+        } else {
+                goto out;
+        }
+
         ret = pm_runtime_set_active(dev);
         if (ret)
                 goto out;
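
Taken together, the two helpers are designed to be dropped straight into a driver's system sleep callbacks, with the deferral logic above deciding at resume time whether the device actually needs powering up. The usual wiring looks like this (the foo_ runtime PM callbacks are hypothetical; the macros and helpers are the real API from linux/pm.h):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Hypothetical: put the device into its low power state. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* Hypothetical: restore the device to full power. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};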