Merge branches 'pm-runtime' and 'pm-sleep'

* pm-runtime:
  PM / Runtime: Update runtime_idle() documentation for return value meaning

* pm-sleep:
  PM / sleep: Correct whitespace errors in <linux/pm.h>
  PM: Add missing "freeze" state
  PM / Hibernate: Spelling s/anonymouns/anonymous/
  PM / Runtime: Add missing "it" in comment
  PM / suspend: Remove unnecessary !!
  PCI / PM: Resume runtime-suspended devices later during system suspend
  ACPI / PM: Resume runtime-suspended devices later during system suspend
  PM / sleep: Set pm_generic functions to NULL for !CONFIG_PM_SLEEP
  PM: fix typo in comment
  PM / hibernate: use name_to_dev_t to parse resume
  PM / wakeup: Include appropriate header file in kernel/power/wakelock.c
  PM / sleep: Move prototype declaration to header file kernel/power/power.h
  PM / sleep: Asynchronous threads for suspend_late
  PM / sleep: Asynchronous threads for suspend_noirq
  PM / sleep: Asynchronous threads for resume_early
  PM / sleep: Asynchronous threads for resume_noirq
  PM / sleep: Two flags for async suspend_noirq and suspend_late
Rafael J. Wysocki, 11 years ago
commit 36cc86e8ec

+ 3 - 2
Documentation/ABI/testing/sysfs-power

@@ -12,8 +12,9 @@ Contact:	Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 		The /sys/power/state file controls the system power state.
 		Reading from this file returns what states are supported,
-		which is hard-coded to 'standby' (Power-On Suspend), 'mem'
-		(Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
+		which is hard-coded to 'freeze' (Low-Power Idle), 'standby'
+		(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+		(Suspend-to-Disk).
 
 		Writing to this file one of these strings causes the system to
 		transition into that state. Please see the file

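Since the documented interface is just a sysfs file, a quick way to see the effect of the new 'freeze' state is to read the file back. A minimal userspace sketch, illustrative only and not part of this commit:

	/* Print the states advertised by /sys/power/state, e.g.
	 * "freeze standby mem disk" on a kernel with this change.
	 */
	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/sys/power/state", "r");

		if (!f) {
			perror("/sys/power/state");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("supported states: %s", line);
		fclose(f);
		return 0;
	}
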
+ 38 - 3
drivers/acpi/device_pm.c

@@ -901,14 +901,29 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
 int acpi_subsys_prepare(struct device *dev)
 {
 	/*
-	 * Follow PCI and resume devices suspended at run time before running
-	 * their system suspend callbacks.
+	 * Devices having power.ignore_children set may still be necessary for
+	 * suspending their children in the next phase of device suspend.
 	 */
-	pm_runtime_resume(dev);
+	if (dev->power.ignore_children)
+		pm_runtime_resume(dev);
+
 	return pm_generic_prepare(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
 
+/**
+ * acpi_subsys_suspend - Run the device driver's suspend callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices suspended at run time before running their
+ * system suspend callbacks.
+ */
+int acpi_subsys_suspend(struct device *dev)
+{
+	pm_runtime_resume(dev);
+	return pm_generic_suspend(dev);
+}
+
 /**
  * acpi_subsys_suspend_late - Suspend device using ACPI.
  * @dev: Device to suspend.
@@ -937,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
 	return ret ? ret : pm_generic_resume_early(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+/**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze(struct device *dev)
+{
+	/*
+	 * This used to be done in acpi_subsys_prepare() for all devices and
+	 * some drivers may depend on it, so do it here.  Ideally, however,
+	 * runtime-suspended devices should not be touched during freeze/thaw
+	 * transitions.
+	 */
+	pm_runtime_resume(dev);
+	return pm_generic_freeze(dev);
+}
+
 #endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_domain acpi_general_pm_domain = {
@@ -947,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
 #endif
 #ifdef CONFIG_PM_SLEEP
 		.prepare = acpi_subsys_prepare,
+		.suspend = acpi_subsys_suspend,
 		.suspend_late = acpi_subsys_suspend_late,
 		.resume_early = acpi_subsys_resume_early,
+		.freeze = acpi_subsys_freeze,
+		.poweroff = acpi_subsys_suspend,
 		.poweroff_late = acpi_subsys_suspend_late,
 		.restore_early = acpi_subsys_resume_early,
 #endif

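For a driver bound to a device in the ACPI PM domain, the practical consequence is that ->prepare() may now run while the device is still runtime-suspended (unless power.ignore_children is set), while ->suspend() and ->freeze() still see a runtime-resumed device. A hypothetical driver callback relying on that guarantee (the foo_* names are invented for illustration):

	/*
	 * Hypothetical driver under the ACPI PM domain.  Because
	 * acpi_subsys_freeze() calls pm_runtime_resume() before invoking
	 * this, the device is powered here even if it was runtime-suspended
	 * when hibernation began.
	 */
	static int foo_freeze(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		foo_quiesce_hw(priv);	/* invented helper: stop DMA, mask irqs */
		return 0;
	}
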
+ 225 - 50
drivers/base/power/main.c

@@ -91,6 +91,8 @@ void device_pm_sleep_init(struct device *dev)
 {
 	dev->power.is_prepared = false;
 	dev->power.is_suspended = false;
+	dev->power.is_noirq_suspended = false;
+	dev->power.is_late_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
@@ -467,7 +469,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -479,6 +481,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		goto Out;
 
+	if (!dev->power.is_noirq_suspended)
+		goto Out;
+
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	}
 
 	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_noirq_suspended = false;
 
  Out:
+	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
 	return error;
 }
 
+static bool is_async(struct device *dev)
+{
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_noirq(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -514,29 +541,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_noirq_list)) {
-		struct device *dev = to_device(dpm_noirq_list.next);
-		int error;
+	pm_transition = state;
 
+	/*
+	 * Advanced the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_noirq, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_noirq_list)) {
+		dev = to_device(dpm_noirq_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_noirq(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_noirq++;
-			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " noirq", error);
+		if (!is_async(dev)) {
+			int error;
+
+			error = device_resume_noirq(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_noirq++;
+				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " noirq", error);
+			}
 		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
 	cpuidle_resume();
@@ -549,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -561,6 +607,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	if (dev->power.syscore)
 		goto Out;
 
+	if (!dev->power.is_late_suspended)
+		goto Out;
+
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +632,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	}
 
 	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_late_suspended = false;
 
  Out:
 	TRACE_RESUME(error);
 
 	pm_runtime_enable(dev);
+	complete_all(&dev->power.completion);
 	return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_early(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.next);
-		int error;
+	pm_transition = state;
+
+	/*
+	 * Advanced the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_early, dev);
+		}
+	}
 
+	while (!list_empty(&dpm_late_early_list)) {
+		dev = to_device(dpm_late_early_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_early(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_early++;
-			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " early", error);
-		}
+		if (!is_async(dev)) {
+			int error;
 
+			error = device_resume_early(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_early++;
+				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " early", error);
+			}
+		}
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "early");
 }
 
@@ -732,12 +815,6 @@ static void async_resume(void *data, async_cookie_t cookie)
 	put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-	return dev->power.async_suspend && pm_async_enabled
-		&& !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -913,13 +990,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
+	int error = 0;
+
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
 
 	if (dev->power.syscore)
-		return 0;
+		goto Complete;
+
+	dpm_wait_for_children(dev, async);
 
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
@@ -940,7 +1028,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		callback = pm_noirq_op(dev->driver->pm, state);
 	}
 
-	return dpm_run_callback(callback, dev, state, info);
+	error = dpm_run_callback(callback, dev, state, info);
+	if (!error)
+		dev->power.is_noirq_suspended = true;
+	else
+		async_error = error;
+
+Complete:
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_noirq(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+
+	put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (pm_async_enabled && dev->power.async_suspend) {
+		get_device(dev);
+		async_schedule(async_suspend_noirq, dev);
+		return 0;
+	}
+	return __device_suspend_noirq(dev, pm_transition, false);
 }
 
 /**
@@ -958,19 +1080,20 @@ static int dpm_suspend_noirq(pm_message_t state)
 	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
 	while (!list_empty(&dpm_late_early_list)) {
 		struct device *dev = to_device(dpm_late_early_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_noirq(dev, state);
+		error = device_suspend_noirq(dev);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " noirq", error);
-			suspend_stats.failed_suspend_noirq++;
-			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
@@ -979,16 +1102,21 @@ static int dpm_suspend_noirq(pm_message_t state)
 			list_move(&dev->power.entry, &dpm_noirq_list);
 		put_device(dev);
 
-		if (pm_wakeup_pending()) {
-			error = -EBUSY;
+		if (async_error)
 			break;
-		}
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
+
+	if (error) {
+		suspend_stats.failed_suspend_noirq++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 		dpm_resume_noirq(resume_event(state));
-	else
+	} else {
 		dpm_show_time(starttime, state, "noirq");
+	}
 	return error;
 }
 
@@ -999,15 +1127,26 @@ static int dpm_suspend_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
+	int error = 0;
 
 	__pm_runtime_disable(dev, false);
 
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
 	if (dev->power.syscore)
-		return 0;
+		goto Complete;
+
+	dpm_wait_for_children(dev, async);
 
 	if (dev->pm_domain) {
 		info = "late power domain ";
@@ -1028,7 +1167,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
 		callback = pm_late_early_op(dev->driver->pm, state);
 	}
 
-	return dpm_run_callback(callback, dev, state, info);
+	error = dpm_run_callback(callback, dev, state, info);
+	if (!error)
+		dev->power.is_late_suspended = true;
+	else
+		async_error = error;
+
+Complete:
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_late(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+	put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (pm_async_enabled && dev->power.async_suspend) {
+		get_device(dev);
+		async_schedule(async_suspend_late, dev);
+		return 0;
+	}
+
+	return __device_suspend_late(dev, pm_transition, false);
 }
 
 /**
@@ -1041,19 +1214,20 @@ static int dpm_suspend_late(pm_message_t state)
 	int error = 0;
 
 	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
 	while (!list_empty(&dpm_suspended_list)) {
 		struct device *dev = to_device(dpm_suspended_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_late(dev, state);
+		error = device_suspend_late(dev);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
-			suspend_stats.failed_suspend_late++;
-			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
@@ -1062,17 +1236,18 @@ static int dpm_suspend_late(pm_message_t state)
 			list_move(&dev->power.entry, &dpm_late_early_list);
 		put_device(dev);
 
-		if (pm_wakeup_pending()) {
-			error = -EBUSY;
+		if (async_error)
 			break;
-		}
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
+	async_synchronize_full();
+	if (error) {
+		suspend_stats.failed_suspend_late++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
 		dpm_resume_early(resume_event(state));
-	else
+	} else {
 		dpm_show_time(starttime, state, "late");
-
+	}
 	return error;
 }
 

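Devices take part in the new asynchronous suspend_late/suspend_noirq and resume_early/resume_noirq phases through the same opt-in as the existing async suspend/resume path: the power.async_suspend flag, normally set with device_enable_async_suspend() from include/linux/device.h. A minimal sketch (hypothetical driver, real helper):

	/* Hypothetical probe function opting the device into async PM phases. */
	static int foo_probe(struct platform_device *pdev)
	{
		/*
		 * Allow this device to be suspended/resumed in parallel with
		 * others; the PM core still orders it against its parent and
		 * children via the per-device completion (see dpm_wait() and
		 * dpm_wait_for_children() in the diff above).
		 */
		device_enable_async_suspend(&pdev->dev);
		return 0;
	}
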
+ 1 - 1
drivers/base/power/runtime.c

@@ -1131,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
  * @dev: Device to handle.
  * @dev: Device to handle.
  * @check_resume: If set, check if there's a resume request for the device.
  *
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
  * cancel all pending runtime PM requests for the device and wait for all
  * operations in progress to complete.  The device can be either active or
  * suspended after its runtime PM has been disabled.
+ 25 - 8
drivers/pci/pci-driver.c

@@ -616,15 +616,11 @@ static int pci_pm_prepare(struct device *dev)
 	int error = 0;
 	int error = 0;
 
 	/*
-	 * PCI devices suspended at run time need to be resumed at this
-	 * point, because in general it is necessary to reconfigure them for
-	 * system suspend.  Namely, if the device is supposed to wake up the
-	 * system from the sleep state, we may need to reconfigure it for this
-	 * purpose.  In turn, if the device is not supposed to wake up the
-	 * system from the sleep state, we'll have to prevent it from signaling
-	 * wake-up.
+	 * Devices having power.ignore_children set may still be necessary for
+	 * suspending their children in the next phase of device suspend.
 	 */
-	pm_runtime_resume(dev);
+	if (dev->power.ignore_children)
+		pm_runtime_resume(dev);
 
 	if (drv && drv->pm && drv->pm->prepare)
 		error = drv->pm->prepare(dev);
 		goto Fixup;
 		goto Fixup;
 	}
 
+	/*
+	 * PCI devices suspended at run time need to be resumed at this point,
+	 * because in general it is necessary to reconfigure them for system
+	 * suspend.  Namely, if the device is supposed to wake up the system
+	 * from the sleep state, we may need to reconfigure it for this purpose.
+	 * In turn, if the device is not supposed to wake up the system from the
+	 * sleep state, we'll have to prevent it from signaling wake-up.
+	 */
+	pm_runtime_resume(dev);
+
 	pci_dev->state_saved = false;
 	if (pm->suspend) {
 		pci_power_t prev = pci_dev->current_state;
 		return 0;
 		return 0;
 	}
 
+	/*
+	 * This used to be done in pci_pm_prepare() for all devices and some
+	 * drivers may depend on it, so do it here.  Ideally, runtime-suspended
+	 * devices should not be touched during freeze/thaw transitions,
+	 * however.
+	 */
+	pm_runtime_resume(dev);
+
 	pci_dev->state_saved = false;
 	if (pm->freeze) {
 		int error;
 		goto Fixup;
 		goto Fixup;
 	}
 
+	/* The reason to do that is the same as in pci_pm_suspend(). */
+	pm_runtime_resume(dev);
+
 	pci_dev->state_saved = false;
 	if (pm->poweroff) {
 		int error;
+ 42 - 28
include/linux/pm.h

@@ -264,9 +264,9 @@ typedef struct pm_message {
  *	registers, so that it is fully operational.
  *	registers, so that it is fully operational.
  *
  * @runtime_idle: Device appears to be inactive and it might be put into a
- *	low-power state if all of the necessary conditions are satisfied.  Check
- *	these conditions and handle the device as appropriate, possibly queueing
- *	a suspend request for it.  The return value is ignored by the PM core.
+ *	low-power state if all of the necessary conditions are satisfied.
+ *	Check these conditions, and return 0 if it's appropriate to let the PM
+ *	core queue a suspend request for the device.
  *
  * Refer to Documentation/power/runtime_pm.txt for more information about the
  * role of the above callbacks in device runtime power management.
@@ -352,7 +352,7 @@ const struct dev_pm_ops name = { \
 
 /*
  * Use this for defining a set of PM operations to be used in all situations
- * (sustem suspend, hibernation or runtime PM).
+ * (system suspend, hibernation or runtime PM).
  * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
  * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
  * and .runtime_resume(), because .runtime_suspend() always works on an already
@@ -379,7 +379,7 @@ const struct dev_pm_ops name = { \
  *
  * ON		No transition.
  *
- * FREEZE 	System is going to hibernate, call ->prepare() and ->freeze()
+ * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
  *		for all devices.
  *
  * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
@@ -423,7 +423,7 @@ const struct dev_pm_ops name = { \
 
 #define PM_EVENT_INVALID	(-1)
 #define PM_EVENT_ON		0x0000
-#define PM_EVENT_FREEZE 	0x0001
+#define PM_EVENT_FREEZE		0x0001
 #define PM_EVENT_SUSPEND	0x0002
 #define PM_EVENT_HIBERNATE	0x0004
 #define PM_EVENT_QUIESCE	0x0008
@@ -542,6 +542,8 @@ struct dev_pm_info {
 	unsigned int		async_suspend:1;
 	bool			is_prepared:1;	/* Owned by the PM core */
 	bool			is_suspended:1;	/* Ditto */
+	bool			is_noirq_suspended:1;
+	bool			is_late_suspended:1;
 	bool			ignore_children:1;
 	bool			early_init:1;	/* Owned by the PM core */
 	spinlock_t		lock;
@@ -613,11 +615,11 @@ struct dev_pm_domain {
  * message is implicit:
  *
  * ON		Driver starts working again, responding to hardware events
- * 		and software requests.  The hardware may have gone through
- * 		a power-off reset, or it may have maintained state from the
- * 		previous suspend() which the driver will rely on while
- * 		resuming.  On most platforms, there are no restrictions on
- * 		availability of resources like clocks during resume().
+ *		and software requests.  The hardware may have gone through
+ *		a power-off reset, or it may have maintained state from the
+ *		previous suspend() which the driver will rely on while
+ *		resuming.  On most platforms, there are no restrictions on
+ *		availability of resources like clocks during resume().
  *
  * Other transitions are triggered by messages sent using suspend().  All
  * these transitions quiesce the driver, so that I/O queues are inactive.
@@ -627,21 +629,21 @@ struct dev_pm_domain {
  * differ according to the message:
  *
  * SUSPEND	Quiesce, enter a low power device state appropriate for
- * 		the upcoming system state (such as PCI_D3hot), and enable
- * 		wakeup events as appropriate.
+ *		the upcoming system state (such as PCI_D3hot), and enable
+ *		wakeup events as appropriate.
  *
  * HIBERNATE	Enter a low power device state appropriate for the hibernation
- * 		state (eg. ACPI S4) and enable wakeup events as appropriate.
+ *		state (eg. ACPI S4) and enable wakeup events as appropriate.
  *
  * FREEZE	Quiesce operations so that a consistent image can be saved;
- * 		but do NOT otherwise enter a low power device state, and do
- * 		NOT emit system wakeup events.
+ *		but do NOT otherwise enter a low power device state, and do
+ *		NOT emit system wakeup events.
  *
  * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
- * 		the system from a snapshot taken after an earlier FREEZE.
- * 		Some drivers will need to reset their hardware state instead
- * 		of preserving it, to ensure that it's never mistaken for the
- * 		state which that earlier snapshot had set up.
+ *		the system from a snapshot taken after an earlier FREEZE.
+ *		Some drivers will need to reset their hardware state instead
+ *		of preserving it, to ensure that it's never mistaken for the
+ *		state which that earlier snapshot had set up.
  *
  * A minimally power-aware driver treats all messages as SUSPEND, fully
  * reinitializes its device during resume() -- whether or not it was reset
@@ -718,14 +720,26 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void
 {
 }
 
-#define pm_generic_prepare	NULL
-#define pm_generic_suspend	NULL
-#define pm_generic_resume	NULL
-#define pm_generic_freeze	NULL
-#define pm_generic_thaw		NULL
-#define pm_generic_restore	NULL
-#define pm_generic_poweroff	NULL
-#define pm_generic_complete	NULL
+#define pm_generic_prepare		NULL
+#define pm_generic_suspend_late		NULL
+#define pm_generic_suspend_noirq	NULL
+#define pm_generic_suspend		NULL
+#define pm_generic_resume_early		NULL
+#define pm_generic_resume_noirq		NULL
+#define pm_generic_resume		NULL
+#define pm_generic_freeze_noirq		NULL
+#define pm_generic_freeze_late		NULL
+#define pm_generic_freeze		NULL
+#define pm_generic_thaw_noirq		NULL
+#define pm_generic_thaw_early		NULL
+#define pm_generic_thaw			NULL
+#define pm_generic_restore_noirq	NULL
+#define pm_generic_restore_early	NULL
+#define pm_generic_restore		NULL
+#define pm_generic_poweroff_noirq	NULL
+#define pm_generic_poweroff_late	NULL
+#define pm_generic_poweroff		NULL
+#define pm_generic_complete		NULL
 #endif /* !CONFIG_PM_SLEEP */
 
 /* How to reorder dpm_list after device_move() */

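Under the clarified runtime_idle() convention, a callback returns 0 to let the PM core go ahead and queue a suspend request, or a negative error code to keep the device active. A hypothetical callback (foo_* names invented for illustration):

	static int foo_runtime_idle(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);	/* invented driver data */

		if (foo_hw_busy(priv))		/* invented busyness check */
			return -EBUSY;		/* veto: device stays active */

		return 0;	/* PM core queues a runtime-suspend request */
	}
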
+ 12 - 10
kernel/power/hibernate.c

@@ -973,16 +973,20 @@ static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 			    const char *buf, size_t n)
 {
-	unsigned int maj, min;
 	dev_t res;
-	int ret = -EINVAL;
+	int len = n;
+	char *name;
 
-	if (sscanf(buf, "%u:%u", &maj, &min) != 2)
-		goto out;
+	if (len && buf[len-1] == '\n')
+		len--;
+	name = kstrndup(buf, len, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
 
-	res = MKDEV(maj,min);
-	if (maj != MAJOR(res) || min != MINOR(res))
-		goto out;
+	res = name_to_dev_t(name);
+	kfree(name);
+	if (!res)
+		return -EINVAL;
 
 	lock_system_sleep();
 	swsusp_resume_device = res;
@@ -990,9 +994,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
-	ret = n;
- out:
-	return ret;
+	return n;
 }
 
 power_attr(resume);

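With name_to_dev_t() doing the parsing, /sys/power/resume accepts the same device descriptions as the resume= boot parameter (for example "8:3", "/dev/sda3", or, depending on kernel configuration, "PARTUUID=..."), instead of only MAJ:MIN pairs. A small userspace sketch of the new usage, illustrative only:

	/* Start a manual resume from disk by device name rather than MAJ:MIN. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/power/resume", "w");

		if (!f) {
			perror("/sys/power/resume");
			return 1;
		}
		fputs("/dev/sda3\n", f);	/* previously only "8:3" would parse */
		return fclose(f) ? 1 : 0;
	}
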
+ 2 - 2
kernel/power/main.c

@@ -282,8 +282,8 @@ struct kobject *power_kobj;
  *	state - control system power state.
  *	state - control system power state.
  *
  *	show() returns what states are supported, which is hard-coded to
- *	'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
- *	'disk' (Suspend-to-Disk).
+ *	'freeze' (Low-Power Idle), 'standby' (Power-On Suspend),
+ *	'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
  *
  *	store() accepts one of those strings, translates it into the
  *	proper enumerated value, and initiates a suspend transition.
+ 2 - 0
kernel/power/power.h

@@ -49,6 +49,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
  */
  */
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
 
+asmlinkage int swsusp_save(void);
+
 /* kernel/power/hibernate.c */
 extern bool freezer_test_done;
 

+ 1 - 1
kernel/power/snapshot.c

@@ -1268,7 +1268,7 @@ static void free_unnecessary_pages(void)
  * [number of saveable pages] - [number of pages that can be freed in theory]
  *
  * where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages,
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
  * minus mapped file pages.
  */
 static unsigned long minimum_image_size(unsigned long saveable)

+ 1 - 1
kernel/power/suspend.c

@@ -39,7 +39,7 @@ static const struct platform_suspend_ops *suspend_ops;
 
 static bool need_suspend_ops(suspend_state_t state)
 {
-	return !!(state > PM_SUSPEND_FREEZE);
+	return state > PM_SUSPEND_FREEZE;
 }
 
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);

+ 2 - 0
kernel/power/wakelock.c

@@ -18,6 +18,8 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 
+#include "power.h"
+
 static DEFINE_MUTEX(wakelocks_lock);
 
 struct wakelock {