/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }

        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
                                    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

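/*
 * For illustration, a minimal sketch of what pm_op() selects from: a
 * hypothetical driver (the foo_* names are assumed, not part of this
 * file) fills in a dev_pm_ops, and the PM core picks the callback that
 * matches the current transition, e.g. ops->suspend for PM_EVENT_SUSPEND:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the hardware here
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// reinitialize the hardware here
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */
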
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device *dev;
        struct task_struct *tsk;
        struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
              dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib. But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

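/*
 * For illustration: is_async() only returns true for devices that have
 * opted in to asynchronous suspend/resume. A driver would typically do
 * that at probe time (foo_probe is an assumed name, not part of this
 * file), after which dpm_resume()/dpm_suspend() schedule its callbacks
 * through the async framework and order them via power.completion:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */
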
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_noirq_suspended = true;

        return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_late_suspended = true;

        return error;
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

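/*
 * For illustration, the shape of a full system-sleep cycle as driven by
 * platform suspend code (a simplified sketch; error handling and the
 * platform-specific steps between the two halves are omitted):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error)
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *	// ... enter the sleep state, then on wakeup ...
 *	dpm_resume_start(PMSG_RESUME);
 *	dpm_resume_end(PMSG_RESUME);
 */
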
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Description of the callback, used for debug reporting.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

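/*
 * For illustration: unlike dev_pm_ops callbacks, the legacy callbacks
 * handled here take the target sleep state as a second argument. A
 * hypothetical legacy bus driver (the foo_* names are assumed) would
 * look like:
 *
 *	static int foo_legacy_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;	// quiesce dev according to state.event
 *	}
 *
 *	struct bus_type foo_bus_type = {
 *		.name	 = "foo",
 *		.suspend = foo_legacy_suspend,
 *	};
 */
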
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                               "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                               "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device. To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        if (error)
                pm_runtime_put(dev);

        return error;
}

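/*
 * For illustration: a ->prepare() callback may return -EAGAIN to tell
 * dpm_prepare() below to try this device again instead of failing the
 * transition, which is useful when the device is momentarily busy. A
 * sketch (the foo_* names and foo_device_is_busy() helper are assumed):
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		if (foo_device_is_busy(dev))
 *			return -EAGAIN;	// retried by dpm_prepare()
 *		return 0;
 *	}
 */
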
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                               dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

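/*
 * For illustration: callers normally reach __suspend_report_result()
 * through the suspend_report_result() macro from <linux/pm.h>, which
 * fills in the calling function's name:
 *
 *	error = cb(dev);
 *	suspend_report_result(cb, error);
 *	// expands to __suspend_report_result(__func__, cb, error)
 */
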
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

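/*
 * For illustration: a driver whose ordering constraint is not captured
 * by the parent/child hierarchy can serialize against another device
 * explicitly. A sketch (foo_suspend and the partner pointer are assumed
 * names, not part of this file):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		// wait until the PM core has finished handling the partner
 *		return device_pm_wait_for_dev(dev, foo->partner);
 *	}
 */
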
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
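
/*
 * For illustration, a minimal caller of dpm_for_each_dev() (the
 * count_one() and count_pm_devices() names are assumed, not part of
 * this file):
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count_pm_devices(void)
 *	{
 *		int count = 0;
 *
 *		dpm_for_each_dev(&count, count_one);
 *		return count;
 *	}
 */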