/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
static int async_error;
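
/* Map a PM_EVENT_* code to a human-readable verb for log messages. */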
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
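
/*
 * When the pm_print_times mechanism is enabled, log the start of a device PM
 * callback and record a timestamp so initcall_debug_report() can print how
 * long the callback took.
 */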
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
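
/*
 * Per-child callback for device_for_each_child(), used by
 * dpm_wait_for_children() below to wait on every child of a device.
 */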
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
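
/* Debug and error logging helpers shared by all of the device PM phases below. */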
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
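
/* Print how long a whole suspend/resume phase took, in milliseconds. */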
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
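
/*
 * Run a single device PM callback with tracing, timing and error reporting
 * wrapped around it.  A NULL callback is treated as success.
 */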
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
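
/*
 * A device is handled asynchronously only if that is enabled globally, the
 * device has requested it, and PM tracing (which relies on strictly ordered
 * processing) is not active.
 */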
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
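
/*
 * Async wrapper run from the async_schedule() thread pool; it reports errors
 * and drops the device reference taken when the work was scheduled.
 */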
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		trace_device_pm_callback_start(dev, info, state.event);
		callback(dev);
		trace_device_pm_callback_end(dev, 0);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
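
/*
 * Schedule the "noirq" suspend of @dev asynchronously when async suspend is
 * enabled for it; otherwise run it synchronously in the caller's context.
 */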
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}
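
/*
 * Schedule the "late" suspend of @dev asynchronously when async suspend is
 * enabled for it; otherwise run it synchronously in the caller's context.
 */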
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Callback description used in tracing and debug output.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
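
/*
 * Schedule the suspend of @dev asynchronously when async suspend is enabled
 * for it; otherwise run it synchronously in the caller's context.
 */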
static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		trace_device_pm_callback_start(dev, info, state.event);
		ret = callback(dev);
		trace_device_pm_callback_end(dev, ret);
	}

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);