/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
        pm_callback_t cb;
        const struct dev_pm_ops *ops;

        if (dev->pm_domain)
                ops = &dev->pm_domain->ops;
        else if (dev->type && dev->type->pm)
                ops = dev->type->pm;
        else if (dev->class && dev->class->pm)
                ops = dev->class->pm;
        else if (dev->bus && dev->bus->pm)
                ops = dev->bus->pm;
        else
                ops = NULL;

        if (ops)
                cb = *(pm_callback_t *)((void *)ops + cb_offset);
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

        return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
                __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;
        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
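
/*
 * Illustration (not part of the original file): a driver that uses the
 * autosuspend mechanism typically refreshes power.last_busy and drops its
 * usage count with the _autosuspend helpers, so that the expiration time
 * computed above stays in the future while I/O is ongoing.  A minimal
 * sketch, assuming a hypothetical driver with an I/O routine foo_do_io():
 *
 *        static int foo_xfer(struct device *dev)
 *        {
 *                int ret = pm_runtime_get_sync(dev);
 *
 *                if (ret < 0) {
 *                        pm_runtime_put_noidle(dev);
 *                        return ret;
 *                }
 *                ret = foo_do_io(dev);                   // hypothetical helper
 *                pm_runtime_mark_last_busy(dev);         // refresh power.last_busy
 *                pm_runtime_put_autosuspend(dev);        // may schedule an RPM_AUTO suspend
 *                return ret;
 *        }
 */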

static int dev_memalloc_noio(struct device *dev, void *data)
{
        return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  The
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI kinds of situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
        static DEFINE_MUTEX(dev_hotplug_mutex);

        mutex_lock(&dev_hotplug_mutex);
        for (;;) {
                bool enabled;

                /* hold power lock since bitfield is not SMP-safe. */
                spin_lock_irq(&dev->power.lock);
                enabled = dev->power.memalloc_noio;
                dev->power.memalloc_noio = enable;
                spin_unlock_irq(&dev->power.lock);

                /*
                 * No need to enable ancestors any more if the device
                 * has already been enabled.
                 */
                if (enabled && enable)
                        break;

                dev = dev->parent;

                /*
                 * Clear the flag of the parent device only if none of its
                 * children has the flag set, because an ancestor's flag
                 * was set by any one of the descendants.
                 */
                if (!dev || (!enable &&
                             device_for_each_child(dev, NULL,
                                                   dev_memalloc_noio)))
                        break;
        }
        mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
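
/*
 * Illustration (not part of the original file): a block or network driver is
 * expected to set the flag after registering the device and to clear it
 * before unregistering it.  A minimal sketch, assuming a hypothetical
 * probe/remove pair for a block device @disk_dev:
 *
 *        // in probe, after device_add()/add_disk():
 *        pm_runtime_set_memalloc_noio(disk_dev, true);
 *
 *        // in remove, before device_del():
 *        pm_runtime_set_memalloc_noio(disk_dev, false);
 */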

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
                 || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (__dev_pm_qos_read_value(dev) < 0)
                retval = -EPERM;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
                 dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks)
                goto out;

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                trace_rpm_return_int(dev, _THIS_IP_, 0);
                return 0;
        }

        dev->power.idle_notification = true;

        callback = RPM_GET_CALLBACK(dev, runtime_idle);

        if (callback)
                retval = __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        if (dev->power.memalloc_noio) {
                unsigned int noio_flag;

                /*
                 * A deadlock might occur if a memory allocation with
                 * GFP_KERNEL happens inside the runtime_suspend or
                 * runtime_resume callback of a block device's ancestor
                 * or of the block device itself.  A network device may
                 * be part of an iSCSI block device stack, so network
                 * devices and their ancestors should be marked as
                 * memalloc_noio too.
                 */
                noio_flag = memalloc_noio_save();
                retval = __rpm_callback(cb, dev);
                memalloc_noio_restore(noio_flag);
        } else {
                retval = __rpm_callback(cb, dev);
        }

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeded and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
                 !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;

 fail:
        __update_runtime_status(dev, RPM_ACTIVE);
        dev->power.deferred_resume = false;
        wake_up_all(&dev->power.wait_queue);

        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback routine failed an autosuspend, and
                 * if the last_busy time has been updated so that there
                 * is a new autosuspend expiration time, automatically
                 * reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
        goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth == 1 && dev->power.is_suspended
            && dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (retval >= 0)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
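
/*
 * Illustration (not part of the original file): pm_schedule_suspend() arms the
 * suspend timer serviced by pm_suspend_timer_fn() above.  A minimal sketch,
 * assuming a hypothetical driver that wants its (already unused, usage count
 * zero) device suspended roughly two seconds from now:
 *
 *        int ret = pm_schedule_suspend(dev, 2000);     // delay in milliseconds
 *
 *        if (ret < 0)
 *                dev_warn(dev, "could not schedule suspend: %d\n", ret);
 */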

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
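
/*
 * Illustration (not part of the original file): drivers normally do not call
 * __pm_runtime_idle/suspend/resume() directly; they use the static inline
 * wrappers from <linux/pm_runtime.h>, which pass the appropriate rpmflags.
 * A minimal sketch of the usual get/put pattern around device access,
 * assuming a hypothetical register-access helper foo_hw_read():
 *
 *        static int foo_read_reg(struct device *dev, u32 *val)
 *        {
 *                int ret = pm_runtime_get_sync(dev);     // __pm_runtime_resume(dev, RPM_GET_PUT)
 *
 *                if (ret < 0) {
 *                        pm_runtime_put_noidle(dev);     // balance the usage count on error
 *                        return ret;
 *                }
 *                *val = foo_hw_read(dev);                // hypothetical helper
 *                pm_runtime_put(dev);                    // __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *                return 0;
 *        }
 */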

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
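
/*
 * Illustration (not part of the original file): this helper backs the
 * pm_runtime_set_active() and pm_runtime_set_suspended() wrappers from
 * <linux/pm_runtime.h>.  A minimal sketch of the common probe-time pattern
 * for hardware that comes up powered, assuming a hypothetical probe function:
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                // ... bring the hardware to a known, powered state ...
 *                pm_runtime_set_active(&pdev->dev);      // status = RPM_ACTIVE while PM is still disabled
 *                pm_runtime_enable(&pdev->dev);          // now let the core suspend it when idle
 *                return 0;
 *        }
 */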

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
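
/*
 * Illustration (not part of the original file): once power.irq_safe is set,
 * the synchronous entry points may be used from atomic context, because the
 * callbacks run under the spinlock with interrupts off.  A minimal sketch,
 * assuming a hypothetical driver probe and interrupt handler:
 *
 *        // in probe: declare the callbacks safe to run with IRQs disabled
 *        pm_runtime_irq_safe(dev);
 *
 *        // later, e.g. in an interrupt handler or other atomic context:
 *        pm_runtime_get_sync(dev);       // resumes synchronously; does not sleep with irq_safe set
 *        // ... touch the hardware ...
 *        pm_runtime_put(dev);
 */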

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
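
/*
 * Illustration (not part of the original file): drivers opt into autosuspend
 * through the pm_runtime_use_autosuspend() wrapper from <linux/pm_runtime.h>,
 * usually next to the delay setting at probe time.  A minimal sketch,
 * assuming a hypothetical probe routine:
 *
 *        pm_runtime_set_autosuspend_delay(dev, 500);     // idle for 500 ms before suspending
 *        pm_runtime_use_autosuspend(dev);                // sets power.use_autosuspend
 *        pm_runtime_enable(dev);
 */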

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put(dev->parent);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to bring it
 * into suspend state.  Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
        int (*callback)(struct device *);
        int ret = 0;

        pm_runtime_disable(dev);

        if (pm_runtime_status_suspended(dev))
                return 0;

        callback = RPM_GET_CALLBACK(dev, runtime_suspend);

        if (!callback) {
                ret = -ENOSYS;
                goto err;
        }

        ret = callback(dev);
        if (ret)
                goto err;

        pm_runtime_set_suspended(dev);
        return 0;
err:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power.  We also
 * update the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
        int (*callback)(struct device *);
        int ret = 0;

        callback = RPM_GET_CALLBACK(dev, runtime_resume);

        if (!callback) {
                ret = -ENOSYS;
                goto out;
        }

        ret = callback(dev);
        if (ret)
                goto out;

        pm_runtime_set_active(dev);
        pm_runtime_mark_last_busy(dev);
out:
        pm_runtime_enable(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
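
/*
 * Illustration (not part of the original file): these two helpers are
 * typically wired into a driver's system sleep callbacks so the device is
 * runtime-suspended across system suspend and brought back afterwards.
 * A minimal sketch, assuming a hypothetical driver's dev_pm_ops:
 *
 *        static const struct dev_pm_ops foo_pm_ops = {
 *                SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                        pm_runtime_force_resume)
 *                SET_RUNTIME_PM_OPS(foo_runtime_suspend,         // hypothetical callbacks
 *                                   foo_runtime_resume, NULL)
 *        };
 */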