main.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * drivers/base/power/main.c - Where the driver meets power management.
  4. *
  5. * Copyright (c) 2003 Patrick Mochel
  6. * Copyright (c) 2003 Open Source Development Lab
  7. *
  8. * The driver model core calls device_pm_add() when a device is registered.
  9. * This will initialize the embedded device_pm_info object in the device
  10. * and add it to the list of power-controlled devices. sysfs entries for
  11. * controlling device power management will also be added.
  12. *
  13. * A separate list is used for keeping track of power info, because the power
  14. * domain dependencies may differ from the ancestral dependencies that the
  15. * subsystem list maintains.
  16. */
  17. #define pr_fmt(fmt) "PM: " fmt
  18. #define dev_fmt pr_fmt
  19. #include <linux/device.h>
  20. #include <linux/export.h>
  21. #include <linux/mutex.h>
  22. #include <linux/pm.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/pm-trace.h>
  25. #include <linux/pm_wakeirq.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/sched.h>
  28. #include <linux/sched/debug.h>
  29. #include <linux/async.h>
  30. #include <linux/suspend.h>
  31. #include <trace/events/power.h>
  32. #include <linux/cpufreq.h>
  33. #include <linux/devfreq.h>
  34. #include <linux/timer.h>
  35. #include <linux/wakeup_reason.h>
  36. #include "../base.h"
  37. #include "power.h"
  38. typedef int (*pm_callback_t)(struct device *);
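/*
 * Walk an RCU-protected list of device links while asserting to
 * list_for_each_entry_rcu() that the device links read lock is held.
 */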
  39. #define list_for_each_entry_rcu_locked(pos, head, member) \
  40. list_for_each_entry_rcu(pos, head, member, \
  41. device_links_read_lock_held())
  42. /*
  43. * The entries in the dpm_list list are in a depth first order, simply
  44. * because children are guaranteed to be discovered after parents, and
  45. * are inserted at the back of the list on discovery.
  46. *
  47. * Since device_pm_add() may be called with a device lock held,
  48. * we must never try to acquire a device lock while holding
  49. * dpm_list_mutex.
  50. */
  51. LIST_HEAD(dpm_list);
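/*
 * Devices are moved from dpm_list onto the lists below as they pass the
 * prepare, suspend, suspend_late and suspend_noirq phases of a system
 * transition, and back again during the corresponding resume phases.
 */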
  52. static LIST_HEAD(dpm_prepared_list);
  53. static LIST_HEAD(dpm_suspended_list);
  54. static LIST_HEAD(dpm_late_early_list);
  55. static LIST_HEAD(dpm_noirq_list);
  56. struct suspend_stats suspend_stats;
  57. static DEFINE_MUTEX(dpm_list_mtx);
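/*
 * pm_transition is the PM message for the transition currently in progress.
 * async_error records an error returned by an asynchronous callback so that
 * the transition can be aborted.
 */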
  58. static pm_message_t pm_transition;
  59. static int async_error;
  60. static const char *pm_verb(int event)
  61. {
  62. switch (event) {
  63. case PM_EVENT_SUSPEND:
  64. return "suspend";
  65. case PM_EVENT_RESUME:
  66. return "resume";
  67. case PM_EVENT_FREEZE:
  68. return "freeze";
  69. case PM_EVENT_QUIESCE:
  70. return "quiesce";
  71. case PM_EVENT_HIBERNATE:
  72. return "hibernate";
  73. case PM_EVENT_THAW:
  74. return "thaw";
  75. case PM_EVENT_RESTORE:
  76. return "restore";
  77. case PM_EVENT_RECOVER:
  78. return "recover";
  79. default:
  80. return "(unknown PM event)";
  81. }
  82. }
  83. /**
  84. * device_pm_sleep_init - Initialize system suspend-related device fields.
  85. * @dev: Device object being initialized.
  86. */
  87. void device_pm_sleep_init(struct device *dev)
  88. {
  89. dev->power.is_prepared = false;
  90. dev->power.is_suspended = false;
  91. dev->power.is_noirq_suspended = false;
  92. dev->power.is_late_suspended = false;
  93. init_completion(&dev->power.completion);
  94. complete_all(&dev->power.completion);
  95. dev->power.wakeup = NULL;
  96. INIT_LIST_HEAD(&dev->power.entry);
  97. }
  98. /**
  99. * device_pm_lock - Lock the list of active devices used by the PM core.
  100. */
  101. void device_pm_lock(void)
  102. {
  103. mutex_lock(&dpm_list_mtx);
  104. }
  105. /**
  106. * device_pm_unlock - Unlock the list of active devices used by the PM core.
  107. */
  108. void device_pm_unlock(void)
  109. {
  110. mutex_unlock(&dpm_list_mtx);
  111. }
  112. /**
  113. * device_pm_add - Add a device to the PM core's list of active devices.
  114. * @dev: Device to add to the list.
  115. */
  116. void device_pm_add(struct device *dev)
  117. {
  118. /* Skip PM setup/initialization. */
  119. if (device_pm_not_required(dev))
  120. return;
  121. pr_debug("Adding info for %s:%s\n",
  122. dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
  123. device_pm_check_callbacks(dev);
  124. mutex_lock(&dpm_list_mtx);
  125. if (dev->parent && dev->parent->power.is_prepared)
  126. dev_warn(dev, "parent %s should not be sleeping\n",
  127. dev_name(dev->parent));
  128. list_add_tail(&dev->power.entry, &dpm_list);
  129. dev->power.in_dpm_list = true;
  130. mutex_unlock(&dpm_list_mtx);
  131. }
  132. /**
  133. * device_pm_remove - Remove a device from the PM core's list of active devices.
  134. * @dev: Device to be removed from the list.
  135. */
  136. void device_pm_remove(struct device *dev)
  137. {
  138. if (device_pm_not_required(dev))
  139. return;
  140. pr_debug("Removing info for %s:%s\n",
  141. dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
  142. complete_all(&dev->power.completion);
  143. mutex_lock(&dpm_list_mtx);
  144. list_del_init(&dev->power.entry);
  145. dev->power.in_dpm_list = false;
  146. mutex_unlock(&dpm_list_mtx);
  147. device_wakeup_disable(dev);
  148. pm_runtime_remove(dev);
  149. device_pm_check_callbacks(dev);
  150. }
  151. /**
  152. * device_pm_move_before - Move device in the PM core's list of active devices.
  153. * @deva: Device to move in dpm_list.
  154. * @devb: Device @deva should come before.
  155. */
  156. void device_pm_move_before(struct device *deva, struct device *devb)
  157. {
  158. pr_debug("Moving %s:%s before %s:%s\n",
  159. deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
  160. devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
  161. /* Delete deva from dpm_list and reinsert before devb. */
  162. list_move_tail(&deva->power.entry, &devb->power.entry);
  163. }
  164. /**
  165. * device_pm_move_after - Move device in the PM core's list of active devices.
  166. * @deva: Device to move in dpm_list.
  167. * @devb: Device @deva should come after.
  168. */
  169. void device_pm_move_after(struct device *deva, struct device *devb)
  170. {
  171. pr_debug("Moving %s:%s after %s:%s\n",
  172. deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
  173. devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
  174. /* Delete deva from dpm_list and reinsert after devb. */
  175. list_move(&deva->power.entry, &devb->power.entry);
  176. }
  177. /**
  178. * device_pm_move_last - Move device to end of the PM core's list of devices.
  179. * @dev: Device to move in dpm_list.
  180. */
  181. void device_pm_move_last(struct device *dev)
  182. {
  183. pr_debug("Moving %s:%s to end of list\n",
  184. dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
  185. list_move_tail(&dev->power.entry, &dpm_list);
  186. }
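/*
 * When pm_print_times is enabled, the two helpers below log every PM
 * callback invocation and report how long the callback took to run.
 */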
  187. static ktime_t initcall_debug_start(struct device *dev, void *cb)
  188. {
  189. if (!pm_print_times_enabled)
  190. return 0;
  191. dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
  192. task_pid_nr(current),
  193. dev->parent ? dev_name(dev->parent) : "none");
  194. return ktime_get();
  195. }
  196. static void initcall_debug_report(struct device *dev, ktime_t calltime,
  197. void *cb, int error)
  198. {
  199. ktime_t rettime;
  200. if (!pm_print_times_enabled)
  201. return;
  202. rettime = ktime_get();
  203. dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
  204. (unsigned long long)ktime_us_delta(rettime, calltime));
  205. }
  206. /**
  207. * dpm_wait - Wait for a PM operation to complete.
  208. * @dev: Device to wait for.
  209. * @async: If unset, wait only if the device's power.async_suspend flag is set.
  210. */
  211. static void dpm_wait(struct device *dev, bool async)
  212. {
  213. if (!dev)
  214. return;
  215. if (async || (pm_async_enabled && dev->power.async_suspend))
  216. wait_for_completion(&dev->power.completion);
  217. }
  218. static int dpm_wait_fn(struct device *dev, void *async_ptr)
  219. {
  220. dpm_wait(dev, *((bool *)async_ptr));
  221. return 0;
  222. }
  223. static void dpm_wait_for_children(struct device *dev, bool async)
  224. {
  225. device_for_each_child(dev, &async, dpm_wait_fn);
  226. }
  227. static void dpm_wait_for_suppliers(struct device *dev, bool async)
  228. {
  229. struct device_link *link;
  230. int idx;
  231. idx = device_links_read_lock();
  232. /*
  233. * If the supplier goes away right after we've checked the link to it,
  234. * we'll wait for its completion to change the state, but that's fine,
  235. * because the only things that will block as a result are the SRCU
  236. * callbacks freeing the link objects for the links in the list we're
  237. * walking.
  238. */
  239. list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
  240. if (READ_ONCE(link->status) != DL_STATE_DORMANT)
  241. dpm_wait(link->supplier, async);
  242. device_links_read_unlock(idx);
  243. }
  244. static bool dpm_wait_for_superior(struct device *dev, bool async)
  245. {
  246. struct device *parent;
  247. /*
  248. * If the device is resumed asynchronously and the parent's callback
  249. * deletes both the device and the parent itself, the parent object may
  250. * be freed while this function is running, so avoid that by reference
  251. * counting the parent once more unless the device has been deleted
  252. * already (in which case return right away).
  253. */
  254. mutex_lock(&dpm_list_mtx);
  255. if (!device_pm_initialized(dev)) {
  256. mutex_unlock(&dpm_list_mtx);
  257. return false;
  258. }
  259. parent = get_device(dev->parent);
  260. mutex_unlock(&dpm_list_mtx);
  261. dpm_wait(parent, async);
  262. put_device(parent);
  263. dpm_wait_for_suppliers(dev, async);
  264. /*
  265. * If the parent's callback has deleted the device, attempting to resume
  266. * it would be invalid, so avoid doing that then.
  267. */
  268. return device_pm_initialized(dev);
  269. }
  270. static void dpm_wait_for_consumers(struct device *dev, bool async)
  271. {
  272. struct device_link *link;
  273. int idx;
  274. idx = device_links_read_lock();
  275. /*
  276. * The status of a device link can only be changed from "dormant" by a
  277. * probe, but that cannot happen during system suspend/resume. In
  278. * theory it can change to "dormant" at that time, but then it is
   279. * reasonable to wait for the target device anyway (e.g. if it goes
  280. * away, it's better to wait for it to go away completely and then
  281. * continue instead of trying to continue in parallel with its
  282. * unregistration).
  283. */
  284. list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
  285. if (READ_ONCE(link->status) != DL_STATE_DORMANT)
  286. dpm_wait(link->consumer, async);
  287. device_links_read_unlock(idx);
  288. }
  289. static void dpm_wait_for_subordinate(struct device *dev, bool async)
  290. {
  291. dpm_wait_for_children(dev, async);
  292. dpm_wait_for_consumers(dev, async);
  293. }
  294. /**
  295. * pm_op - Return the PM operation appropriate for given PM event.
  296. * @ops: PM operations to choose from.
  297. * @state: PM transition of the system being carried out.
  298. */
  299. static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
  300. {
  301. switch (state.event) {
  302. #ifdef CONFIG_SUSPEND
  303. case PM_EVENT_SUSPEND:
  304. return ops->suspend;
  305. case PM_EVENT_RESUME:
  306. return ops->resume;
  307. #endif /* CONFIG_SUSPEND */
  308. #ifdef CONFIG_HIBERNATE_CALLBACKS
  309. case PM_EVENT_FREEZE:
  310. case PM_EVENT_QUIESCE:
  311. return ops->freeze;
  312. case PM_EVENT_HIBERNATE:
  313. return ops->poweroff;
  314. case PM_EVENT_THAW:
  315. case PM_EVENT_RECOVER:
  316. return ops->thaw;
  317. case PM_EVENT_RESTORE:
  318. return ops->restore;
  319. #endif /* CONFIG_HIBERNATE_CALLBACKS */
  320. }
  321. return NULL;
  322. }
  323. /**
  324. * pm_late_early_op - Return the PM operation appropriate for given PM event.
  325. * @ops: PM operations to choose from.
  326. * @state: PM transition of the system being carried out.
  327. *
  328. * Runtime PM is disabled for @dev while this function is being executed.
  329. */
  330. static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
  331. pm_message_t state)
  332. {
  333. switch (state.event) {
  334. #ifdef CONFIG_SUSPEND
  335. case PM_EVENT_SUSPEND:
  336. return ops->suspend_late;
  337. case PM_EVENT_RESUME:
  338. return ops->resume_early;
  339. #endif /* CONFIG_SUSPEND */
  340. #ifdef CONFIG_HIBERNATE_CALLBACKS
  341. case PM_EVENT_FREEZE:
  342. case PM_EVENT_QUIESCE:
  343. return ops->freeze_late;
  344. case PM_EVENT_HIBERNATE:
  345. return ops->poweroff_late;
  346. case PM_EVENT_THAW:
  347. case PM_EVENT_RECOVER:
  348. return ops->thaw_early;
  349. case PM_EVENT_RESTORE:
  350. return ops->restore_early;
  351. #endif /* CONFIG_HIBERNATE_CALLBACKS */
  352. }
  353. return NULL;
  354. }
  355. /**
  356. * pm_noirq_op - Return the PM operation appropriate for given PM event.
  357. * @ops: PM operations to choose from.
  358. * @state: PM transition of the system being carried out.
  359. *
  360. * The driver of @dev will not receive interrupts while this function is being
  361. * executed.
  362. */
  363. static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
  364. {
  365. switch (state.event) {
  366. #ifdef CONFIG_SUSPEND
  367. case PM_EVENT_SUSPEND:
  368. return ops->suspend_noirq;
  369. case PM_EVENT_RESUME:
  370. return ops->resume_noirq;
  371. #endif /* CONFIG_SUSPEND */
  372. #ifdef CONFIG_HIBERNATE_CALLBACKS
  373. case PM_EVENT_FREEZE:
  374. case PM_EVENT_QUIESCE:
  375. return ops->freeze_noirq;
  376. case PM_EVENT_HIBERNATE:
  377. return ops->poweroff_noirq;
  378. case PM_EVENT_THAW:
  379. case PM_EVENT_RECOVER:
  380. return ops->thaw_noirq;
  381. case PM_EVENT_RESTORE:
  382. return ops->restore_noirq;
  383. #endif /* CONFIG_HIBERNATE_CALLBACKS */
  384. }
  385. return NULL;
  386. }
  387. static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
  388. {
  389. dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
  390. ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
  391. ", may wakeup" : "", dev->power.driver_flags);
  392. }
  393. static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
  394. int error)
  395. {
  396. dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
  397. error);
  398. }
  399. static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
  400. const char *info)
  401. {
  402. ktime_t calltime;
  403. u64 usecs64;
  404. int usecs;
  405. calltime = ktime_get();
  406. usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
  407. do_div(usecs64, NSEC_PER_USEC);
  408. usecs = usecs64;
  409. if (usecs == 0)
  410. usecs = 1;
  411. pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
  412. info ?: "", info ? " " : "", pm_verb(state.event),
  413. error ? "aborted" : "complete",
  414. usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
  415. }
  416. static int dpm_run_callback(pm_callback_t cb, struct device *dev,
  417. pm_message_t state, const char *info)
  418. {
  419. ktime_t calltime;
  420. int error;
  421. if (!cb)
  422. return 0;
  423. calltime = initcall_debug_start(dev, cb);
  424. pm_dev_dbg(dev, state, info);
  425. trace_device_pm_callback_start(dev, info, state.event);
  426. error = cb(dev);
  427. trace_device_pm_callback_end(dev, error);
  428. suspend_report_result(dev, cb, error);
  429. initcall_debug_report(dev, calltime, cb, error);
  430. return error;
  431. }
  432. #ifdef CONFIG_DPM_WATCHDOG
  433. struct dpm_watchdog {
  434. struct device *dev;
  435. struct task_struct *tsk;
  436. struct timer_list timer;
  437. };
  438. #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
  439. struct dpm_watchdog wd
  440. /**
  441. * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
  442. * @t: The timer that PM watchdog depends on.
  443. *
  444. * Called when a driver has timed out suspending or resuming.
  445. * There's not much we can do here to recover so panic() to
  446. * capture a crash-dump in pstore.
  447. */
  448. static void dpm_watchdog_handler(struct timer_list *t)
  449. {
  450. struct dpm_watchdog *wd = from_timer(wd, t, timer);
  451. dev_emerg(wd->dev, "**** DPM device timeout ****\n");
  452. show_stack(wd->tsk, NULL, KERN_EMERG);
  453. panic("%s %s: unrecoverable failure\n",
  454. dev_driver_string(wd->dev), dev_name(wd->dev));
  455. }
  456. /**
  457. * dpm_watchdog_set - Enable pm watchdog for given device.
  458. * @wd: Watchdog. Must be allocated on the stack.
  459. * @dev: Device to handle.
  460. */
  461. static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
  462. {
  463. struct timer_list *timer = &wd->timer;
  464. wd->dev = dev;
  465. wd->tsk = current;
  466. timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
  467. /* use same timeout value for both suspend and resume */
  468. timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
  469. add_timer(timer);
  470. }
  471. /**
  472. * dpm_watchdog_clear - Disable suspend/resume watchdog.
  473. * @wd: Watchdog to disable.
  474. */
  475. static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  476. {
  477. struct timer_list *timer = &wd->timer;
  478. del_timer_sync(timer);
  479. destroy_timer_on_stack(timer);
  480. }
  481. #else
  482. #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
  483. #define dpm_watchdog_set(x, y)
  484. #define dpm_watchdog_clear(x)
  485. #endif
  486. /*------------------------- Resume routines -------------------------*/
  487. /**
  488. * dev_pm_skip_resume - System-wide device resume optimization check.
  489. * @dev: Target device.
  490. *
  491. * Return:
  492. * - %false if the transition under way is RESTORE.
  493. * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
  494. * - The logical negation of %power.must_resume otherwise (that is, when the
  495. * transition under way is RESUME).
  496. */
  497. bool dev_pm_skip_resume(struct device *dev)
  498. {
  499. if (pm_transition.event == PM_EVENT_RESTORE)
  500. return false;
  501. if (pm_transition.event == PM_EVENT_THAW)
  502. return dev_pm_skip_suspend(dev);
  503. return !dev->power.must_resume;
  504. }
  505. /**
  506. * __device_resume_noirq - Execute a "noirq resume" callback for given device.
  507. * @dev: Device to handle.
  508. * @state: PM transition of the system being carried out.
  509. * @async: If true, the device is being resumed asynchronously.
  510. *
  511. * The driver of @dev will not receive interrupts while this function is being
  512. * executed.
  513. */
  514. static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
  515. {
  516. pm_callback_t callback = NULL;
  517. const char *info = NULL;
  518. bool skip_resume;
  519. int error = 0;
  520. TRACE_DEVICE(dev);
  521. TRACE_RESUME(0);
  522. if (dev->power.syscore || dev->power.direct_complete)
  523. goto Out;
  524. if (!dev->power.is_noirq_suspended)
  525. goto Out;
  526. if (!dpm_wait_for_superior(dev, async))
  527. goto Out;
  528. skip_resume = dev_pm_skip_resume(dev);
  529. /*
  530. * If the driver callback is skipped below or by the middle layer
  531. * callback and device_resume_early() also skips the driver callback for
  532. * this device later, it needs to appear as "suspended" to PM-runtime,
  533. * so change its status accordingly.
  534. *
  535. * Otherwise, the device is going to be resumed, so set its PM-runtime
  536. * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
  537. * to avoid confusing drivers that don't use it.
  538. */
  539. if (skip_resume)
  540. pm_runtime_set_suspended(dev);
  541. else if (dev_pm_skip_suspend(dev))
  542. pm_runtime_set_active(dev);
  543. if (dev->pm_domain) {
  544. info = "noirq power domain ";
  545. callback = pm_noirq_op(&dev->pm_domain->ops, state);
  546. } else if (dev->type && dev->type->pm) {
  547. info = "noirq type ";
  548. callback = pm_noirq_op(dev->type->pm, state);
  549. } else if (dev->class && dev->class->pm) {
  550. info = "noirq class ";
  551. callback = pm_noirq_op(dev->class->pm, state);
  552. } else if (dev->bus && dev->bus->pm) {
  553. info = "noirq bus ";
  554. callback = pm_noirq_op(dev->bus->pm, state);
  555. }
  556. if (callback)
  557. goto Run;
  558. if (skip_resume)
  559. goto Skip;
  560. if (dev->driver && dev->driver->pm) {
  561. info = "noirq driver ";
  562. callback = pm_noirq_op(dev->driver->pm, state);
  563. }
  564. Run:
  565. error = dpm_run_callback(callback, dev, state, info);
  566. Skip:
  567. dev->power.is_noirq_suspended = false;
  568. Out:
  569. complete_all(&dev->power.completion);
  570. TRACE_RESUME(error);
  571. if (error) {
  572. suspend_stats.failed_resume_noirq++;
  573. dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
  574. dpm_save_failed_dev(dev_name(dev));
  575. pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
  576. }
  577. }
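/*
 * Handle a device asynchronously only if that has been requested for it,
 * async suspend/resume is enabled globally and PM tracing, which needs a
 * deterministic device order, is not active.
 */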
  578. static bool is_async(struct device *dev)
  579. {
  580. return dev->power.async_suspend && pm_async_enabled
  581. && !pm_trace_is_enabled();
  582. }
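/*
 * Schedule @func to run asynchronously for @dev.  Returns true if it was
 * scheduled, in which case @func owns the device reference taken here and
 * is responsible for dropping it.
 */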
  583. static bool dpm_async_fn(struct device *dev, async_func_t func)
  584. {
  585. reinit_completion(&dev->power.completion);
  586. if (!is_async(dev))
  587. return false;
  588. get_device(dev);
  589. if (async_schedule_dev_nocall(func, dev))
  590. return true;
  591. put_device(dev);
  592. return false;
  593. }
  594. static void async_resume_noirq(void *data, async_cookie_t cookie)
  595. {
  596. struct device *dev = (struct device *)data;
  597. __device_resume_noirq(dev, pm_transition, true);
  598. put_device(dev);
  599. }
  600. static void device_resume_noirq(struct device *dev)
  601. {
  602. if (dpm_async_fn(dev, async_resume_noirq))
  603. return;
  604. __device_resume_noirq(dev, pm_transition, false);
  605. }
  606. static void dpm_noirq_resume_devices(pm_message_t state)
  607. {
  608. struct device *dev;
  609. ktime_t starttime = ktime_get();
  610. trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
  611. mutex_lock(&dpm_list_mtx);
  612. pm_transition = state;
  613. while (!list_empty(&dpm_noirq_list)) {
  614. dev = to_device(dpm_noirq_list.next);
  615. get_device(dev);
  616. list_move_tail(&dev->power.entry, &dpm_late_early_list);
  617. mutex_unlock(&dpm_list_mtx);
  618. device_resume_noirq(dev);
  619. put_device(dev);
  620. mutex_lock(&dpm_list_mtx);
  621. }
  622. mutex_unlock(&dpm_list_mtx);
  623. async_synchronize_full();
  624. dpm_show_time(starttime, state, 0, "noirq");
  625. trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
  626. }
  627. /**
  628. * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  629. * @state: PM transition of the system being carried out.
  630. *
  631. * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
  632. * allow device drivers' interrupt handlers to be called.
  633. */
  634. void dpm_resume_noirq(pm_message_t state)
  635. {
  636. dpm_noirq_resume_devices(state);
  637. resume_device_irqs();
  638. device_wakeup_disarm_wake_irqs();
  639. }
  640. /**
  641. * __device_resume_early - Execute an "early resume" callback for given device.
  642. * @dev: Device to handle.
  643. * @state: PM transition of the system being carried out.
  644. * @async: If true, the device is being resumed asynchronously.
  645. *
  646. * Runtime PM is disabled for @dev while this function is being executed.
  647. */
  648. static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
  649. {
  650. pm_callback_t callback = NULL;
  651. const char *info = NULL;
  652. int error = 0;
  653. TRACE_DEVICE(dev);
  654. TRACE_RESUME(0);
  655. if (dev->power.syscore || dev->power.direct_complete)
  656. goto Out;
  657. if (!dev->power.is_late_suspended)
  658. goto Out;
  659. if (!dpm_wait_for_superior(dev, async))
  660. goto Out;
  661. if (dev->pm_domain) {
  662. info = "early power domain ";
  663. callback = pm_late_early_op(&dev->pm_domain->ops, state);
  664. } else if (dev->type && dev->type->pm) {
  665. info = "early type ";
  666. callback = pm_late_early_op(dev->type->pm, state);
  667. } else if (dev->class && dev->class->pm) {
  668. info = "early class ";
  669. callback = pm_late_early_op(dev->class->pm, state);
  670. } else if (dev->bus && dev->bus->pm) {
  671. info = "early bus ";
  672. callback = pm_late_early_op(dev->bus->pm, state);
  673. }
  674. if (callback)
  675. goto Run;
  676. if (dev_pm_skip_resume(dev))
  677. goto Skip;
  678. if (dev->driver && dev->driver->pm) {
  679. info = "early driver ";
  680. callback = pm_late_early_op(dev->driver->pm, state);
  681. }
  682. Run:
  683. error = dpm_run_callback(callback, dev, state, info);
  684. Skip:
  685. dev->power.is_late_suspended = false;
  686. Out:
  687. TRACE_RESUME(error);
  688. pm_runtime_enable(dev);
  689. complete_all(&dev->power.completion);
  690. if (error) {
  691. suspend_stats.failed_resume_early++;
  692. dpm_save_failed_step(SUSPEND_RESUME_EARLY);
  693. dpm_save_failed_dev(dev_name(dev));
  694. pm_dev_err(dev, state, async ? " async early" : " early", error);
  695. }
  696. }
  697. static void async_resume_early(void *data, async_cookie_t cookie)
  698. {
  699. struct device *dev = (struct device *)data;
  700. __device_resume_early(dev, pm_transition, true);
  701. put_device(dev);
  702. }
  703. static void device_resume_early(struct device *dev)
  704. {
  705. if (dpm_async_fn(dev, async_resume_early))
  706. return;
  707. __device_resume_early(dev, pm_transition, false);
  708. }
  709. /**
  710. * dpm_resume_early - Execute "early resume" callbacks for all devices.
  711. * @state: PM transition of the system being carried out.
  712. */
  713. void dpm_resume_early(pm_message_t state)
  714. {
  715. struct device *dev;
  716. ktime_t starttime = ktime_get();
  717. trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
  718. mutex_lock(&dpm_list_mtx);
  719. pm_transition = state;
  720. while (!list_empty(&dpm_late_early_list)) {
  721. dev = to_device(dpm_late_early_list.next);
  722. get_device(dev);
  723. list_move_tail(&dev->power.entry, &dpm_suspended_list);
  724. mutex_unlock(&dpm_list_mtx);
  725. device_resume_early(dev);
  726. put_device(dev);
  727. mutex_lock(&dpm_list_mtx);
  728. }
  729. mutex_unlock(&dpm_list_mtx);
  730. async_synchronize_full();
  731. dpm_show_time(starttime, state, 0, "early");
  732. trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
  733. }
  734. /**
  735. * dpm_resume_start - Execute "noirq" and "early" device callbacks.
  736. * @state: PM transition of the system being carried out.
  737. */
  738. void dpm_resume_start(pm_message_t state)
  739. {
  740. dpm_resume_noirq(state);
  741. dpm_resume_early(state);
  742. }
  743. EXPORT_SYMBOL_GPL(dpm_resume_start);
  744. /**
  745. * __device_resume - Execute "resume" callbacks for given device.
  746. * @dev: Device to handle.
  747. * @state: PM transition of the system being carried out.
  748. * @async: If true, the device is being resumed asynchronously.
  749. */
  750. static void __device_resume(struct device *dev, pm_message_t state, bool async)
  751. {
  752. pm_callback_t callback = NULL;
  753. const char *info = NULL;
  754. int error = 0;
  755. DECLARE_DPM_WATCHDOG_ON_STACK(wd);
  756. TRACE_DEVICE(dev);
  757. TRACE_RESUME(0);
  758. if (dev->power.syscore)
  759. goto Complete;
  760. if (dev->power.direct_complete) {
  761. /* Match the pm_runtime_disable() in __device_suspend(). */
  762. pm_runtime_enable(dev);
  763. goto Complete;
  764. }
  765. if (!dpm_wait_for_superior(dev, async))
  766. goto Complete;
  767. dpm_watchdog_set(&wd, dev);
  768. device_lock(dev);
  769. /*
  770. * This is a fib. But we'll allow new children to be added below
  771. * a resumed device, even if the device hasn't been completed yet.
  772. */
  773. dev->power.is_prepared = false;
  774. if (!dev->power.is_suspended)
  775. goto Unlock;
  776. if (dev->pm_domain) {
  777. info = "power domain ";
  778. callback = pm_op(&dev->pm_domain->ops, state);
  779. goto Driver;
  780. }
  781. if (dev->type && dev->type->pm) {
  782. info = "type ";
  783. callback = pm_op(dev->type->pm, state);
  784. goto Driver;
  785. }
  786. if (dev->class && dev->class->pm) {
  787. info = "class ";
  788. callback = pm_op(dev->class->pm, state);
  789. goto Driver;
  790. }
  791. if (dev->bus) {
  792. if (dev->bus->pm) {
  793. info = "bus ";
  794. callback = pm_op(dev->bus->pm, state);
  795. } else if (dev->bus->resume) {
  796. info = "legacy bus ";
  797. callback = dev->bus->resume;
  798. goto End;
  799. }
  800. }
  801. Driver:
  802. if (!callback && dev->driver && dev->driver->pm) {
  803. info = "driver ";
  804. callback = pm_op(dev->driver->pm, state);
  805. }
  806. End:
  807. error = dpm_run_callback(callback, dev, state, info);
  808. dev->power.is_suspended = false;
  809. Unlock:
  810. device_unlock(dev);
  811. dpm_watchdog_clear(&wd);
  812. Complete:
  813. complete_all(&dev->power.completion);
  814. TRACE_RESUME(error);
  815. if (error) {
  816. suspend_stats.failed_resume++;
  817. dpm_save_failed_step(SUSPEND_RESUME);
  818. dpm_save_failed_dev(dev_name(dev));
  819. pm_dev_err(dev, state, async ? " async" : "", error);
  820. }
  821. }
  822. static void async_resume(void *data, async_cookie_t cookie)
  823. {
  824. struct device *dev = (struct device *)data;
  825. __device_resume(dev, pm_transition, true);
  826. put_device(dev);
  827. }
  828. static void device_resume(struct device *dev)
  829. {
  830. if (dpm_async_fn(dev, async_resume))
  831. return;
  832. __device_resume(dev, pm_transition, false);
  833. }
  834. /**
  835. * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  836. * @state: PM transition of the system being carried out.
  837. *
  838. * Execute the appropriate "resume" callback for all devices whose status
  839. * indicates that they are suspended.
  840. */
  841. void dpm_resume(pm_message_t state)
  842. {
  843. struct device *dev;
  844. ktime_t starttime = ktime_get();
  845. trace_suspend_resume(TPS("dpm_resume"), state.event, true);
  846. might_sleep();
  847. mutex_lock(&dpm_list_mtx);
  848. pm_transition = state;
  849. async_error = 0;
  850. while (!list_empty(&dpm_suspended_list)) {
  851. dev = to_device(dpm_suspended_list.next);
  852. get_device(dev);
  853. mutex_unlock(&dpm_list_mtx);
  854. device_resume(dev);
  855. mutex_lock(&dpm_list_mtx);
  856. if (!list_empty(&dev->power.entry))
  857. list_move_tail(&dev->power.entry, &dpm_prepared_list);
  858. mutex_unlock(&dpm_list_mtx);
  859. put_device(dev);
  860. mutex_lock(&dpm_list_mtx);
  861. }
  862. mutex_unlock(&dpm_list_mtx);
  863. async_synchronize_full();
  864. dpm_show_time(starttime, state, 0, NULL);
  865. cpufreq_resume();
  866. devfreq_resume();
  867. trace_suspend_resume(TPS("dpm_resume"), state.event, false);
  868. }
  869. /**
  870. * device_complete - Complete a PM transition for given device.
  871. * @dev: Device to handle.
  872. * @state: PM transition of the system being carried out.
  873. */
  874. static void device_complete(struct device *dev, pm_message_t state)
  875. {
  876. void (*callback)(struct device *) = NULL;
  877. const char *info = NULL;
  878. if (dev->power.syscore)
  879. goto out;
  880. device_lock(dev);
  881. if (dev->pm_domain) {
  882. info = "completing power domain ";
  883. callback = dev->pm_domain->ops.complete;
  884. } else if (dev->type && dev->type->pm) {
  885. info = "completing type ";
  886. callback = dev->type->pm->complete;
  887. } else if (dev->class && dev->class->pm) {
  888. info = "completing class ";
  889. callback = dev->class->pm->complete;
  890. } else if (dev->bus && dev->bus->pm) {
  891. info = "completing bus ";
  892. callback = dev->bus->pm->complete;
  893. }
  894. if (!callback && dev->driver && dev->driver->pm) {
  895. info = "completing driver ";
  896. callback = dev->driver->pm->complete;
  897. }
  898. if (callback) {
  899. pm_dev_dbg(dev, state, info);
  900. callback(dev);
  901. }
  902. device_unlock(dev);
  903. out:
  904. pm_runtime_put(dev);
  905. }
  906. /**
  907. * dpm_complete - Complete a PM transition for all non-sysdev devices.
  908. * @state: PM transition of the system being carried out.
  909. *
  910. * Execute the ->complete() callbacks for all devices whose PM status is not
  911. * DPM_ON (this allows new devices to be registered).
  912. */
  913. void dpm_complete(pm_message_t state)
  914. {
  915. struct list_head list;
  916. trace_suspend_resume(TPS("dpm_complete"), state.event, true);
  917. might_sleep();
  918. INIT_LIST_HEAD(&list);
  919. mutex_lock(&dpm_list_mtx);
  920. while (!list_empty(&dpm_prepared_list)) {
  921. struct device *dev = to_device(dpm_prepared_list.prev);
  922. get_device(dev);
  923. dev->power.is_prepared = false;
  924. list_move(&dev->power.entry, &list);
  925. mutex_unlock(&dpm_list_mtx);
  926. trace_device_pm_callback_start(dev, "", state.event);
  927. device_complete(dev, state);
  928. trace_device_pm_callback_end(dev, 0);
  929. put_device(dev);
  930. mutex_lock(&dpm_list_mtx);
  931. }
  932. list_splice(&list, &dpm_list);
  933. mutex_unlock(&dpm_list_mtx);
  934. /* Allow device probing and trigger re-probing of deferred devices */
  935. device_unblock_probing();
  936. trace_suspend_resume(TPS("dpm_complete"), state.event, false);
  937. }
  938. /**
  939. * dpm_resume_end - Execute "resume" callbacks and complete system transition.
  940. * @state: PM transition of the system being carried out.
  941. *
  942. * Execute "resume" callbacks for all devices and complete the PM transition of
  943. * the system.
  944. */
  945. void dpm_resume_end(pm_message_t state)
  946. {
  947. dpm_resume(state);
  948. dpm_complete(state);
  949. }
  950. EXPORT_SYMBOL_GPL(dpm_resume_end);
  951. /*------------------------- Suspend routines -------------------------*/
  952. /**
  953. * resume_event - Return a "resume" message for given "suspend" sleep state.
  954. * @sleep_state: PM message representing a sleep state.
  955. *
  956. * Return a PM message representing the resume event corresponding to given
  957. * sleep state.
  958. */
  959. static pm_message_t resume_event(pm_message_t sleep_state)
  960. {
  961. switch (sleep_state.event) {
  962. case PM_EVENT_SUSPEND:
  963. return PMSG_RESUME;
  964. case PM_EVENT_FREEZE:
  965. case PM_EVENT_QUIESCE:
  966. return PMSG_RECOVER;
  967. case PM_EVENT_HIBERNATE:
  968. return PMSG_RESTORE;
  969. }
  970. return PMSG_ON;
  971. }
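/* Mark the parent and all suppliers of @dev as having to be resumed. */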
  972. static void dpm_superior_set_must_resume(struct device *dev)
  973. {
  974. struct device_link *link;
  975. int idx;
  976. if (dev->parent)
  977. dev->parent->power.must_resume = true;
  978. idx = device_links_read_lock();
  979. list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
  980. link->supplier->power.must_resume = true;
  981. device_links_read_unlock(idx);
  982. }
  983. /**
  984. * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  985. * @dev: Device to handle.
  986. * @state: PM transition of the system being carried out.
  987. * @async: If true, the device is being suspended asynchronously.
  988. *
  989. * The driver of @dev will not receive interrupts while this function is being
  990. * executed.
  991. */
  992. static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
  993. {
  994. pm_callback_t callback = NULL;
  995. const char *info = NULL;
  996. int error = 0;
  997. TRACE_DEVICE(dev);
  998. TRACE_SUSPEND(0);
  999. dpm_wait_for_subordinate(dev, async);
  1000. if (async_error)
  1001. goto Complete;
  1002. if (dev->power.syscore || dev->power.direct_complete)
  1003. goto Complete;
  1004. if (dev->pm_domain) {
  1005. info = "noirq power domain ";
  1006. callback = pm_noirq_op(&dev->pm_domain->ops, state);
  1007. } else if (dev->type && dev->type->pm) {
  1008. info = "noirq type ";
  1009. callback = pm_noirq_op(dev->type->pm, state);
  1010. } else if (dev->class && dev->class->pm) {
  1011. info = "noirq class ";
  1012. callback = pm_noirq_op(dev->class->pm, state);
  1013. } else if (dev->bus && dev->bus->pm) {
  1014. info = "noirq bus ";
  1015. callback = pm_noirq_op(dev->bus->pm, state);
  1016. }
  1017. if (callback)
  1018. goto Run;
  1019. if (dev_pm_skip_suspend(dev))
  1020. goto Skip;
  1021. if (dev->driver && dev->driver->pm) {
  1022. info = "noirq driver ";
  1023. callback = pm_noirq_op(dev->driver->pm, state);
  1024. }
  1025. Run:
  1026. error = dpm_run_callback(callback, dev, state, info);
  1027. if (error) {
  1028. async_error = error;
  1029. log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
  1030. dev_name(dev), pm_verb(state.event), error);
  1031. goto Complete;
  1032. }
  1033. Skip:
  1034. dev->power.is_noirq_suspended = true;
  1035. /*
  1036. * Skipping the resume of devices that were in use right before the
  1037. * system suspend (as indicated by their PM-runtime usage counters)
  1038. * would be suboptimal. Also resume them if doing that is not allowed
  1039. * to be skipped.
  1040. */
  1041. if (atomic_read(&dev->power.usage_count) > 1 ||
  1042. !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
  1043. dev->power.may_skip_resume))
  1044. dev->power.must_resume = true;
  1045. if (dev->power.must_resume)
  1046. dpm_superior_set_must_resume(dev);
  1047. Complete:
  1048. complete_all(&dev->power.completion);
  1049. TRACE_SUSPEND(error);
  1050. return error;
  1051. }
  1052. static void async_suspend_noirq(void *data, async_cookie_t cookie)
  1053. {
  1054. struct device *dev = (struct device *)data;
  1055. int error;
  1056. error = __device_suspend_noirq(dev, pm_transition, true);
  1057. if (error) {
  1058. dpm_save_failed_dev(dev_name(dev));
  1059. pm_dev_err(dev, pm_transition, " async", error);
  1060. }
  1061. put_device(dev);
  1062. }
  1063. static int device_suspend_noirq(struct device *dev)
  1064. {
  1065. if (dpm_async_fn(dev, async_suspend_noirq))
  1066. return 0;
  1067. return __device_suspend_noirq(dev, pm_transition, false);
  1068. }
  1069. static int dpm_noirq_suspend_devices(pm_message_t state)
  1070. {
  1071. ktime_t starttime = ktime_get();
  1072. int error = 0;
  1073. trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
  1074. mutex_lock(&dpm_list_mtx);
  1075. pm_transition = state;
  1076. async_error = 0;
  1077. while (!list_empty(&dpm_late_early_list)) {
  1078. struct device *dev = to_device(dpm_late_early_list.prev);
  1079. get_device(dev);
  1080. mutex_unlock(&dpm_list_mtx);
  1081. error = device_suspend_noirq(dev);
  1082. mutex_lock(&dpm_list_mtx);
  1083. if (error) {
  1084. pm_dev_err(dev, state, " noirq", error);
  1085. dpm_save_failed_dev(dev_name(dev));
  1086. } else if (!list_empty(&dev->power.entry)) {
  1087. list_move(&dev->power.entry, &dpm_noirq_list);
  1088. }
  1089. mutex_unlock(&dpm_list_mtx);
  1090. put_device(dev);
  1091. mutex_lock(&dpm_list_mtx);
  1092. if (error || async_error)
  1093. break;
  1094. }
  1095. mutex_unlock(&dpm_list_mtx);
  1096. async_synchronize_full();
  1097. if (!error)
  1098. error = async_error;
  1099. if (error) {
  1100. suspend_stats.failed_suspend_noirq++;
  1101. dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
  1102. }
  1103. dpm_show_time(starttime, state, error, "noirq");
  1104. trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
  1105. return error;
  1106. }
  1107. /**
  1108. * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
  1109. * @state: PM transition of the system being carried out.
  1110. *
  1111. * Prevent device drivers' interrupt handlers from being called and invoke
  1112. * "noirq" suspend callbacks for all non-sysdev devices.
  1113. */
  1114. int dpm_suspend_noirq(pm_message_t state)
  1115. {
  1116. int ret;
  1117. device_wakeup_arm_wake_irqs();
  1118. suspend_device_irqs();
  1119. ret = dpm_noirq_suspend_devices(state);
  1120. if (ret)
  1121. dpm_resume_noirq(resume_event(state));
  1122. return ret;
  1123. }
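/*
 * If @dev is in the wakeup path, propagate that to its parent unless
 * power.ignore_children is set for the parent.
 */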
  1124. static void dpm_propagate_wakeup_to_parent(struct device *dev)
  1125. {
  1126. struct device *parent = dev->parent;
  1127. if (!parent)
  1128. return;
  1129. spin_lock_irq(&parent->power.lock);
  1130. if (device_wakeup_path(dev) && !parent->power.ignore_children)
  1131. parent->power.wakeup_path = true;
  1132. spin_unlock_irq(&parent->power.lock);
  1133. }
  1134. /**
  1135. * __device_suspend_late - Execute a "late suspend" callback for given device.
  1136. * @dev: Device to handle.
  1137. * @state: PM transition of the system being carried out.
  1138. * @async: If true, the device is being suspended asynchronously.
  1139. *
  1140. * Runtime PM is disabled for @dev while this function is being executed.
  1141. */
  1142. static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
  1143. {
  1144. pm_callback_t callback = NULL;
  1145. const char *info = NULL;
  1146. int error = 0;
  1147. TRACE_DEVICE(dev);
  1148. TRACE_SUSPEND(0);
  1149. __pm_runtime_disable(dev, false);
  1150. dpm_wait_for_subordinate(dev, async);
  1151. if (async_error)
  1152. goto Complete;
  1153. if (pm_wakeup_pending()) {
  1154. async_error = -EBUSY;
  1155. goto Complete;
  1156. }
  1157. if (dev->power.syscore || dev->power.direct_complete)
  1158. goto Complete;
  1159. if (dev->pm_domain) {
  1160. info = "late power domain ";
  1161. callback = pm_late_early_op(&dev->pm_domain->ops, state);
  1162. } else if (dev->type && dev->type->pm) {
  1163. info = "late type ";
  1164. callback = pm_late_early_op(dev->type->pm, state);
  1165. } else if (dev->class && dev->class->pm) {
  1166. info = "late class ";
  1167. callback = pm_late_early_op(dev->class->pm, state);
  1168. } else if (dev->bus && dev->bus->pm) {
  1169. info = "late bus ";
  1170. callback = pm_late_early_op(dev->bus->pm, state);
  1171. }
  1172. if (callback)
  1173. goto Run;
  1174. if (dev_pm_skip_suspend(dev))
  1175. goto Skip;
  1176. if (dev->driver && dev->driver->pm) {
  1177. info = "late driver ";
  1178. callback = pm_late_early_op(dev->driver->pm, state);
  1179. }
  1180. Run:
  1181. error = dpm_run_callback(callback, dev, state, info);
  1182. if (error) {
  1183. async_error = error;
  1184. log_suspend_abort_reason("Device %s failed to %s late: error %d",
  1185. dev_name(dev), pm_verb(state.event), error);
  1186. goto Complete;
  1187. }
  1188. dpm_propagate_wakeup_to_parent(dev);
  1189. Skip:
  1190. dev->power.is_late_suspended = true;
  1191. Complete:
  1192. TRACE_SUSPEND(error);
  1193. complete_all(&dev->power.completion);
  1194. return error;
  1195. }
  1196. static void async_suspend_late(void *data, async_cookie_t cookie)
  1197. {
  1198. struct device *dev = (struct device *)data;
  1199. int error;
  1200. error = __device_suspend_late(dev, pm_transition, true);
  1201. if (error) {
  1202. dpm_save_failed_dev(dev_name(dev));
  1203. pm_dev_err(dev, pm_transition, " async", error);
  1204. }
  1205. put_device(dev);
  1206. }
  1207. static int device_suspend_late(struct device *dev)
  1208. {
  1209. if (dpm_async_fn(dev, async_suspend_late))
  1210. return 0;
  1211. return __device_suspend_late(dev, pm_transition, false);
  1212. }
  1213. /**
  1214. * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
  1215. * @state: PM transition of the system being carried out.
  1216. */
  1217. int dpm_suspend_late(pm_message_t state)
  1218. {
  1219. ktime_t starttime = ktime_get();
  1220. int error = 0;
  1221. trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
  1222. wake_up_all_idle_cpus();
  1223. mutex_lock(&dpm_list_mtx);
  1224. pm_transition = state;
  1225. async_error = 0;
  1226. while (!list_empty(&dpm_suspended_list)) {
  1227. struct device *dev = to_device(dpm_suspended_list.prev);
  1228. get_device(dev);
  1229. mutex_unlock(&dpm_list_mtx);
  1230. error = device_suspend_late(dev);
  1231. mutex_lock(&dpm_list_mtx);
  1232. if (!list_empty(&dev->power.entry))
  1233. list_move(&dev->power.entry, &dpm_late_early_list);
  1234. if (error) {
  1235. pm_dev_err(dev, state, " late", error);
  1236. dpm_save_failed_dev(dev_name(dev));
  1237. }
  1238. mutex_unlock(&dpm_list_mtx);
  1239. put_device(dev);
  1240. mutex_lock(&dpm_list_mtx);
  1241. if (error || async_error)
  1242. break;
  1243. }
  1244. mutex_unlock(&dpm_list_mtx);
  1245. async_synchronize_full();
  1246. if (!error)
  1247. error = async_error;
  1248. if (error) {
  1249. suspend_stats.failed_suspend_late++;
  1250. dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
  1251. dpm_resume_early(resume_event(state));
  1252. }
  1253. dpm_show_time(starttime, state, error, "late");
  1254. trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
  1255. return error;
  1256. }
  1257. /**
  1258. * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
  1259. * @state: PM transition of the system being carried out.
  1260. */
  1261. int dpm_suspend_end(pm_message_t state)
  1262. {
  1263. ktime_t starttime = ktime_get();
  1264. int error;
  1265. error = dpm_suspend_late(state);
  1266. if (error)
  1267. goto out;
  1268. error = dpm_suspend_noirq(state);
  1269. if (error)
  1270. dpm_resume_early(resume_event(state));
  1271. out:
  1272. dpm_show_time(starttime, state, error, "end");
  1273. return error;
  1274. }
  1275. EXPORT_SYMBOL_GPL(dpm_suspend_end);
  1276. /**
  1277. * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
  1278. * @dev: Device to suspend.
  1279. * @state: PM transition of the system being carried out.
  1280. * @cb: Suspend callback to execute.
  1281. * @info: string description of caller.
  1282. */
  1283. static int legacy_suspend(struct device *dev, pm_message_t state,
  1284. int (*cb)(struct device *dev, pm_message_t state),
  1285. const char *info)
  1286. {
  1287. int error;
  1288. ktime_t calltime;
  1289. calltime = initcall_debug_start(dev, cb);
  1290. trace_device_pm_callback_start(dev, info, state.event);
  1291. error = cb(dev, state);
  1292. trace_device_pm_callback_end(dev, error);
  1293. suspend_report_result(dev, cb, error);
  1294. initcall_debug_report(dev, calltime, cb, error);
  1295. return error;
  1296. }
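/*
 * Clear the direct_complete flag of the parent and of all suppliers of @dev
 * so that they go through the full suspend/resume sequence instead of the
 * direct-complete shortcut.
 */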
  1297. static void dpm_clear_superiors_direct_complete(struct device *dev)
  1298. {
  1299. struct device_link *link;
  1300. int idx;
  1301. if (dev->parent) {
  1302. spin_lock_irq(&dev->parent->power.lock);
  1303. dev->parent->power.direct_complete = false;
  1304. spin_unlock_irq(&dev->parent->power.lock);
  1305. }
  1306. idx = device_links_read_lock();
  1307. list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
  1308. spin_lock_irq(&link->supplier->power.lock);
  1309. link->supplier->power.direct_complete = false;
  1310. spin_unlock_irq(&link->supplier->power.lock);
  1311. }
  1312. device_links_read_unlock(idx);
  1313. }
  1314. /**
  1315. * __device_suspend - Execute "suspend" callbacks for given device.
  1316. * @dev: Device to handle.
  1317. * @state: PM transition of the system being carried out.
  1318. * @async: If true, the device is being suspended asynchronously.
  1319. */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	} else {
		log_suspend_abort_reason("Device %s failed to %s: error %d",
					 dev_name(dev), pm_verb(state.event), error);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

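/*
 * Async counterpart of the device suspend path: invoked by the async
 * framework with the device passed in @data, it runs __device_suspend(),
 * records any failure for the suspend statistics, and drops the device
 * reference taken when the async work was scheduled.
 */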
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

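/*
 * Kick off the suspend of @dev asynchronously if async suspend is enabled
 * for it and the async work can be scheduled; otherwise suspend it
 * synchronously.
 */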
static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

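/*
 * Illustrative sketch (not part of this file): a driver that must always run
 * its full suspend/resume callbacks, even when its device looks
 * runtime-suspended, can opt out of the direct-complete optimization
 * evaluated above by setting DPM_FLAG_NO_DIRECT_COMPLETE at probe time.
 * The "foo" identifiers below are assumptions made purely for the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
 *		return 0;
 *	}
 */
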
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * probing is disabled below.  This synchronization point matters at
	 * least at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices while a suspend or hibernation transition is in
	 * progress is unsafe and would make system behavior unpredictable,
	 * so prohibit probing here and defer any probes instead.  Normal
	 * behavior is restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
						 dev_name(dev), error);
			dpm_save_failed_dev(dev_name(dev));
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);

	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

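/*
 * Invoked via the suspend_report_result() macro, which supplies the name of
 * the calling function: log a PM callback that returned an error together
 * with its caller.
 */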
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (ret)
		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

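/*
 * Illustrative sketch (hypothetical driver, not part of this file): a device
 * that may only suspend after an otherwise unrelated companion device has
 * finished suspending can enforce that ordering from its own suspend
 * callback.  All "foo" identifiers are assumptions made for the example.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_power_down(foo);
 *	}
 */
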
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

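/*
 * Return true if @ops is NULL or provides none of the system sleep callbacks
 * checked below.
 */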
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

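/*
 * Record in dev->power.no_pm_callbacks whether the device has no PM callbacks
 * at all across its PM domain, device type, class, bus and driver (including
 * the legacy bus and driver suspend/resume hooks), so the PM core can skip
 * its callback phases during system transitions.
 */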
void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

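/*
 * Return true if @dev's driver allows "smart suspend" (DPM_FLAG_SMART_SUSPEND)
 * and the device is already runtime-suspended, in which case its subsystem may
 * skip the remaining system-wide suspend callbacks for it.
 */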
bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}