hif_runtime_pm.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788
  1. /*
  2. * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  10. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  12. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  13. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/slab.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/if_arp.h>
  18. #include "hif_io32.h"
  19. #include "hif_runtime_pm.h"
  20. #include "hif.h"
  21. #include "target_type.h"
  22. #include "hif_main.h"
  23. #include "ce_main.h"
  24. #include "ce_api.h"
  25. #include "ce_internal.h"
  26. #include "ce_reg.h"
  27. #include "ce_bmi.h"
  28. #include "regtable.h"
  29. #include "hif_hw_version.h"
  30. #include <linux/debugfs.h>
  31. #include <linux/seq_file.h>
  32. #include "qdf_status.h"
  33. #include "qdf_atomic.h"
  34. #include "pld_common.h"
  35. #include "mp_dev.h"
  36. #include "hif_debug.h"
  37. #include "ce_tasklet.h"
  38. #include "targaddrs.h"
  39. #include "hif_exec.h"
  40. #ifdef FEATURE_RUNTIME_PM
/**
 * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
 * @scn: hif context
 *
 * This function will check if Runtime PM is enabled or not.
 *
 * Return: true if runtime pm is enabled either via the hif ini config
 *         or by the kernel for the underlying device, false otherwise
 */
static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
{
	if (scn->hif_config.enable_runtime_pm)
		return true;

	return pm_runtime_enabled(hif_bus_get_dev(scn));
}
  55. /**
  56. * hif_pm_runtime_state_to_string() - Mapping state into string
  57. * @state: runtime pm state
  58. *
  59. * This function will map the runtime pm state into corresponding
  60. * string for debug purpose.
  61. *
  62. * Return: pointer to the string
  63. */
  64. static const char *hif_pm_runtime_state_to_string(uint32_t state)
  65. {
  66. switch (state) {
  67. case HIF_PM_RUNTIME_STATE_NONE:
  68. return "INIT_STATE";
  69. case HIF_PM_RUNTIME_STATE_ON:
  70. return "ON";
  71. case HIF_PM_RUNTIME_STATE_RESUMING:
  72. return "RESUMING";
  73. case HIF_PM_RUNTIME_STATE_SUSPENDING:
  74. return "SUSPENDING";
  75. case HIF_PM_RUNTIME_STATE_SUSPENDED:
  76. return "SUSPENDED";
  77. default:
  78. return "INVALID STATE";
  79. }
  80. }
/* Print one pm_stats counter as a right-aligned "name: value" row. */
#define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @scn: hif_softc context
 * @msg: log message
 *
 * Log runtime pm stats when something seems off, then trigger a debug
 * panic unless the firmware is already down.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
				    const char *msg)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_pm_runtime_lock *ctx;
	int i;

	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
		       msg, atomic_read(&dev->power.usage_count),
		       hif_pm_runtime_state_to_string(
				atomic_read(&rpm_ctx->pm_state)),
		       rpm_ctx->prevent_suspend_cnt);
	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
		       dev->power.runtime_status,
		       dev->power.runtime_error,
		       dev->power.disable_depth,
		       dev->power.autosuspend_delay);
	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
		       qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
		       rpm_ctx->pm_stats.request_resume);

	/* Per-debug-id counters help identify the out-of-sync client */
	hif_nofl_debug("get put get-timestamp put-timestamp :DBGID_NAME");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		hif_nofl_debug("%-10d %-10d 0x%-10llx 0x%-10llx :%-30s",
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
			       qdf_atomic_read(
				       &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
			       rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
			       rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
			       rtpm_string_from_dbgid(i));
	}

	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
		       qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
		       qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
		       rpm_ctx->pm_stats.prevent_suspend_timeout,
		       rpm_ctx->pm_stats.allow_suspend_timeout);
	hif_nofl_debug("Suspended: %u, resumed: %u count",
		       rpm_ctx->pm_stats.suspended,
		       rpm_ctx->pm_stats.resumed);
	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
		       rpm_ctx->pm_stats.suspend_err,
		       rpm_ctx->pm_stats.runtime_get_err);

	/* NOTE(review): prevent_suspend_list is walked here without holding
	 * runtime_lock (the debugfs path does take it) - presumably tolerated
	 * because this path ends in a panic; confirm callers cannot race.
	 */
	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		hif_nofl_debug("source %s; timeout %d ms",
			       ctx->name, ctx->timeout);
	}

	if (qdf_is_fw_down()) {
		hif_err("fw is down");
		return;
	}

	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
}
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_softc *scn = s->private;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct device *dev = hif_bus_get_dev(scn);
	/* Must stay in sync with enum hif_pm_runtime_state ordering */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
			"SUSPENDING", "SUSPENDED"};
	unsigned int msecs_age;
	qdf_time_t usecs_age;
	int pm_state = atomic_read(&rpm_ctx->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;
	int i;

	seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
	seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
		   rpm_ctx->pm_stats.last_resume_caller);
	seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
		   rpm_ctx->pm_stats.last_busy_marker);

	/* Age of the last busy mark, in microseconds */
	usecs_age = qdf_get_log_timestamp_usecs() -
		rpm_ctx->pm_stats.last_busy_timestamp;
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
		   rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
		   rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
		   usecs_age / 1000000, usecs_age % 1000000);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(jiffies -
					     rpm_ctx->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
			   msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
		   atomic_read(&dev->power.usage_count));
	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
		   rpm_ctx->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
	seq_printf(s, "%30s: %u\n", "prevent_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
	seq_printf(s, "%30s: %u\n", "allow_suspend",
		   qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);
	seq_printf(s, "%30s: %u\n", "runtime_get",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
	seq_printf(s, "%30s: %u\n", "runtime_put",
		   qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));

	/* Per-debug-id get/put counters and timestamps */
	seq_puts(s, "get put get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < RTPM_ID_MAX; i++) {
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				   &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
		seq_printf(s, "%-10d ",
			   qdf_atomic_read(
				   &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
		seq_printf(s, "0x%-10llx ",
			   rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
	}

	/* NOTE(review): timer_expires is sampled without runtime_lock;
	 * a stale value only affects this debug output.
	 */
	timer_expires = rpm_ctx->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
			   msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&rpm_ctx->runtime_lock);
	if (list_empty(&rpm_ctx->prevent_suspend_list)) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
  242. #undef HIF_PCI_RUNTIME_PM_STATS
/**
 * hif_pci_runtime_pm_open() - open the runtime pm stats debugfs file
 * @inode: inode of the debugfs entry; i_private holds the hif context
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			   inode->i_private);
}
/* debugfs file operations for the runtime pm stats file (read-only seq_file) */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner = THIS_MODULE,
	.open = hif_pci_runtime_pm_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};
  262. /**
  263. * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
  264. * @scn: hif context
  265. *
  266. * creates a debugfs entry to debug the runtime pm feature.
  267. */
  268. static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
  269. {
  270. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  271. rpm_ctx->pm_dentry = debugfs_create_file("cnss_runtime_pm",
  272. 0400, NULL, scn,
  273. &hif_pci_runtime_pm_fops);
  274. }
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @scn: hif context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	debugfs_remove(rpm_ctx->pm_dentry);
}
/**
 * hif_runtime_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* NOTE(review): put_noidle presumably balances a usage-count get
	 * taken earlier by the bus/probe path - confirm against bus init.
	 */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
/**
 * hif_runtime_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_runtime_exit(struct device *dev)
{
	/* Undo hif_runtime_init(): retake the usage count and mark the
	 * device active without invoking resume callbacks.
	 */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	/* Symmetric call to make sure default usage count == 2 */
	pm_runtime_forbid(dev);
}
  319. static void hif_pm_runtime_lock_timeout_fn(void *data);
/**
 * hif_pm_runtime_start(): start the runtime pm
 * @scn: hif context
 *
 * After this call, runtime pm will be active. Runtime PM stays
 * disabled when turned off in ini and for FTM/EPPING/monitor modes.
 */
void hif_pm_runtime_start(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	/* Timer used to expire timed prevent-suspend locks */
	qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       scn, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 scn->hif_config.runtime_pm_delay);

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
	hif_runtime_pm_debugfs_create(scn);
}
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @scn: hif context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
void hif_pm_runtime_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!scn->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_runtime_exit(dev);

	/* Bring the device fully up before tearing runtime pm down */
	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));

	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(scn);
	qdf_timer_free(&rpm_ctx->runtime_timer);
}
/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @scn: hif ctx
 *
 * Early initialization: locks, pm state atomic, stats counters
 * (including per-debug-id get/put counters) and the prevent-suspend
 * list.
 */
void hif_pm_runtime_open(struct hif_softc *scn)
{
	int i;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_init(&rpm_ctx->runtime_lock);
	qdf_atomic_init(&rpm_ctx->pm_state);
	hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
			      "prevent_linkdown_lock");
	qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
	qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
	for (i = 0; i < RTPM_ID_MAX; i++) {
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
		qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
	}
	INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
}
  398. /**
  399. * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
  400. * @scn: hif context
  401. *
  402. * This function will check if get and put are out of sync or not.
  403. *
  404. * Return: void
  405. */
  406. static void hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
  407. {
  408. int32_t i;
  409. int32_t get_count, put_count;
  410. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  411. if (qdf_is_fw_down())
  412. return;
  413. for (i = 0; i < RTPM_ID_MAX; i++) {
  414. get_count = qdf_atomic_read(
  415. &rpm_ctx->pm_stats.runtime_get_dbgid[i]);
  416. put_count = qdf_atomic_read(
  417. &rpm_ctx->pm_stats.runtime_put_dbgid[i]);
  418. if (get_count != put_count) {
  419. QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
  420. rtpm_string_from_dbgid(i),
  421. get_count, put_count);
  422. }
  423. }
  424. }
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
 * @scn: hif context
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	hif_check_for_get_put_out_of_sync(scn);

	/* NOTE(review): runtime_lock is dropped around each deinit,
	 * presumably because hif_runtime_lock_deinit acquires it itself;
	 * confirm "tmp" cannot be freed concurrently while unlocked.
	 */
	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		spin_unlock_bh(&rpm_ctx->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
		spin_lock_bh(&rpm_ctx->runtime_lock);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
  448. static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
  449. struct hif_pm_runtime_lock *lock);
/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @scn: hif context
 *
 * API is used to empty the runtime pm prevent suspend list on
 * subsystem restart, releasing every outstanding prevent-suspend
 * vote under runtime_lock.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &rpm_ctx->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(scn, ctx);
	}
	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
  469. /**
  470. * hif_pm_runtime_close(): close runtime pm
  471. * @scn: hif ctx
  472. *
  473. * ensure runtime_pm is stopped before closing the driver
  474. */
  475. void hif_pm_runtime_close(struct hif_softc *scn)
  476. {
  477. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  478. struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
  479. /*
  480. * Here cds hif context was already NULL,
  481. * so calling hif_runtime_lock_deinit, instead of
  482. * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
  483. */
  484. hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);
  485. hif_is_recovery_in_progress(scn) ?
  486. hif_pm_runtime_sanitize_on_ssr_exit(scn) :
  487. hif_pm_runtime_sanitize_on_exit(scn);
  488. }
  489. /**
  490. * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
  491. * @hif_ctx: hif context
  492. *
  493. * This function will invoke synchronous runtime resume.
  494. *
  495. * Return: status
  496. */
  497. int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
  498. {
  499. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  500. struct hif_runtime_pm_ctx *rpm_ctx;
  501. int pm_state;
  502. if (!scn)
  503. return -EINVAL;
  504. if (!hif_pci_pm_runtime_enabled(scn))
  505. return 0;
  506. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  507. pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
  508. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  509. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  510. HIF_INFO("Runtime PM resume is requested by %ps",
  511. (void *)_RET_IP_);
  512. rpm_ctx->pm_stats.request_resume++;
  513. rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
  514. return pm_runtime_resume(hif_bus_get_dev(scn));
  515. }
  516. /**
  517. * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
  518. * @scn: hif context
  519. * @flag: prevent linkdown if true otherwise allow
  520. *
  521. * this api should only be called as part of bus prevent linkdown
  522. */
  523. void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  524. {
  525. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  526. if (flag)
  527. qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
  528. else
  529. qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
  530. }
/**
 * __hif_runtime_pm_set_state(): utility function
 * @scn: hif context
 * @state: state to set
 *
 * indexes into the runtime pm state and sets it.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&rpm_ctx->pm_state, state);
}
/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}
/**
 * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm resuming has started
 */
static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
}
/**
 * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime pm suspend has started
 */
static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
}
/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 * @scn: hif context
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}
/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 * @hif_ctx: hif context
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspended++;
	/* Remember when we suspended; debugfs shows "Suspended Since" */
	rpm_ctx->pm_stats.suspend_jiffies = jiffies;
}
/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 * @hif_ctx: hif context
 *
 * Record the suspend failure in the pm stats. The caller is expected
 * to mark last busy to prevent an immediate runtime suspend retry.
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.suspend_err++;
}
/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 * @hif_ctx: hif context
 *
 * Record the resume in the pm stats. The caller marks last busy to
 * prevent an immediate runtime suspend.
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (!rpm_ctx)
		return;

	rpm_ctx->pm_stats.resumed++;
}
/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 * @hif_ctx: hif context
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state back to ON.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: hif context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down. otherwise return 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);
	return 0;
}
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: hif context
 *
 * Record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: hif context
 *
 * Disable wake-interrupt monitoring and update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}
  682. /**
  683. * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
  684. *
  685. * record the success.
  686. * adjust the runtime_pm state
  687. */
  688. void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
  689. {
  690. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  691. hif_log_runtime_resume_success(hif_ctx);
  692. hif_pm_runtime_mark_last_busy(hif_ctx);
  693. hif_runtime_pm_set_state_on(scn);
  694. }
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: hif context
 *
 * Suspends the bus in two phases (irq-enabled, then noirq). Between the
 * phases monitor_wake_intr is raised so a wake interrupt arriving
 * mid-suspend can be noticed; it is cleared again if the noirq phase
 * fails. On noirq failure the first phase is rolled back with
 * hif_bus_resume().
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	/* from here on a wake interrupt must trigger a resume request */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	/* suspend completed; clear the data-path rx busy marker */
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* roll back the first suspend phase; failure here is fatal */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
  723. /**
  724. * hif_fastpath_resume() - resume fastpath for runtimepm
  725. *
  726. * ensure that the fastpath write index register is up to date
  727. * since runtime pm may cause ce_send_fast to skip the register
  728. * write.
  729. *
  730. * fastpath only applicable to legacy copy engine
  731. */
  732. void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  733. {
  734. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  735. struct CE_state *ce_state;
  736. if (!scn)
  737. return;
  738. if (scn->fastpath_mode_on) {
  739. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  740. return;
  741. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  742. qdf_spin_lock_bh(&ce_state->ce_index_lock);
  743. /*war_ce_src_ring_write_idx_set */
  744. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  745. ce_state->src_ring->write_index);
  746. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  747. Q_TARGET_ACCESS_END(scn);
  748. }
  749. }
  750. /**
  751. * hif_runtime_resume() - do the bus resume part of a runtime resume
  752. *
  753. * Return: 0 for success and non-zero error code for failure
  754. */
  755. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  756. {
  757. QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
  758. QDF_BUG(!hif_bus_resume(hif_ctx));
  759. return 0;
  760. }
  761. /**
  762. * hif_pm_stats_runtime_get_record() - record runtime get statistics
  763. * @scn: hif context
  764. * @rtpm_dbgid: debug id to trace who use it
  765. *
  766. *
  767. * Return: void
  768. */
  769. static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
  770. wlan_rtpm_dbgid rtpm_dbgid)
  771. {
  772. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  773. if (rtpm_dbgid >= RTPM_ID_MAX) {
  774. QDF_BUG(0);
  775. return;
  776. }
  777. qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
  778. qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
  779. rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
  780. qdf_get_log_timestamp();
  781. }
  782. /**
  783. * hif_pm_stats_runtime_put_record() - record runtime put statistics
  784. * @scn: hif context
  785. * @rtpm_dbgid: dbg_id to trace who use it
  786. *
  787. *
  788. * Return: void
  789. */
  790. static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
  791. wlan_rtpm_dbgid rtpm_dbgid)
  792. {
  793. struct device *dev = hif_bus_get_dev(scn);
  794. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  795. if (rtpm_dbgid >= RTPM_ID_MAX) {
  796. QDF_BUG(0);
  797. return;
  798. }
  799. if (atomic_read(&dev->power.usage_count) <= 0) {
  800. QDF_BUG(0);
  801. return;
  802. }
  803. qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
  804. qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
  805. rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
  806. qdf_get_log_timestamp();
  807. }
  808. /**
  809. * hif_pm_runtime_get_sync() - do a get operation with sync resume
  810. * @hif_ctx: pointer of HIF context
  811. * @rtpm_dbgid: dbgid to trace who use it
  812. *
  813. * A get operation will prevent a runtime suspend until a corresponding
  814. * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
  815. * resume instead of requesting a resume if it is runtime PM suspended
  816. * so it can only be called in non-atomic context.
  817. *
  818. * Return: 0 if it is runtime PM resumed otherwise an error code.
  819. */
  820. int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
  821. wlan_rtpm_dbgid rtpm_dbgid)
  822. {
  823. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  824. struct device *dev = hif_bus_get_dev(scn);
  825. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  826. int pm_state;
  827. int ret;
  828. if (!rpm_ctx)
  829. return -EINVAL;
  830. if (!hif_pci_pm_runtime_enabled(scn))
  831. return 0;
  832. pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
  833. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  834. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  835. hif_info_high("Runtime PM resume is requested by %ps",
  836. (void *)_RET_IP_);
  837. hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
  838. ret = pm_runtime_get_sync(dev);
  839. /* Get can return 1 if the device is already active, just return
  840. * success in that case.
  841. */
  842. if (ret > 0)
  843. ret = 0;
  844. if (ret) {
  845. rpm_ctx->pm_stats.runtime_get_err++;
  846. hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
  847. qdf_atomic_read(&rpm_ctx->pm_state), ret);
  848. hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
  849. }
  850. return ret;
  851. }
  852. /**
  853. * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
  854. * @hif_ctx: pointer of HIF context
  855. * @rtpm_dbgid: dbgid to trace who use it
  856. *
  857. * This API will do a runtime put operation followed by a sync suspend if usage
  858. * count is 0 so it can only be called in non-atomic context.
  859. *
  860. * Return: 0 for success otherwise an error code
  861. */
  862. int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
  863. wlan_rtpm_dbgid rtpm_dbgid)
  864. {
  865. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  866. struct device *dev;
  867. int usage_count;
  868. char *err = NULL;
  869. if (!scn)
  870. return -EINVAL;
  871. if (!hif_pci_pm_runtime_enabled(scn))
  872. return 0;
  873. dev = hif_bus_get_dev(scn);
  874. usage_count = atomic_read(&dev->power.usage_count);
  875. if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
  876. err = "Uexpected PUT when runtime PM is disabled";
  877. else if (usage_count == 0)
  878. err = "PUT without a GET Operation";
  879. if (err) {
  880. hif_pci_runtime_pm_warn(scn, err);
  881. return -EINVAL;
  882. }
  883. hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
  884. return pm_runtime_put_sync_suspend(dev);
  885. }
  886. /**
  887. * hif_pm_runtime_request_resume() - Invoke async runtime resume
  888. * @hif_ctx: hif context
  889. *
  890. * This function will invoke asynchronous runtime resume.
  891. *
  892. * Return: status
  893. */
  894. int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
  895. {
  896. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  897. struct hif_runtime_pm_ctx *rpm_ctx;
  898. int pm_state;
  899. if (!scn)
  900. return -EINVAL;
  901. if (!hif_pci_pm_runtime_enabled(scn))
  902. return 0;
  903. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  904. pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
  905. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  906. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  907. HIF_INFO("Runtime PM resume is requested by %ps",
  908. (void *)_RET_IP_);
  909. rpm_ctx->pm_stats.request_resume++;
  910. rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
  911. return hif_pm_request_resume(hif_bus_get_dev(scn));
  912. }
  913. /**
  914. * hif_pm_runtime_mark_last_busy() - Mark last busy time
  915. * @hif_ctx: hif context
  916. *
  917. * This function will mark the last busy time, this will be used
  918. * to check if auto suspend delay expired or not.
  919. *
  920. * Return: void
  921. */
  922. void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
  923. {
  924. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  925. struct hif_runtime_pm_ctx *rpm_ctx;
  926. if (!scn)
  927. return;
  928. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  929. rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
  930. rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
  931. pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
  932. return;
  933. }
  934. /**
  935. * hif_pm_runtime_get_noresume() - Inc usage count without resume
  936. * @hif_ctx: hif context
  937. * rtpm_dbgid: Id of the module calling get
  938. *
  939. * This function will increment device usage count to avoid runtime
  940. * suspend, but it would not do resume.
  941. *
  942. * Return: void
  943. */
  944. void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
  945. wlan_rtpm_dbgid rtpm_dbgid)
  946. {
  947. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  948. if (!scn)
  949. return;
  950. if (!hif_pci_pm_runtime_enabled(scn))
  951. return;
  952. hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
  953. pm_runtime_get_noresume(hif_bus_get_dev(scn));
  954. }
/**
 * hif_pm_runtime_get() - do a get opperation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get opperation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *         otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);

	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
		ret = __hif_pm_runtime_get(dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* Any non-zero result leaves no usable get; undo it here.
		 * Note this also puts back for -EINPROGRESS.
		 */
		if (ret)
			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);

		/* -EINPROGRESS means a resume is under way: not an error */
		if (ret && ret != -EINPROGRESS) {
			rpm_ctx->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&rpm_ctx->pm_state), ret);
		}

		return ret;
	}

	/* Bus suspended or suspending: only request a resume. The caller
	 * must retry later (-EAGAIN), or back off in other states (-EBUSY).
	 */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(dev);

	return ret;
}
  1018. /**
  1019. * hif_pm_runtime_put() - do a put operation on the device
  1020. * @hif_ctx: pointer of HIF context
  1021. * @rtpm_dbgid: dbgid to trace who use it
  1022. *
  1023. * A put operation will allow a runtime suspend after a corresponding
  1024. * get was done. This api should be used when sending data.
  1025. *
  1026. * This api will return a failure if runtime pm is stopped
  1027. * This api will return failure if it would decrement the usage count below 0.
  1028. *
  1029. * return: QDF_STATUS_SUCCESS if the put is performed
  1030. */
  1031. int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
  1032. wlan_rtpm_dbgid rtpm_dbgid)
  1033. {
  1034. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1035. struct device *dev;
  1036. int usage_count;
  1037. char *error = NULL;
  1038. if (!scn) {
  1039. HIF_ERROR("%s: Could not do runtime put, scn is null",
  1040. __func__);
  1041. return -EFAULT;
  1042. }
  1043. if (!hif_pci_pm_runtime_enabled(scn))
  1044. return 0;
  1045. dev = hif_bus_get_dev(scn);
  1046. usage_count = atomic_read(&dev->power.usage_count);
  1047. if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
  1048. error = "Unexpected PUT when runtime PM is disabled";
  1049. else if (usage_count == 0)
  1050. error = "PUT without a GET operation";
  1051. if (error) {
  1052. hif_pci_runtime_pm_warn(scn, error);
  1053. return -EINVAL;
  1054. }
  1055. hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
  1056. hif_pm_runtime_mark_last_busy(hif_ctx);
  1057. hif_pm_runtime_put_auto(dev);
  1058. return 0;
  1059. }
  1060. /**
  1061. * hif_pm_runtime_put_noidle() - do a put operation with no idle
  1062. * @hif_ctx: pointer of HIF context
  1063. * @rtpm_dbgid: dbgid to trace who use it
  1064. *
  1065. * This API will do a runtime put no idle operation
  1066. *
  1067. * Return: 0 for success otherwise an error code
  1068. */
  1069. int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
  1070. wlan_rtpm_dbgid rtpm_dbgid)
  1071. {
  1072. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1073. struct device *dev;
  1074. int usage_count;
  1075. char *err = NULL;
  1076. if (!scn)
  1077. return -EINVAL;
  1078. if (!hif_pci_pm_runtime_enabled(scn))
  1079. return 0;
  1080. dev = hif_bus_get_dev(scn);
  1081. usage_count = atomic_read(&dev->power.usage_count);
  1082. if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
  1083. err = "Unexpected PUT when runtime PM is disabled";
  1084. else if (usage_count == 0)
  1085. err = "PUT without a GET operation";
  1086. if (err) {
  1087. hif_pci_runtime_pm_warn(scn, err);
  1088. return -EINVAL;
  1089. }
  1090. hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
  1091. pm_runtime_put_noidle(dev);
  1092. return 0;
  1093. }
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 *                                      reason
 * @scn: hif context
 * @lock: runtime_pm lock being acquired
 *
 * NOTE(review): all visible callers hold rpm_ctx->runtime_lock around this
 * call; the list/counter updates below appear to rely on it — confirm.
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
					    struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(dev);

	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't be do runtime_put here as in later point allow
	 * suspend gets called with the the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(scn,
					"Prevent Suspend Runtime PM Error");
	}

	/* the lock is recorded even when the get reported an error: the
	 * matching allow-suspend rebalances the usage count later
	 */
	rpm_ctx->prevent_suspend_cnt++;

	lock->active = true;
	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);

	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	return ret;
}
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @scn: hif context
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * NOTE(review): all visible callers hold rpm_ctx->runtime_lock around this
 * call; the list/counter updates below appear to rely on it — confirm.
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	int usage_count;

	/* nothing to do when no lock is outstanding or this one isn't held */
	if (rpm_ctx->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&dev->power.usage_count);

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
	    usage_count == 0) {
		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
		return -EINVAL;
	}

	list_del(&lock->list);

	rpm_ctx->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
	ret = hif_pm_runtime_put_auto(dev);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);

	return ret;
}
/**
 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
 * @data: calback data that is the hif context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 *
 * dummy implementation until lock acquisition is implemented.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_softc *scn = data;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	timer_expires = rpm_ctx->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                        CPU1 (timeout function)
	 * ----                        ----------------------
	 * spin_lock_irq
	 *                             timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                             spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		rpm_ctx->runtime_timer_expires = 0;
		/* release every lock that was taken with a timeout */
		list_for_each_entry_safe(context, temp,
					 &rpm_ctx->prevent_suspend_list,
					 list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(scn, context);
				rpm_ctx->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
  1229. /**
  1230. * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
  1231. * @scn: hif context
  1232. * @data: runtime pm lock
  1233. *
  1234. * This function will prevent runtime suspend, by incrementing
  1235. * device's usage count.
  1236. *
  1237. * Return: status
  1238. */
  1239. int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
  1240. struct hif_pm_runtime_lock *data)
  1241. {
  1242. struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
  1243. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1244. struct hif_pm_runtime_lock *context = data;
  1245. if (!scn->hif_config.enable_runtime_pm)
  1246. return 0;
  1247. if (!context)
  1248. return -EINVAL;
  1249. if (in_irq())
  1250. WARN_ON(1);
  1251. spin_lock_bh(&rpm_ctx->runtime_lock);
  1252. context->timeout = 0;
  1253. __hif_pm_runtime_prevent_suspend(scn, context);
  1254. spin_unlock_bh(&rpm_ctx->runtime_lock);
  1255. return 0;
  1256. }
/**
 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @ol_sc: hif context
 * @data: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Return: status
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/* not valid from hard interrupt context (bh-disabling lock below) */
	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);

	__hif_pm_runtime_allow_suspend(scn, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (rpm_ctx->prevent_suspend_cnt == 0 &&
	    rpm_ctx->runtime_timer_expires > 0) {
		/* last timed lock released early: cancel the shared timer */
		qdf_timer_free(&rpm_ctx->runtime_timer);
		rpm_ctx->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	unsigned long expires;
	struct hif_pm_runtime_lock *context = lock;

	if (hif_is_load_or_unload_in_progress(scn)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
			  __func__);
		return -EINVAL;
	}

	if (hif_is_recovery_in_progress(scn)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	/* not valid from hard interrupt context (bh-disabling lock below) */
	if (in_irq())
		WARN_ON(1);

	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= dev->power.autosuspend_delay) {
		hif_pm_request_resume(dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}

	expires = jiffies + msecs_to_jiffies(delay);
	/* avoid 0, which is the "no timer armed" sentinel */
	expires += !expires;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(scn, context);
	rpm_ctx->pm_stats.prevent_suspend_timeout++;

	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, rpm_ctx->runtime_timer_expires)) {
		qdf_timer_mod(&rpm_ctx->runtime_timer, delay);
		rpm_ctx->runtime_timer_expires = expires;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	/* NOTE(review): logged at error level even though this is not an
	 * error path — presumably for log visibility; confirm before
	 * demoting.
	 */
	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  delay, ret);

	return ret;
}
  1365. /**
  1366. * hif_runtime_lock_init() - API to initialize Runtime PM context
  1367. * @name: Context name
  1368. *
  1369. * This API initializes the Runtime PM context of the caller and
  1370. * return the pointer.
  1371. *
  1372. * Return: None
  1373. */
  1374. int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
  1375. {
  1376. struct hif_pm_runtime_lock *context;
  1377. HIF_INFO("Initializing Runtime PM wakelock %s", name);
  1378. context = qdf_mem_malloc(sizeof(*context));
  1379. if (!context)
  1380. return -ENOMEM;
  1381. context->name = name ? name : "Default";
  1382. lock->lock = context;
  1383. return 0;
  1384. }
  1385. /**
  1386. * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
  1387. * @data: Runtime PM context
  1388. *
  1389. * Return: void
  1390. */
  1391. void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
  1392. struct hif_pm_runtime_lock *data)
  1393. {
  1394. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1395. struct hif_runtime_pm_ctx *rpm_ctx;
  1396. struct hif_pm_runtime_lock *context = data;
  1397. if (!context) {
  1398. HIF_ERROR("Runtime PM wakelock context is NULL");
  1399. return;
  1400. }
  1401. HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
  1402. /*
  1403. * Ensure to delete the context list entry and reduce the usage count
  1404. * before freeing the context if context is active.
  1405. */
  1406. if (scn) {
  1407. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1408. spin_lock_bh(&rpm_ctx->runtime_lock);
  1409. __hif_pm_runtime_allow_suspend(scn, context);
  1410. spin_unlock_bh(&rpm_ctx->runtime_lock);
  1411. }
  1412. qdf_mem_free(context);
  1413. }
  1414. /**
  1415. * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
  1416. * @hif_ctx: HIF context
  1417. *
  1418. * Return: true for runtime suspended, otherwise false
  1419. */
  1420. bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
  1421. {
  1422. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1423. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1424. return qdf_atomic_read(&rpm_ctx->pm_state) ==
  1425. HIF_PM_RUNTIME_STATE_SUSPENDED;
  1426. }
  1427. /**
  1428. * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
  1429. * @hif_ctx: HIF context
  1430. *
  1431. * monitor_wake_intr variable can be used to indicate if driver expects wake
  1432. * MSI for runtime PM
  1433. *
  1434. * Return: monitor_wake_intr variable
  1435. */
  1436. int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
  1437. {
  1438. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1439. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1440. return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
  1441. }
  1442. /**
  1443. * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
  1444. * @hif_ctx: HIF context
  1445. * @val: value to set
  1446. *
  1447. * monitor_wake_intr variable can be used to indicate if driver expects wake
  1448. * MSI for runtime PM
  1449. *
  1450. * Return: void
  1451. */
  1452. void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
  1453. int val)
  1454. {
  1455. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1456. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1457. qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
  1458. }
  1459. /**
  1460. * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
  1461. * suspended and request resume.
  1462. * @hif_ctx: HIF context
  1463. *
  1464. * This function is to check if the device is runtime suspended and
  1465. * request for runtime resume.
  1466. *
  1467. * Return: void
  1468. */
  1469. void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
  1470. {
  1471. if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
  1472. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  1473. hif_pm_runtime_request_resume(hif_ctx);
  1474. }
  1475. }
  1476. /**
  1477. * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
  1478. * @hif_ctx: HIF context
  1479. *
  1480. * Return: void
  1481. */
  1482. void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  1483. {
  1484. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1485. struct hif_runtime_pm_ctx *rpm_ctx;
  1486. if (!scn)
  1487. return;
  1488. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1489. qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
  1490. rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
  1491. hif_pm_runtime_mark_last_busy(hif_ctx);
  1492. }
  1493. /**
  1494. * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
  1495. * @hif_ctx: HIF context
  1496. *
  1497. * Return: dp rx busy set value
  1498. */
  1499. int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  1500. {
  1501. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1502. struct hif_runtime_pm_ctx *rpm_ctx;
  1503. if (!scn)
  1504. return 0;
  1505. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1506. return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
  1507. }
  1508. /**
  1509. * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
  1510. * @hif_ctx: HIF context
  1511. *
  1512. * Return: timestamp of last mark busy by dp rx
  1513. */
  1514. qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
  1515. {
  1516. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1517. struct hif_runtime_pm_ctx *rpm_ctx;
  1518. if (!scn)
  1519. return 0;
  1520. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1521. return rpm_ctx->dp_last_busy_timestamp;
  1522. }
  1523. #endif /* FEATURE_RUNTIME_PM */