/* hif_runtime_pm.c */
  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  10. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  12. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  13. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/slab.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/if_arp.h>
  18. #include "hif_io32.h"
  19. #include "hif_runtime_pm.h"
  20. #include "hif.h"
  21. #include "target_type.h"
  22. #include "hif_main.h"
  23. #include "ce_main.h"
  24. #include "ce_api.h"
  25. #include "ce_internal.h"
  26. #include "ce_reg.h"
  27. #include "ce_bmi.h"
  28. #include "regtable.h"
  29. #include "hif_hw_version.h"
  30. #include <linux/debugfs.h>
  31. #include <linux/seq_file.h>
  32. #include "qdf_status.h"
  33. #include "qdf_atomic.h"
  34. #include "pld_common.h"
  35. #include "mp_dev.h"
  36. #include "hif_debug.h"
  37. #include "ce_tasklet.h"
  38. #include "targaddrs.h"
  39. #include "hif_exec.h"
  40. #ifdef FEATURE_RUNTIME_PM
  41. /**
  42. * hif_pci_pm_runtime_enabled() - To check if Runtime PM is enabled
  43. * @scn: hif context
  44. *
  45. * This function will check if Runtime PM is enabled or not.
  46. *
  47. * Return: void
  48. */
  49. static bool hif_pci_pm_runtime_enabled(struct hif_softc *scn)
  50. {
  51. if (scn->hif_config.enable_runtime_pm)
  52. return true;
  53. return pm_runtime_enabled(hif_bus_get_dev(scn));
  54. }
  55. /**
  56. * hif_pm_runtime_state_to_string() - Mapping state into string
  57. * @state: runtime pm state
  58. *
  59. * This function will map the runtime pm state into corresponding
  60. * string for debug purpose.
  61. *
  62. * Return: pointer to the string
  63. */
  64. static const char *hif_pm_runtime_state_to_string(uint32_t state)
  65. {
  66. switch (state) {
  67. case HIF_PM_RUNTIME_STATE_NONE:
  68. return "INIT_STATE";
  69. case HIF_PM_RUNTIME_STATE_ON:
  70. return "ON";
  71. case HIF_PM_RUNTIME_STATE_RESUMING:
  72. return "RESUMING";
  73. case HIF_PM_RUNTIME_STATE_SUSPENDING:
  74. return "SUSPENDING";
  75. case HIF_PM_RUNTIME_STATE_SUSPENDED:
  76. return "SUSPENDED";
  77. default:
  78. return "INVALID STATE";
  79. }
  80. }
  81. #define HIF_PCI_RUNTIME_PM_STATS(_s, _rpm_ctx, _name) \
  82. seq_printf(_s, "%30s: %u\n", #_name, (_rpm_ctx)->pm_stats._name)
  83. /**
  84. * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
  85. * @hif_ctx: hif_softc context
  86. * @msg: log message
  87. *
  88. * log runtime pm stats when something seems off.
  89. *
  90. * Return: void
  91. */
  92. static void hif_pci_runtime_pm_warn(struct hif_softc *scn,
  93. const char *msg)
  94. {
  95. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  96. struct device *dev = hif_bus_get_dev(scn);
  97. struct hif_pm_runtime_lock *ctx;
  98. int i;
  99. hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
  100. msg, atomic_read(&dev->power.usage_count),
  101. hif_pm_runtime_state_to_string(
  102. atomic_read(&rpm_ctx->pm_state)),
  103. rpm_ctx->prevent_suspend_cnt);
  104. hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
  105. dev->power.runtime_status,
  106. dev->power.runtime_error,
  107. dev->power.disable_depth,
  108. dev->power.autosuspend_delay);
  109. hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
  110. qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get),
  111. qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put),
  112. rpm_ctx->pm_stats.request_resume);
  113. hif_nofl_debug("get put get-timestamp put-timestamp :DBGID_NAME");
  114. for (i = 0; i < RTPM_ID_MAX; i++) {
  115. hif_nofl_debug("%-10d %-10d 0x%-10llx 0x%-10llx :%-30s",
  116. qdf_atomic_read(
  117. &rpm_ctx->pm_stats.runtime_get_dbgid[i]),
  118. qdf_atomic_read(
  119. &rpm_ctx->pm_stats.runtime_put_dbgid[i]),
  120. rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i],
  121. rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i],
  122. rtpm_string_from_dbgid(i));
  123. }
  124. hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
  125. qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend),
  126. qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
  127. hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
  128. rpm_ctx->pm_stats.prevent_suspend_timeout,
  129. rpm_ctx->pm_stats.allow_suspend_timeout);
  130. hif_nofl_debug("Suspended: %u, resumed: %u count",
  131. rpm_ctx->pm_stats.suspended,
  132. rpm_ctx->pm_stats.resumed);
  133. hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
  134. rpm_ctx->pm_stats.suspend_err,
  135. rpm_ctx->pm_stats.runtime_get_err);
  136. hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");
  137. list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
  138. hif_nofl_debug("source %s; timeout %d ms",
  139. ctx->name, ctx->timeout);
  140. }
  141. if (qdf_is_fw_down()) {
  142. hif_err("fw is down");
  143. return;
  144. }
  145. QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
  146. }
  147. /**
  148. * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
  149. * @s: file to print to
  150. * @data: unused
  151. *
  152. * debugging tool added to the debug fs for displaying runtimepm stats
  153. *
  154. * Return: 0
  155. */
  156. static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
  157. {
  158. struct hif_softc *scn = s->private;
  159. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  160. struct device *dev = hif_bus_get_dev(scn);
  161. static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
  162. "SUSPENDING", "SUSPENDED"};
  163. unsigned int msecs_age;
  164. qdf_time_t usecs_age;
  165. int pm_state = atomic_read(&rpm_ctx->pm_state);
  166. unsigned long timer_expires;
  167. struct hif_pm_runtime_lock *ctx;
  168. int i;
  169. seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
  170. seq_printf(s, "%30s: %ps\n", "Last Resume Caller",
  171. rpm_ctx->pm_stats.last_resume_caller);
  172. seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
  173. rpm_ctx->pm_stats.last_busy_marker);
  174. usecs_age = qdf_get_log_timestamp_usecs() -
  175. rpm_ctx->pm_stats.last_busy_timestamp;
  176. seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
  177. rpm_ctx->pm_stats.last_busy_timestamp / 1000000,
  178. rpm_ctx->pm_stats.last_busy_timestamp % 1000000);
  179. seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
  180. usecs_age / 1000000, usecs_age % 1000000);
  181. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
  182. msecs_age = jiffies_to_msecs(jiffies -
  183. rpm_ctx->pm_stats.suspend_jiffies);
  184. seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
  185. msecs_age / 1000, msecs_age % 1000);
  186. }
  187. seq_printf(s, "%30s: %d\n", "PM Usage count",
  188. atomic_read(&dev->power.usage_count));
  189. seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
  190. rpm_ctx->prevent_suspend_cnt);
  191. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspended);
  192. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, suspend_err);
  193. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, resumed);
  194. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, request_resume);
  195. seq_printf(s, "%30s: %u\n", "prevent_suspend",
  196. qdf_atomic_read(&rpm_ctx->pm_stats.prevent_suspend));
  197. seq_printf(s, "%30s: %u\n", "allow_suspend",
  198. qdf_atomic_read(&rpm_ctx->pm_stats.allow_suspend));
  199. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, prevent_suspend_timeout);
  200. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, allow_suspend_timeout);
  201. HIF_PCI_RUNTIME_PM_STATS(s, rpm_ctx, runtime_get_err);
  202. seq_printf(s, "%30s: %u\n", "runtime_get",
  203. qdf_atomic_read(&rpm_ctx->pm_stats.runtime_get));
  204. seq_printf(s, "%30s: %u\n", "runtime_put",
  205. qdf_atomic_read(&rpm_ctx->pm_stats.runtime_put));
  206. seq_puts(s, "get put get-timestamp put-timestamp :DBGID_NAME\n");
  207. for (i = 0; i < RTPM_ID_MAX; i++) {
  208. seq_printf(s, "%-10d ",
  209. qdf_atomic_read(
  210. &rpm_ctx->pm_stats.runtime_get_dbgid[i]));
  211. seq_printf(s, "%-10d ",
  212. qdf_atomic_read(
  213. &rpm_ctx->pm_stats.runtime_put_dbgid[i]));
  214. seq_printf(s, "0x%-10llx ",
  215. rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[i]);
  216. seq_printf(s, "0x%-10llx ",
  217. rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[i]);
  218. seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
  219. }
  220. timer_expires = rpm_ctx->runtime_timer_expires;
  221. if (timer_expires > 0) {
  222. msecs_age = jiffies_to_msecs(timer_expires - jiffies);
  223. seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
  224. msecs_age / 1000, msecs_age % 1000);
  225. }
  226. spin_lock_bh(&rpm_ctx->runtime_lock);
  227. if (list_empty(&rpm_ctx->prevent_suspend_list)) {
  228. spin_unlock_bh(&rpm_ctx->runtime_lock);
  229. return 0;
  230. }
  231. seq_printf(s, "%30s: ", "Active Wakeup_Sources");
  232. list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list) {
  233. seq_printf(s, "%s", ctx->name);
  234. if (ctx->timeout)
  235. seq_printf(s, "(%d ms)", ctx->timeout);
  236. seq_puts(s, " ");
  237. }
  238. seq_puts(s, "\n");
  239. spin_unlock_bh(&rpm_ctx->runtime_lock);
  240. return 0;
  241. }
  242. #undef HIF_PCI_RUNTIME_PM_STATS
  243. /**
  244. * hif_pci_autopm_open() - open a debug fs file to access the runtime pm stats
  245. * @inode
  246. * @file
  247. *
  248. * Return: linux error code of single_open.
  249. */
  250. static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
  251. {
  252. return single_open(file, hif_pci_pm_runtime_debugfs_show,
  253. inode->i_private);
  254. }
  255. static const struct file_operations hif_pci_runtime_pm_fops = {
  256. .owner = THIS_MODULE,
  257. .open = hif_pci_runtime_pm_open,
  258. .release = single_release,
  259. .read = seq_read,
  260. .llseek = seq_lseek,
  261. };
  262. /**
  263. * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
  264. * @scn: hif context
  265. *
  266. * creates a debugfs entry to debug the runtime pm feature.
  267. */
  268. static void hif_runtime_pm_debugfs_create(struct hif_softc *scn)
  269. {
  270. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  271. rpm_ctx->pm_dentry = debugfs_create_file("cnss_runtime_pm",
  272. 0400, NULL, scn,
  273. &hif_pci_runtime_pm_fops);
  274. }
  275. /**
  276. * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
  277. * @sc: pci context
  278. *
  279. * removes the debugfs entry to debug the runtime pm feature.
  280. */
  281. static void hif_runtime_pm_debugfs_remove(struct hif_softc *scn)
  282. {
  283. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  284. debugfs_remove(rpm_ctx->pm_dentry);
  285. }
  286. /**
  287. * hif_runtime_init() - Initialize Runtime PM
  288. * @dev: device structure
  289. * @delay: delay to be confgured for auto suspend
  290. *
  291. * This function will init all the Runtime PM config.
  292. *
  293. * Return: void
  294. */
  295. static void hif_runtime_init(struct device *dev, int delay)
  296. {
  297. pm_runtime_set_autosuspend_delay(dev, delay);
  298. pm_runtime_use_autosuspend(dev);
  299. pm_runtime_allow(dev);
  300. pm_runtime_mark_last_busy(dev);
  301. pm_runtime_put_noidle(dev);
  302. pm_suspend_ignore_children(dev, true);
  303. }
  304. /**
  305. * hif_runtime_exit() - Deinit/Exit Runtime PM
  306. * @dev: device structure
  307. *
  308. * This function will deinit all the Runtime PM config.
  309. *
  310. * Return: void
  311. */
  312. static void hif_runtime_exit(struct device *dev)
  313. {
  314. pm_runtime_get_noresume(dev);
  315. pm_runtime_set_active(dev);
  316. /* Symmetric call to make sure default usage count == 2 */
  317. pm_runtime_forbid(dev);
  318. }
  319. static void hif_pm_runtime_lock_timeout_fn(void *data);
  320. /**
  321. * hif_pm_runtime_start(): start the runtime pm
  322. * @scn: hif context
  323. *
  324. * After this call, runtime pm will be active.
  325. */
  326. void hif_pm_runtime_start(struct hif_softc *scn)
  327. {
  328. uint32_t mode = hif_get_conparam(scn);
  329. struct device *dev = hif_bus_get_dev(scn);
  330. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  331. if (!scn->hif_config.enable_runtime_pm) {
  332. hif_info("RUNTIME PM is disabled in ini");
  333. return;
  334. }
  335. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  336. mode == QDF_GLOBAL_MONITOR_MODE) {
  337. hif_info("RUNTIME PM is disabled for FTM/EPPING mode");
  338. return;
  339. }
  340. qdf_timer_init(NULL, &rpm_ctx->runtime_timer,
  341. hif_pm_runtime_lock_timeout_fn,
  342. scn, QDF_TIMER_TYPE_WAKE_APPS);
  343. hif_info("Enabling RUNTIME PM, Delay: %d ms",
  344. scn->hif_config.runtime_pm_delay);
  345. qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_ON);
  346. hif_runtime_init(dev, scn->hif_config.runtime_pm_delay);
  347. hif_runtime_pm_debugfs_create(scn);
  348. }
  349. /**
  350. * hif_pm_runtime_stop(): stop runtime pm
  351. * @scn: hif context
  352. *
  353. * Turns off runtime pm and frees corresponding resources
  354. * that were acquired by hif_runtime_pm_start().
  355. */
  356. void hif_pm_runtime_stop(struct hif_softc *scn)
  357. {
  358. uint32_t mode = hif_get_conparam(scn);
  359. struct device *dev = hif_bus_get_dev(scn);
  360. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  361. if (!scn->hif_config.enable_runtime_pm)
  362. return;
  363. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  364. mode == QDF_GLOBAL_MONITOR_MODE)
  365. return;
  366. hif_runtime_exit(dev);
  367. hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(scn));
  368. qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  369. hif_runtime_pm_debugfs_remove(scn);
  370. qdf_timer_free(&rpm_ctx->runtime_timer);
  371. }
  372. /**
  373. * hif_pm_runtime_open(): initialize runtime pm
  374. * @scn: hif ctx
  375. *
  376. * Early initialization
  377. */
  378. void hif_pm_runtime_open(struct hif_softc *scn)
  379. {
  380. int i;
  381. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  382. spin_lock_init(&rpm_ctx->runtime_lock);
  383. qdf_spinlock_create(&rpm_ctx->runtime_suspend_lock);
  384. qdf_atomic_init(&rpm_ctx->pm_state);
  385. hif_runtime_lock_init(&rpm_ctx->prevent_linkdown_lock,
  386. "prevent_linkdown_lock");
  387. qdf_atomic_set(&rpm_ctx->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  388. qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get);
  389. qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put);
  390. qdf_atomic_init(&rpm_ctx->pm_stats.allow_suspend);
  391. qdf_atomic_init(&rpm_ctx->pm_stats.prevent_suspend);
  392. for (i = 0; i < RTPM_ID_MAX; i++) {
  393. qdf_atomic_init(&rpm_ctx->pm_stats.runtime_get_dbgid[i]);
  394. qdf_atomic_init(&rpm_ctx->pm_stats.runtime_put_dbgid[i]);
  395. }
  396. INIT_LIST_HEAD(&rpm_ctx->prevent_suspend_list);
  397. }
  398. /**
  399. * hif_check_for_get_put_out_of_sync() - Check if Get/Put is out of sync
  400. * @scn: hif context
  401. *
  402. * This function will check if get and put are out of sync or not.
  403. *
  404. * Return: void
  405. */
  406. static void hif_check_for_get_put_out_of_sync(struct hif_softc *scn)
  407. {
  408. int32_t i;
  409. int32_t get_count, put_count;
  410. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  411. if (qdf_is_fw_down())
  412. return;
  413. for (i = 0; i < RTPM_ID_MAX; i++) {
  414. get_count = qdf_atomic_read(
  415. &rpm_ctx->pm_stats.runtime_get_dbgid[i]);
  416. put_count = qdf_atomic_read(
  417. &rpm_ctx->pm_stats.runtime_put_dbgid[i]);
  418. if (get_count != put_count) {
  419. QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
  420. rtpm_string_from_dbgid(i),
  421. get_count, put_count);
  422. }
  423. }
  424. }
  425. /**
  426. * hif_pm_runtime_sanitize_on_exit(): sanitize runtime PM gets/puts from driver
  427. * @scn: hif context
  428. *
  429. * Ensure all gets/puts are in sync before exiting runtime PM feature.
  430. * Also make sure all runtime PM locks are deinitialized properly.
  431. *
  432. * Return: void
  433. */
  434. static void hif_pm_runtime_sanitize_on_exit(struct hif_softc *scn)
  435. {
  436. struct hif_pm_runtime_lock *ctx, *tmp;
  437. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  438. hif_check_for_get_put_out_of_sync(scn);
  439. spin_lock_bh(&rpm_ctx->runtime_lock);
  440. list_for_each_entry_safe(ctx, tmp,
  441. &rpm_ctx->prevent_suspend_list, list) {
  442. spin_unlock_bh(&rpm_ctx->runtime_lock);
  443. hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(scn), ctx);
  444. spin_lock_bh(&rpm_ctx->runtime_lock);
  445. }
  446. spin_unlock_bh(&rpm_ctx->runtime_lock);
  447. }
  448. static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
  449. struct hif_pm_runtime_lock *lock);
  450. /**
  451. * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
  452. * @scn: hif context
  453. *
  454. * API is used to empty the runtime pm prevent suspend list.
  455. *
  456. * Return: void
  457. */
  458. static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_softc *scn)
  459. {
  460. struct hif_pm_runtime_lock *ctx, *tmp;
  461. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  462. spin_lock_bh(&rpm_ctx->runtime_lock);
  463. list_for_each_entry_safe(ctx, tmp,
  464. &rpm_ctx->prevent_suspend_list, list) {
  465. __hif_pm_runtime_allow_suspend(scn, ctx);
  466. }
  467. spin_unlock_bh(&rpm_ctx->runtime_lock);
  468. }
  469. /**
  470. * hif_pm_runtime_close(): close runtime pm
  471. * @scn: hif ctx
  472. *
  473. * ensure runtime_pm is stopped before closing the driver
  474. */
  475. void hif_pm_runtime_close(struct hif_softc *scn)
  476. {
  477. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  478. struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
  479. /*
  480. * Here cds hif context was already NULL,
  481. * so calling hif_runtime_lock_deinit, instead of
  482. * qdf_runtime_lock_deinit(&rpm_ctx->prevent_linkdown_lock);
  483. */
  484. hif_runtime_lock_deinit(hif_ctx, rpm_ctx->prevent_linkdown_lock.lock);
  485. hif_is_recovery_in_progress(scn) ?
  486. hif_pm_runtime_sanitize_on_ssr_exit(scn) :
  487. hif_pm_runtime_sanitize_on_exit(scn);
  488. qdf_spinlock_destroy(&rpm_ctx->runtime_suspend_lock);
  489. }
  490. /**
  491. * hif_pm_runtime_sync_resume() - Invoke synchronous runtime resume.
  492. * @hif_ctx: hif context
  493. *
  494. * This function will invoke synchronous runtime resume.
  495. *
  496. * Return: status
  497. */
  498. int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
  499. {
  500. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  501. struct hif_runtime_pm_ctx *rpm_ctx;
  502. int pm_state;
  503. if (!scn)
  504. return -EINVAL;
  505. if (!hif_pci_pm_runtime_enabled(scn))
  506. return 0;
  507. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  508. pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
  509. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  510. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  511. hif_info("Runtime PM resume is requested by %ps",
  512. (void *)_RET_IP_);
  513. rpm_ctx->pm_stats.request_resume++;
  514. rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
  515. return pm_runtime_resume(hif_bus_get_dev(scn));
  516. }
  517. /**
  518. * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
  519. * @scn: hif context
  520. * @flag: prevent linkdown if true otherwise allow
  521. *
  522. * this api should only be called as part of bus prevent linkdown
  523. */
  524. void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  525. {
  526. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  527. if (flag)
  528. qdf_runtime_pm_prevent_suspend(&rpm_ctx->prevent_linkdown_lock);
  529. else
  530. qdf_runtime_pm_allow_suspend(&rpm_ctx->prevent_linkdown_lock);
  531. }
  532. /**
  533. * __hif_runtime_pm_set_state(): utility function
  534. * @state: state to set
  535. *
  536. * indexes into the runtime pm state and sets it.
  537. */
  538. static void __hif_runtime_pm_set_state(struct hif_softc *scn,
  539. enum hif_pm_runtime_state state)
  540. {
  541. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  542. if (!rpm_ctx) {
  543. hif_err("HIF_CTX not initialized");
  544. return;
  545. }
  546. qdf_atomic_set(&rpm_ctx->pm_state, state);
  547. }
  548. /**
  549. * hif_runtime_pm_set_state_on(): adjust runtime pm state
  550. *
  551. * Notify hif that a the runtime pm state should be on
  552. */
  553. static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
  554. {
  555. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
  556. }
  557. /**
  558. * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
  559. *
  560. * Notify hif that a runtime pm resuming has started
  561. */
  562. static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
  563. {
  564. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
  565. }
  566. /**
  567. * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
  568. *
  569. * Notify hif that a runtime pm suspend has started
  570. */
  571. static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
  572. {
  573. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
  574. }
  575. /**
  576. * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
  577. *
  578. * Notify hif that a runtime suspend attempt has been completed successfully
  579. */
  580. static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
  581. {
  582. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
  583. }
  584. /**
  585. * hif_log_runtime_suspend_success() - log a successful runtime suspend
  586. */
  587. static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
  588. {
  589. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  590. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  591. if (!rpm_ctx)
  592. return;
  593. rpm_ctx->pm_stats.suspended++;
  594. rpm_ctx->pm_stats.suspend_jiffies = jiffies;
  595. }
  596. /**
  597. * hif_log_runtime_suspend_failure() - log a failed runtime suspend
  598. *
  599. * log a failed runtime suspend
  600. * mark last busy to prevent immediate runtime suspend
  601. */
  602. static void hif_log_runtime_suspend_failure(void *hif_ctx)
  603. {
  604. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  605. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  606. if (!rpm_ctx)
  607. return;
  608. rpm_ctx->pm_stats.suspend_err++;
  609. }
  610. /**
  611. * hif_log_runtime_resume_success() - log a successful runtime resume
  612. *
  613. * log a successful runtime resume
  614. * mark last busy to prevent immediate runtime suspend
  615. */
  616. static void hif_log_runtime_resume_success(void *hif_ctx)
  617. {
  618. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  619. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  620. if (!rpm_ctx)
  621. return;
  622. rpm_ctx->pm_stats.resumed++;
  623. }
  624. /**
  625. * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
  626. *
  627. * Record the failure.
  628. * mark last busy to delay a retry.
  629. * adjust the runtime_pm state.
  630. */
  631. void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
  632. {
  633. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  634. hif_log_runtime_suspend_failure(hif_ctx);
  635. hif_pm_runtime_mark_last_busy(hif_ctx);
  636. hif_runtime_pm_set_state_on(scn);
  637. }
  638. static void hif_pm_runtime_print_prevent_list(struct hif_softc *scn)
  639. {
  640. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  641. struct hif_pm_runtime_lock *ctx;
  642. hif_info("prevent_suspend_cnt %u", rpm_ctx->prevent_suspend_cnt);
  643. list_for_each_entry(ctx, &rpm_ctx->prevent_suspend_list, list)
  644. hif_info("%s", ctx->name);
  645. }
  646. static bool hif_pm_runtime_is_suspend_allowed(struct hif_softc *scn)
  647. {
  648. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  649. bool ret;
  650. if (!scn->hif_config.enable_runtime_pm)
  651. return 0;
  652. spin_lock_bh(&rpm_ctx->runtime_lock);
  653. ret = (rpm_ctx->prevent_suspend_cnt == 0);
  654. spin_unlock_bh(&rpm_ctx->runtime_lock);
  655. return ret;
  656. }
/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 * @hif_ctx: opaque HIF context
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * Return: -EINVAL if the bus won't go down or suspend is currently
 *         prevented, otherwise 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		hif_err("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_suspending(scn);

	/* keep this after set suspending */
	if (!hif_pm_runtime_is_suspend_allowed(scn)) {
		hif_info("Runtime PM not allowed now");
		hif_pm_runtime_print_prevent_list(scn);
		return -EINVAL;
	}

	return 0;
}
/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 * @hif_ctx: opaque HIF context
 *
 * Record the success.
 * adjust the runtime_pm state to SUSPENDED.
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}
/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 * @hif_ctx: opaque HIF context
 *
 * Clears the monitor-wake-interrupt flag and moves the runtime pm state
 * to RESUMING.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
	hif_runtime_pm_set_state_resuming(scn);
}
/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 * @hif_ctx: opaque HIF context
 *
 * record the success.
 * mark last busy so autosuspend does not fire immediately after resume.
 * adjust the runtime_pm state back to ON.
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_runtime_pm_set_state_on(scn);
}
/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 * @hif_ctx: opaque HIF context
 *
 * Suspends the bus, then arms monitor-wake-interrupt before the noirq
 * suspend phase. On a noirq failure the bus is resumed again and the
 * wake-interrupt flag is cleared, so the device is left fully awake.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend: %d", errno);
		return errno;
	}

	/* set before suspend_noirq so a wake MSI arriving during the
	 * noirq phase is noticed
	 */
	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		hif_err("Failed bus suspend noirq: %d", errno);
		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
		goto bus_resume;
	}

	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 0);

	return 0;

bus_resume:
	/* unwind the earlier hif_bus_suspend(); a resume failure here
	 * leaves the device unusable, hence the hard assert
	 */
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}
/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 * @hif_ctx: opaque HIF context
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		/* target access must be gated; bail if it can't be begun */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}
/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 * @hif_ctx: opaque HIF context
 *
 * Resumes the noirq phase first (must not fail — asserted), then the
 * regular bus resume.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	errno = hif_bus_resume(hif_ctx);
	if (errno)
		hif_err("Failed runtime resume: %d", errno);

	return errno;
}
/**
 * hif_pm_stats_runtime_get_record() - record runtime get statistics
 * @scn: hif context
 * @rtpm_dbgid: debug id to trace who use it
 *
 * Bumps the global and per-dbgid "get" counters and stamps the per-dbgid
 * timestamp of the most recent get.
 *
 * Return: void
 */
static void hif_pm_stats_runtime_get_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	/* dbgid indexes fixed-size stat arrays; out-of-range is a bug */
	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
		qdf_get_log_timestamp();
}
/**
 * hif_pm_stats_runtime_put_record() - record runtime put statistics
 * @scn: hif context
 * @rtpm_dbgid: dbg_id to trace who use it
 *
 * Bumps the global and per-dbgid "put" counters and stamps the per-dbgid
 * timestamp of the most recent put. Asserts (and skips recording) if the
 * device usage count is already non-positive, i.e. a put without a get.
 *
 * Return: void
 */
static void hif_pm_stats_runtime_put_record(struct hif_softc *scn,
					    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);

	if (rtpm_dbgid >= RTPM_ID_MAX) {
		QDF_BUG(0);
		return;
	}

	/* a put should always be paired with an earlier get */
	if (atomic_read(&dev->power.usage_count) <= 0) {
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put);
	qdf_atomic_inc(&rpm_ctx->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
	rpm_ctx->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
		qdf_get_log_timestamp();
}
/**
 * hif_pm_runtime_get_sync() - do a get operation with sync resume
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a corresponding
 * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
 * resume instead of requesting a resume if it is runtime PM suspended
 * so it can only be called in non-atomic context.
 *
 * Return: 0 if it is runtime PM resumed otherwise an error code.
 */
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int pm_state;
	int ret;

	if (!rpm_ctx)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	ret = pm_runtime_get_sync(dev);

	/* Get can return 1 if the device is already active, just return
	 * success in that case.
	 */
	if (ret > 0)
		ret = 0;

	if (ret) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
			qdf_atomic_read(&rpm_ctx->pm_state), ret);
		/* pm_runtime_get_sync bumps usage count even on failure,
		 * so drop it again before reporting the error
		 */
		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
	}

	return ret;
}
  880. /**
  881. * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
  882. * @hif_ctx: pointer of HIF context
  883. * @rtpm_dbgid: dbgid to trace who use it
  884. *
  885. * This API will do a runtime put operation followed by a sync suspend if usage
  886. * count is 0 so it can only be called in non-atomic context.
  887. *
  888. * Return: 0 for success otherwise an error code
  889. */
  890. int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
  891. wlan_rtpm_dbgid rtpm_dbgid)
  892. {
  893. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  894. struct device *dev;
  895. int usage_count;
  896. char *err = NULL;
  897. if (!scn)
  898. return -EINVAL;
  899. if (!hif_pci_pm_runtime_enabled(scn))
  900. return 0;
  901. dev = hif_bus_get_dev(scn);
  902. usage_count = atomic_read(&dev->power.usage_count);
  903. if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
  904. err = "Uexpected PUT when runtime PM is disabled";
  905. else if (usage_count == 0)
  906. err = "PUT without a GET Operation";
  907. if (err) {
  908. hif_pci_runtime_pm_warn(scn, err);
  909. return -EINVAL;
  910. }
  911. hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
  912. return pm_runtime_put_sync_suspend(dev);
  913. }
/**
 * hif_pm_runtime_request_resume() - Invoke async runtime resume
 * @hif_ctx: hif context
 *
 * This function will invoke asynchronous runtime resume.
 *
 * Return: status
 */
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	int pm_state;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
		hif_info("Runtime PM resume is requested by %ps",
			 (void *)_RET_IP_);

	/* bookkeeping: count the request and remember who asked */
	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;

	return hif_pm_request_resume(hif_bus_get_dev(scn));
}
  941. /**
  942. * hif_pm_runtime_mark_last_busy() - Mark last busy time
  943. * @hif_ctx: hif context
  944. *
  945. * This function will mark the last busy time, this will be used
  946. * to check if auto suspend delay expired or not.
  947. *
  948. * Return: void
  949. */
  950. void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
  951. {
  952. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  953. struct hif_runtime_pm_ctx *rpm_ctx;
  954. if (!scn)
  955. return;
  956. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  957. rpm_ctx->pm_stats.last_busy_marker = (void *)_RET_IP_;
  958. rpm_ctx->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
  959. pm_runtime_mark_last_busy(hif_bus_get_dev(scn));
  960. return;
  961. }
/**
 * hif_pm_runtime_get_noresume() - Inc usage count without resume
 * @hif_ctx: hif context
 * @rtpm_dbgid: Id of the module calling get
 *
 * This function will increment device usage count to avoid runtime
 * suspend, but it would not do resume.
 *
 * Return: void
 */
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
				 wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	if (!hif_pci_pm_runtime_enabled(scn))
		return;

	hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
	pm_runtime_get_noresume(hif_bus_get_dev(scn));
}
/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
 *
 * return: success if the bus is up and a get has been issued
 *         otherwise an error code.
 */
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct device *dev;
	int ret;
	int pm_state;

	if (!scn) {
		hif_err("Could not do runtime get, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	pm_state = qdf_atomic_read(&rpm_ctx->pm_state);

	/* device awake (or runtime PM not yet started): take the get */
	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
	    pm_state == HIF_PM_RUNTIME_STATE_NONE) {
		hif_pm_stats_runtime_get_record(scn, rtpm_dbgid);
		ret = __hif_pm_runtime_get(dev);

		/* Get can return 1 if the device is already active, just return
		 * success in that case
		 */
		if (ret > 0)
			ret = 0;

		/* on failure drop the reference the failed get still took */
		if (ret)
			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);

		/* -EINPROGRESS (resume underway) is expected, not an error */
		if (ret && ret != -EINPROGRESS) {
			rpm_ctx->pm_stats.runtime_get_err++;
			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
				qdf_atomic_read(&rpm_ctx->pm_state), ret);
		}

		return ret;
	}

	/* bus suspended/suspending: do not get — just kick off a resume
	 * and tell the caller to retry (-EAGAIN) or back off (-EBUSY)
	 */
	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
		hif_info_high("Runtime PM resume is requested by %ps",
			      (void *)_RET_IP_);
		ret = -EAGAIN;
	} else {
		ret = -EBUSY;
	}

	rpm_ctx->pm_stats.request_resume++;
	rpm_ctx->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(dev);

	return ret;
}
/**
 * hif_pm_runtime_put() - do a put operation on the device
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * A put operation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped
 * This api will return failure if it would decrement the usage count below 0.
 *
 * return: 0 if the put is performed, negative errno otherwise
 *         (NOTE(review): original doc said QDF_STATUS_SUCCESS, but the
 *         function returns plain ints — 0 / -EFAULT / -EINVAL)
 */
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *error = NULL;

	if (!scn) {
		hif_err("Could not do runtime put, scn is null");
		return -EFAULT;
	}

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	/* with runtime PM disabled the driver holds 2 references itself,
	 * so usage_count == 2 means this put has no matching get
	 */
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		error = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		error = "PUT without a GET operation";

	if (error) {
		hif_pci_runtime_pm_warn(scn, error);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);

	/* refresh last-busy before the put so autosuspend delay restarts */
	hif_pm_runtime_mark_last_busy(hif_ctx);
	hif_pm_runtime_put_auto(dev);

	return 0;
}
/**
 * hif_pm_runtime_put_noidle() - do a put operation with no idle
 * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who use it
 *
 * This API will do a runtime put no idle operation: the usage count is
 * decremented but no idle/suspend check is triggered.
 *
 * Return: 0 for success otherwise an error code
 */
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
			      wlan_rtpm_dbgid rtpm_dbgid)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct device *dev;
	int usage_count;
	char *err = NULL;

	if (!scn)
		return -EINVAL;

	if (!hif_pci_pm_runtime_enabled(scn))
		return 0;

	dev = hif_bus_get_dev(scn);
	usage_count = atomic_read(&dev->power.usage_count);
	/* same unbalanced-put detection as hif_pm_runtime_put() */
	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
		err = "Unexpected PUT when runtime PM is disabled";
	else if (usage_count == 0)
		err = "PUT without a GET operation";

	if (err) {
		hif_pci_runtime_pm_warn(scn, err);
		return -EINVAL;
	}

	hif_pm_stats_runtime_put_record(scn, rtpm_dbgid);
	pm_runtime_put_noidle(dev);

	return 0;
}
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 * reason
 * @scn: hif context
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold rpm_ctx->runtime_lock (the list and counter are
 * manipulated without further locking here — TODO confirm all callers).
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_softc *scn,
					    struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;

	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;

	ret = __hif_pm_runtime_get(dev);

	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as at a later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		rpm_ctx->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(scn,
					"Prevent Suspend Runtime PM Error");
	}

	/* record the lock as active even on get error — see comment above */
	rpm_ctx->prevent_suspend_cnt++;

	lock->active = true;

	list_add_tail(&lock->list, &rpm_ctx->prevent_suspend_list);

	qdf_atomic_inc(&rpm_ctx->pm_stats.prevent_suspend);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	return ret;
}
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @scn: hif context
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count.
 *
 * Caller must hold rpm_ctx->runtime_lock (list/counter updated without
 * further locking here — TODO confirm all callers).
 *
 * Return: status
 */
static int __hif_pm_runtime_allow_suspend(struct hif_softc *scn,
					  struct hif_pm_runtime_lock *lock)
{
	struct device *dev = hif_bus_get_dev(scn);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	int ret = 0;
	int usage_count;

	/* nothing to release: no outstanding prevents, or this lock
	 * was never taken
	 */
	if (rpm_ctx->prevent_suspend_cnt == 0)
		return ret;

	if (!lock->active)
		return ret;

	usage_count = atomic_read(&dev->power.usage_count);

	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
	    usage_count == 0) {
		hif_pci_runtime_pm_warn(scn, "PUT without a GET Operation");
		return -EINVAL;
	}

	list_del(&lock->list);

	rpm_ctx->prevent_suspend_cnt--;

	lock->active = false;
	lock->timeout = 0;

	hif_pm_runtime_mark_last_busy(GET_HIF_OPAQUE_HDL(scn));
	ret = hif_pm_runtime_put_auto(dev);

	hif_debug("%s: in pm_state:%s ret: %d", __func__,
		  hif_pm_runtime_state_to_string(
			  qdf_atomic_read(&rpm_ctx->pm_state)),
		  ret);

	qdf_atomic_inc(&rpm_ctx->pm_stats.allow_suspend);

	return ret;
}
/**
 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
 * @data: callback data that is the hif context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 *
 * dummy implementation until lock acquisition is implemented.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_softc *scn = data;
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;

	spin_lock_bh(&rpm_ctx->runtime_lock);

	timer_expires = rpm_ctx->runtime_timer_expires;

	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		rpm_ctx->runtime_timer_expires = 0;
		/* _safe variant: allow_suspend deletes entries as we walk */
		list_for_each_entry_safe(context, temp,
					 &rpm_ctx->prevent_suspend_list,
					 list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(scn, context);
				rpm_ctx->pm_stats.allow_suspend_timeout++;
			}
		}
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);
}
/**
 * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
 * @ol_sc: opaque HIF context
 * @data: runtime pm lock
 *
 * This function will prevent runtime suspend, by incrementing
 * device's usage count. Must not be called from hard IRQ context
 * (takes a BH spinlock; WARNs if in_irq()).
 *
 * Return: status
 */
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);
	/* clear any timeout left by a previous timed acquisition */
	context->timeout = 0;
	__hif_pm_runtime_prevent_suspend(scn, context);
	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
/**
 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @ol_sc: opaque HIF context
 * @data: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count. Must not be called from hard IRQ context
 * (takes a BH spinlock; WARNs if in_irq()).
 *
 * Return: status
 */
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(ol_sc);
	struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
	struct hif_pm_runtime_lock *context = data;

	if (!scn->hif_config.enable_runtime_pm)
		return 0;

	if (!context)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	spin_lock_bh(&rpm_ctx->runtime_lock);

	__hif_pm_runtime_allow_suspend(scn, context);

	/* The list can be empty as well in cases where
	 * we have one context in the list and the allow
	 * suspend came before the timer expires and we delete
	 * context above from the list.
	 * When list is empty prevent_suspend count will be zero.
	 */
	if (rpm_ctx->prevent_suspend_cnt == 0 &&
	    rpm_ctx->runtime_timer_expires > 0) {
		qdf_timer_free(&rpm_ctx->runtime_timer);
		rpm_ctx->runtime_timer_expires = 0;
	}

	spin_unlock_bh(&rpm_ctx->runtime_lock);

	return 0;
}
/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @lock: QDF runtime lock whose ->lock member receives the new context
 * @name: Context name (kept by reference, not copied — caller must keep
 *        the string alive; "Default" is used when NULL)
 *
 * This API allocates and initializes the Runtime PM context of the caller
 * and stores the pointer in @lock.
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	struct hif_pm_runtime_lock *context;

	hif_info("Initializing Runtime PM wakelock %s", name);

	context = qdf_mem_malloc(sizeof(*context));
	if (!context)
		return -ENOMEM;

	context->name = name ? name : "Default";
	lock->lock = context;

	return 0;
}
/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
 * @hif_ctx: opaque HIF context (may be NULL; the context is still freed)
 * @data: Runtime PM context
 *
 * Return: void
 */
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;
	struct hif_pm_runtime_lock *context = data;

	if (!context) {
		hif_err("Runtime PM wakelock context is NULL");
		return;
	}

	hif_info("Deinitializing Runtime PM wakelock %s", context->name);

	/*
	 * Ensure to delete the context list entry and reduce the usage count
	 * before freeing the context if context is active.
	 */
	if (scn) {
		rpm_ctx = hif_bus_get_rpm_ctx(scn);
		spin_lock_bh(&rpm_ctx->runtime_lock);
		__hif_pm_runtime_allow_suspend(scn, context);
		spin_unlock_bh(&rpm_ctx->runtime_lock);
	}

	qdf_mem_free(context);
}
  1371. /**
  1372. * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
  1373. * @hif_ctx: HIF context
  1374. *
  1375. * Return: true for runtime suspended, otherwise false
  1376. */
  1377. bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
  1378. {
  1379. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1380. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1381. return qdf_atomic_read(&rpm_ctx->pm_state) ==
  1382. HIF_PM_RUNTIME_STATE_SUSPENDED;
  1383. }
  1384. /*
  1385. * hif_pm_runtime_suspend_lock() - spin_lock on marking runtime suspend
  1386. * @hif_ctx: HIF context
  1387. *
  1388. * Return: void
  1389. */
  1390. void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
  1391. {
  1392. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1393. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1394. qdf_spin_lock_irqsave(&rpm_ctx->runtime_suspend_lock);
  1395. }
  1396. /*
  1397. * hif_pm_runtime_suspend_unlock() - spin_unlock on marking runtime suspend
  1398. * @hif_ctx: HIF context
  1399. *
  1400. * Return: void
  1401. */
  1402. void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
  1403. {
  1404. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1405. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1406. qdf_spin_unlock_irqrestore(&rpm_ctx->runtime_suspend_lock);
  1407. }
  1408. /**
  1409. * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
  1410. * @hif_ctx: HIF context
  1411. *
  1412. * monitor_wake_intr variable can be used to indicate if driver expects wake
  1413. * MSI for runtime PM
  1414. *
  1415. * Return: monitor_wake_intr variable
  1416. */
  1417. int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
  1418. {
  1419. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1420. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1421. return qdf_atomic_read(&rpm_ctx->monitor_wake_intr);
  1422. }
  1423. /**
  1424. * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
  1425. * @hif_ctx: HIF context
  1426. * @val: value to set
  1427. *
  1428. * monitor_wake_intr variable can be used to indicate if driver expects wake
  1429. * MSI for runtime PM
  1430. *
  1431. * Return: void
  1432. */
  1433. void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
  1434. int val)
  1435. {
  1436. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1437. struct hif_runtime_pm_ctx *rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1438. qdf_atomic_set(&rpm_ctx->monitor_wake_intr, val);
  1439. }
  1440. /**
  1441. * hif_pm_runtime_check_and_request_resume() - check if the device is runtime
  1442. * suspended and request resume.
  1443. * @hif_ctx: HIF context
  1444. *
  1445. * This function is to check if the device is runtime suspended and
  1446. * request for runtime resume.
  1447. *
  1448. * Return: void
  1449. */
  1450. void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
  1451. {
  1452. hif_pm_runtime_suspend_lock(hif_ctx);
  1453. if (hif_pm_runtime_is_suspended(hif_ctx)) {
  1454. hif_pm_runtime_suspend_unlock(hif_ctx);
  1455. hif_pm_runtime_request_resume(hif_ctx);
  1456. } else {
  1457. hif_pm_runtime_suspend_unlock(hif_ctx);
  1458. }
  1459. }
/**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by data path
 * @hif_ctx: HIF context
 *
 * Flags that data-path RX was the last activity, stamps the DP busy
 * timestamp, and forwards to the generic last-busy marking.
 *
 * Return: void
 */
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_runtime_pm_ctx *rpm_ctx;

	if (!scn)
		return;

	rpm_ctx = hif_bus_get_rpm_ctx(scn);
	qdf_atomic_set(&rpm_ctx->pm_dp_rx_busy, 1);
	rpm_ctx->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();

	hif_pm_runtime_mark_last_busy(hif_ctx);
}
  1477. /**
  1478. * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
  1479. * @hif_ctx: HIF context
  1480. *
  1481. * Return: dp rx busy set value
  1482. */
  1483. int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  1484. {
  1485. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1486. struct hif_runtime_pm_ctx *rpm_ctx;
  1487. if (!scn)
  1488. return 0;
  1489. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1490. return qdf_atomic_read(&rpm_ctx->pm_dp_rx_busy);
  1491. }
  1492. /**
  1493. * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
  1494. * @hif_ctx: HIF context
  1495. *
  1496. * Return: timestamp of last mark busy by dp rx
  1497. */
  1498. qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
  1499. {
  1500. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1501. struct hif_runtime_pm_ctx *rpm_ctx;
  1502. if (!scn)
  1503. return 0;
  1504. rpm_ctx = hif_bus_get_rpm_ctx(scn);
  1505. return rpm_ctx->dp_last_busy_timestamp;
  1506. }
  1507. #endif /* FEATURE_RUNTIME_PM */