hif_runtime_pm.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159
  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/slab.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/if_arp.h>
  19. #include "hif_io32.h"
  20. #include "hif_runtime_pm.h"
  21. #include "hif.h"
  22. #include "target_type.h"
  23. #include "hif_main.h"
  24. #include "ce_main.h"
  25. #include "ce_api.h"
  26. #include "ce_internal.h"
  27. #include "ce_reg.h"
  28. #include "ce_bmi.h"
  29. #include "regtable.h"
  30. #include "hif_hw_version.h"
  31. #include <linux/debugfs.h>
  32. #include <linux/seq_file.h>
  33. #include "qdf_status.h"
  34. #include "qdf_atomic.h"
  35. #include "pld_common.h"
  36. #include "mp_dev.h"
  37. #include "hif_debug.h"
  38. #include "ce_tasklet.h"
  39. #include "targaddrs.h"
  40. #include "hif_exec.h"
  41. #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
  42. #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
  43. #ifdef FEATURE_RUNTIME_PM
  44. static struct hif_rtpm_ctx g_hif_rtpm_ctx;
  45. static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
  46. /**
  47. * hif_rtpm_id_to_string() - Convert dbgid to respective string
  48. * @id - debug id
  49. *
  50. * Debug support function to convert dbgid to string.
  51. * Please note to add new string in the array at index equal to
  52. * its enum value in wlan_rtpm_dbgid.
  53. *
  54. * Return: String of ID
  55. */
  56. static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
  57. {
  58. static const char * const strings[] = {
  59. "HIF_RTPM_ID_RESERVED",
  60. "HIF_RTPM_HAL_REO_CMD",
  61. "HIF_RTPM_WMI",
  62. "HIF_RTPM_HTT",
  63. "HIF_RTPM_DP",
  64. "HIF_RTPM_RING_STATS",
  65. "HIF_RTPM_CE",
  66. "HIF_RTPM_FORCE_WAKE",
  67. "HIF_RTPM_ID_PM_QOS_NOTIFY",
  68. "HIF_RTPM_ID_WIPHY_SUSPEND",
  69. "HIF_RTPM_ID_MAX"
  70. };
  71. return strings[id];
  72. }
  73. /**
  74. * hif_rtpm_read_usage_count() - Read device usage count
  75. * @dev: device structure
  76. *
  77. * Return: current usage count
  78. */
  79. static inline int hif_rtpm_read_usage_count(void)
  80. {
  81. return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
  82. }
  83. #define HIF_RTPM_STATS(_s, _rtpm_ctx, _name) \
  84. seq_printf(_s, "%30s: %u\n", #_name, (_rtpm_ctx)->stats._name)
  85. /**
  86. * hif_rtpm_debugfs_show(): show debug stats for runtimepm
  87. * @s: file to print to
  88. * @data: unused
  89. *
  90. * debugging tool added to the debug fs for displaying runtimepm stats
  91. *
  92. * Return: 0
  93. */
  94. static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
  95. {
  96. struct hif_rtpm_client *client = NULL;
  97. struct hif_pm_runtime_lock *ctx;
  98. static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
  99. "RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
  100. int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  101. int i;
  102. seq_printf(s, "%30s: %llu\n", "Current timestamp",
  103. qdf_get_log_timestamp());
  104. seq_printf(s, "%30s: %s\n", "Runtime PM state", autopm_state[pm_state]);
  105. seq_printf(s, "%30s: %llu\n", "Last Busy timestamp",
  106. gp_hif_rtpm_ctx->stats.last_busy_ts);
  107. seq_printf(s, "%30s: %ps\n", "Last Busy Marker",
  108. gp_hif_rtpm_ctx->stats.last_busy_marker);
  109. seq_puts(s, "Rx busy marker counts:\n");
  110. seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
  111. gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
  112. gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);
  113. seq_printf(s, "%30s: %u %llu\n", hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
  114. gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
  115. gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);
  116. HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, last_busy_id);
  117. if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
  118. seq_printf(s, "%30s: %llx us\n", "Suspended Since",
  119. gp_hif_rtpm_ctx->stats.suspend_ts);
  120. }
  121. HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, resume_count);
  122. HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_count);
  123. HIF_RTPM_STATS(s, gp_hif_rtpm_ctx, suspend_err_count);
  124. seq_printf(s, "%30s: %d\n", "PM Usage count",
  125. hif_rtpm_read_usage_count());
  126. seq_puts(s, "get put get-timestamp put-timestamp :DBGID_NAME\n");
  127. for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
  128. client = gp_hif_rtpm_ctx->clients[i];
  129. if (!client)
  130. continue;
  131. seq_printf(s, "%-10d ", qdf_atomic_read(&client->get_count));
  132. seq_printf(s, "%-10d ", qdf_atomic_read(&client->put_count));
  133. seq_printf(s, "0x%-10llx ", client->get_ts);
  134. seq_printf(s, "0x%-10llx ", client->put_ts);
  135. seq_printf(s, ":%-2d %-30s\n", i, hif_rtpm_id_to_string(i));
  136. }
  137. seq_puts(s, "\n");
  138. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  139. if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
  140. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  141. return 0;
  142. }
  143. seq_printf(s, "%30s: ", "Active Wakeup_Sources");
  144. list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
  145. seq_printf(s, "%s", ctx->name);
  146. seq_puts(s, " ");
  147. }
  148. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  149. return 0;
  150. }
  151. #undef HIF_RTPM_STATS
  152. /**
  153. * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
  154. * @inode
  155. * @file
  156. *
  157. * Return: linux error code of single_open.
  158. */
  159. static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
  160. {
  161. return single_open(file, hif_rtpm_debugfs_show,
  162. inode->i_private);
  163. }
  164. static const struct file_operations hif_rtpm_fops = {
  165. .owner = THIS_MODULE,
  166. .open = hif_rtpm_debugfs_open,
  167. .release = single_release,
  168. .read = seq_read,
  169. .llseek = seq_lseek,
  170. };
  171. /**
  172. * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
  173. * @scn: hif context
  174. *
  175. * creates a debugfs entry to debug the runtime pm feature.
  176. */
  177. static void hif_rtpm_debugfs_create(void)
  178. {
  179. gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
  180. CNSS_RUNTIME_FILE_PERM,
  181. NULL,
  182. NULL,
  183. &hif_rtpm_fops);
  184. }
  185. /**
  186. * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
  187. * @scn: pci context
  188. *
  189. * removes the debugfs entry to debug the runtime pm feature.
  190. */
  191. static void hif_rtpm_debugfs_remove(void)
  192. {
  193. qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
  194. }
  195. /**
  196. * hif_rtpm_init() - Initialize Runtime PM
  197. * @dev: device structure
  198. * @delay: delay to be confgured for auto suspend
  199. *
  200. * This function will init all the Runtime PM config.
  201. *
  202. * Return: void
  203. */
  204. static void hif_rtpm_init(struct device *dev, int delay)
  205. {
  206. pm_runtime_set_autosuspend_delay(dev, delay);
  207. pm_runtime_use_autosuspend(dev);
  208. pm_runtime_allow(dev);
  209. pm_runtime_mark_last_busy(dev);
  210. pm_runtime_put_noidle(dev);
  211. pm_suspend_ignore_children(dev, true);
  212. }
  213. /**
  214. * hif_rtpm_exit() - Deinit/Exit Runtime PM
  215. * @dev: device structure
  216. *
  217. * This function will deinit all the Runtime PM config.
  218. *
  219. * Return: void
  220. */
  221. static void hif_rtpm_exit(struct device *dev)
  222. {
  223. pm_runtime_get_noresume(dev);
  224. pm_runtime_set_active(dev);
  225. pm_runtime_forbid(dev);
  226. }
  227. void hif_rtpm_open(struct hif_softc *scn)
  228. {
  229. gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
  230. gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
  231. qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
  232. qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
  233. qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
  234. qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
  235. qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
  236. qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
  237. INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
  238. gp_hif_rtpm_ctx->client_count = 0;
  239. gp_hif_rtpm_ctx->pending_job = 0;
  240. hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
  241. hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
  242. hif_info_high("Runtime PM attached");
  243. }
  244. static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);
  245. /**
  246. * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
  247. *
  248. * Ensure all gets/puts are in sync before exiting runtime PM feature.
  249. * Also make sure all runtime PM locks are deinitialized properly.
  250. *
  251. * Return: void
  252. */
  253. static void hif_rtpm_sanitize_exit(void)
  254. {
  255. struct hif_pm_runtime_lock *ctx, *tmp;
  256. struct hif_rtpm_client *client;
  257. int i, active_count;
  258. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  259. list_for_each_entry_safe(ctx, tmp,
  260. &gp_hif_rtpm_ctx->prevent_list, list) {
  261. hif_runtime_lock_deinit(ctx);
  262. }
  263. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  264. /* check if get and put out of sync for all clients */
  265. for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
  266. client = gp_hif_rtpm_ctx->clients[i];
  267. if (client) {
  268. if (qdf_atomic_read(&client->active_count)) {
  269. active_count =
  270. qdf_atomic_read(&client->active_count);
  271. hif_err("Client active: %u- %s", i,
  272. hif_rtpm_id_to_string(i));
  273. QDF_DEBUG_PANIC("Client active on exit!");
  274. while (active_count--)
  275. __hif_rtpm_put_noidle(
  276. gp_hif_rtpm_ctx->dev);
  277. }
  278. QDF_DEBUG_PANIC("Client not deinitialized");
  279. qdf_mem_free(client);
  280. gp_hif_rtpm_ctx->clients[i] = NULL;
  281. }
  282. }
  283. }
  284. /**
  285. * hif_rtpm_sanitize_on_ssr_exit() - Empty the suspend list on SSR
  286. *
  287. * API is used to empty the runtime pm prevent suspend list.
  288. *
  289. * Return: void
  290. */
  291. static void hif_rtpm_sanitize_ssr_exit(void)
  292. {
  293. struct hif_pm_runtime_lock *ctx, *tmp;
  294. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  295. list_for_each_entry_safe(ctx, tmp,
  296. &gp_hif_rtpm_ctx->prevent_list, list) {
  297. __hif_pm_runtime_allow_suspend(ctx);
  298. }
  299. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  300. }
  301. void hif_rtpm_close(struct hif_softc *scn)
  302. {
  303. hif_rtpm_deregister(HIF_RTPM_ID_CE);
  304. hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);
  305. hif_is_recovery_in_progress(scn) ?
  306. hif_rtpm_sanitize_ssr_exit() :
  307. hif_rtpm_sanitize_exit();
  308. qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
  309. gp_hif_rtpm_ctx = NULL;
  310. hif_info_high("Runtime PM context detached");
  311. }
  312. void hif_rtpm_start(struct hif_softc *scn)
  313. {
  314. uint32_t mode = hif_get_conparam(scn);
  315. gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
  316. if (!gp_hif_rtpm_ctx->enable_rpm) {
  317. hif_info_high("RUNTIME PM is disabled in ini");
  318. return;
  319. }
  320. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  321. mode == QDF_GLOBAL_MONITOR_MODE) {
  322. hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
  323. return;
  324. }
  325. hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
  326. scn->hif_config.runtime_pm_delay);
  327. qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
  328. hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
  329. hif_rtpm_debugfs_create();
  330. }
  331. void hif_rtpm_stop(struct hif_softc *scn)
  332. {
  333. uint32_t mode = hif_get_conparam(scn);
  334. if (!gp_hif_rtpm_ctx->enable_rpm)
  335. return;
  336. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  337. mode == QDF_GLOBAL_MONITOR_MODE)
  338. return;
  339. hif_rtpm_exit(gp_hif_rtpm_ctx->dev);
  340. hif_rtpm_sync_resume();
  341. qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
  342. hif_rtpm_debugfs_remove();
  343. }
  344. QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
  345. {
  346. struct hif_rtpm_client *client;
  347. if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
  348. hif_err("Runtime PM context NULL");
  349. return QDF_STATUS_E_FAILURE;
  350. }
  351. if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
  352. hif_err("Invalid client %d", id);
  353. return QDF_STATUS_E_INVAL;
  354. }
  355. client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
  356. if (!client)
  357. return QDF_STATUS_E_NOMEM;
  358. client->hif_rtpm_cbk = hif_rtpm_cbk;
  359. qdf_atomic_init(&client->active_count);
  360. qdf_atomic_init(&client->get_count);
  361. qdf_atomic_init(&client->put_count);
  362. gp_hif_rtpm_ctx->clients[id] = client;
  363. gp_hif_rtpm_ctx->client_count++;
  364. return QDF_STATUS_SUCCESS;
  365. }
  366. QDF_STATUS hif_rtpm_deregister(uint32_t id)
  367. {
  368. struct hif_rtpm_client *client;
  369. int active_count;
  370. if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
  371. hif_err("Runtime PM context NULL");
  372. return QDF_STATUS_E_FAILURE;
  373. }
  374. if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
  375. hif_err("invalid client, id: %u", id);
  376. return QDF_STATUS_E_INVAL;
  377. }
  378. client = gp_hif_rtpm_ctx->clients[id];
  379. if (qdf_atomic_read(&client->active_count)) {
  380. active_count = qdf_atomic_read(&client->active_count);
  381. hif_err("Client: %u-%s Runtime PM active",
  382. id, hif_rtpm_id_to_string(id));
  383. hif_err("last get called: 0x%llx, get count: %d, put count: %d",
  384. client->get_ts, qdf_atomic_read(&client->get_count),
  385. qdf_atomic_read(&client->put_count));
  386. QDF_DEBUG_PANIC("Get and PUT call out of sync!");
  387. while (active_count--)
  388. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  389. }
  390. qdf_mem_free(client);
  391. gp_hif_rtpm_ctx->clients[id] = NULL;
  392. return QDF_STATUS_SUCCESS;
  393. }
  394. int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
  395. {
  396. struct hif_pm_runtime_lock *context;
  397. if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
  398. hif_err("Runtime PM context NULL");
  399. return QDF_STATUS_E_FAILURE;
  400. }
  401. hif_debug("Initializing Runtime PM wakelock %s", name);
  402. context = qdf_mem_malloc(sizeof(*context));
  403. if (!context)
  404. return -ENOMEM;
  405. context->name = name ? name : "Default";
  406. lock->lock = context;
  407. return 0;
  408. }
  409. void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
  410. {
  411. if (!lock) {
  412. hif_err("Runtime PM lock already freed");
  413. return;
  414. }
  415. hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);
  416. if (gp_hif_rtpm_ctx) {
  417. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  418. __hif_pm_runtime_allow_suspend(lock);
  419. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  420. }
  421. qdf_mem_free(lock);
  422. }
  423. /**
  424. * hif_rtpm_enabled() - To check if Runtime PM is enabled
  425. *
  426. * This function will check if Runtime PM is enabled or not.
  427. *
  428. * Return: void
  429. */
  430. static bool hif_rtpm_enabled(void)
  431. {
  432. if (qdf_unlikely(!gp_hif_rtpm_ctx))
  433. return false;
  434. if (gp_hif_rtpm_ctx->enable_rpm)
  435. return true;
  436. return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
  437. }
  438. QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
  439. {
  440. struct hif_rtpm_client *client = NULL;
  441. int ret = QDF_STATUS_E_FAILURE;
  442. int pm_state;
  443. if (!hif_rtpm_enabled())
  444. return QDF_STATUS_SUCCESS;
  445. if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
  446. QDF_DEBUG_PANIC("Invalid client, id: %u", id);
  447. return -QDF_STATUS_E_INVAL;
  448. }
  449. client = gp_hif_rtpm_ctx->clients[id];
  450. if (type != HIF_RTPM_GET_ASYNC) {
  451. switch (type) {
  452. case HIF_RTPM_GET_FORCE:
  453. ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
  454. break;
  455. case HIF_RTPM_GET_SYNC:
  456. ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
  457. break;
  458. case HIF_RTPM_GET_NORESUME:
  459. __hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
  460. ret = 0;
  461. break;
  462. default:
  463. QDF_DEBUG_PANIC("Invalid call type");
  464. return QDF_STATUS_E_BADMSG;
  465. }
  466. if (ret < 0 && ret != -EINPROGRESS) {
  467. hif_err("pm_state: %d ret: %d",
  468. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  469. ret);
  470. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  471. } else {
  472. ret = QDF_STATUS_SUCCESS;
  473. }
  474. goto out;
  475. }
  476. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  477. if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
  478. ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
  479. /* Get will return 1 if the device is already active,
  480. * just return success in that case
  481. */
  482. if (ret > 0) {
  483. ret = QDF_STATUS_SUCCESS;
  484. } else if (ret == 0 || ret == -EINPROGRESS) {
  485. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  486. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  487. if (pm_state >= HIF_RTPM_STATE_RESUMING) {
  488. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  489. gp_hif_rtpm_ctx->stats.request_resume_ts =
  490. qdf_get_log_timestamp();
  491. gp_hif_rtpm_ctx->stats.request_resume_id = id;
  492. ret = QDF_STATUS_E_FAILURE;
  493. } else {
  494. ret = QDF_STATUS_SUCCESS;
  495. }
  496. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  497. } else if (ret < 0) {
  498. hif_err("pm_state: %d ret: %d",
  499. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  500. ret);
  501. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  502. }
  503. } else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
  504. /* Do not log in performance path */
  505. if (id != HIF_RTPM_ID_DP)
  506. hif_info_high("request RTPM resume by %d- %s",
  507. id, hif_rtpm_id_to_string(id));
  508. __hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
  509. gp_hif_rtpm_ctx->stats.request_resume_ts =
  510. qdf_get_log_timestamp();
  511. gp_hif_rtpm_ctx->stats.request_resume_id = id;
  512. return QDF_STATUS_E_FAILURE;
  513. }
  514. out:
  515. if (QDF_IS_STATUS_SUCCESS(ret)) {
  516. qdf_atomic_inc(&client->active_count);
  517. qdf_atomic_inc(&client->get_count);
  518. client->get_ts = qdf_get_log_timestamp();
  519. }
  520. return ret;
  521. }
  522. QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
  523. {
  524. struct hif_rtpm_client *client;
  525. int usage_count;
  526. if (!hif_rtpm_enabled())
  527. return QDF_STATUS_SUCCESS;
  528. if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
  529. hif_err("Invalid client, id: %u", id);
  530. return QDF_STATUS_E_INVAL;
  531. }
  532. client = gp_hif_rtpm_ctx->clients[id];
  533. usage_count = hif_rtpm_read_usage_count();
  534. if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
  535. hif_err("Unexpected PUT when runtime PM is disabled");
  536. QDF_BUG(0);
  537. return QDF_STATUS_E_CANCELED;
  538. } else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
  539. hif_info_high("Put without a Get operation, %u-%s",
  540. id, hif_rtpm_id_to_string(id));
  541. return QDF_STATUS_E_CANCELED;
  542. }
  543. switch (type) {
  544. case HIF_RTPM_PUT_ASYNC:
  545. __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
  546. break;
  547. case HIF_RTPM_PUT_NOIDLE:
  548. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  549. break;
  550. case HIF_RTPM_PUT_SYNC_SUSPEND:
  551. __hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
  552. break;
  553. default:
  554. QDF_DEBUG_PANIC("Invalid call type");
  555. return QDF_STATUS_E_BADMSG;
  556. }
  557. __hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
  558. qdf_atomic_dec(&client->active_count);
  559. qdf_atomic_inc(&client->put_count);
  560. client->put_ts = qdf_get_log_timestamp();
  561. gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;
  562. return QDF_STATUS_SUCCESS;
  563. }
  564. /**
  565. * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
  566. * reason
  567. * @lock: runtime_pm lock being acquired
  568. *
  569. * Return: 0 if successful.
  570. */
  571. static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
  572. {
  573. int ret = 0;
  574. if (lock->active)
  575. return 0;
  576. ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
  577. /**
  578. * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
  579. * RPM_SUSPENDING. Any other negative value is an error.
  580. * We shouldn't do runtime_put here as in later point allow
  581. * suspend gets called with the context and there the usage count
  582. * is decremented, so suspend will be prevented.
  583. */
  584. if (ret < 0 && ret != -EINPROGRESS) {
  585. gp_hif_rtpm_ctx->stats.runtime_get_err++;
  586. hif_err("pm_state: %d ret: %d",
  587. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  588. ret);
  589. }
  590. list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
  591. lock->active = true;
  592. gp_hif_rtpm_ctx->prevent_cnt++;
  593. gp_hif_rtpm_ctx->stats.prevent_suspend++;
  594. return ret;
  595. }
  596. /**
  597. * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
  598. * @lock: runtime pm lock
  599. *
  600. * This function will allow runtime suspend, by decrementing
  601. * device's usage count.
  602. *
  603. * Return: status
  604. */
  605. static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
  606. {
  607. int ret = 0;
  608. int usage_count;
  609. if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
  610. return ret;
  611. usage_count = hif_rtpm_read_usage_count();
  612. /*
  613. * For runtime PM enabled case, the usage count should never be 0
  614. * at this point. For runtime PM disabled case, it should never be
  615. * 2 at this point. Catch unexpected PUT without GET here.
  616. */
  617. if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
  618. hif_err("Unexpected PUT when runtime PM is disabled");
  619. QDF_BUG(0);
  620. return QDF_STATUS_E_CANCELED;
  621. } else if (!usage_count) {
  622. hif_info_high("Put without a Get operation, %s", lock->name);
  623. return QDF_STATUS_E_CANCELED;
  624. }
  625. hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
  626. ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
  627. list_del(&lock->list);
  628. lock->active = false;
  629. gp_hif_rtpm_ctx->prevent_cnt--;
  630. gp_hif_rtpm_ctx->stats.allow_suspend++;
  631. return ret;
  632. }
  633. int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
  634. {
  635. if (!hif_rtpm_enabled() || !lock)
  636. return -EINVAL;
  637. if (in_irq())
  638. WARN_ON(1);
  639. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  640. __hif_pm_runtime_prevent_suspend(lock);
  641. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  642. if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
  643. HIF_RTPM_STATE_SUSPENDING)
  644. hif_info_high("request RTPM resume by %s",
  645. lock->name);
  646. return 0;
  647. }
  648. /**
  649. * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
  650. * suspend for a protocol reason
  651. * @lock: runtime_pm lock being acquired
  652. *
  653. * Return: 0 if successful.
  654. */
  655. static
  656. int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
  657. {
  658. int ret = 0;
  659. if (lock->active)
  660. return 0;
  661. ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
  662. /**
  663. * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
  664. * RPM_SUSPENDING. Any other negative value is an error.
  665. * We shouldn't do runtime_put here as in later point allow
  666. * suspend gets called with the context and there the usage count
  667. * is decremented, so suspend will be prevented.
  668. */
  669. if (ret < 0 && ret != -EINPROGRESS) {
  670. gp_hif_rtpm_ctx->stats.runtime_get_err++;
  671. hif_err("pm_state: %d ret: %d",
  672. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  673. ret);
  674. }
  675. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  676. list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
  677. lock->active = true;
  678. gp_hif_rtpm_ctx->prevent_cnt++;
  679. gp_hif_rtpm_ctx->stats.prevent_suspend++;
  680. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  681. return ret;
  682. }
  683. int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
  684. {
  685. if (!hif_rtpm_enabled())
  686. return 0;
  687. if (!lock)
  688. return -EINVAL;
  689. if (in_irq())
  690. WARN_ON(1);
  691. __hif_pm_runtime_prevent_suspend_sync(lock);
  692. if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
  693. HIF_RTPM_STATE_SUSPENDING)
  694. hif_info_high("request RTPM resume by %s",
  695. lock->name);
  696. return 0;
  697. }
  698. int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
  699. {
  700. if (!hif_rtpm_enabled())
  701. return 0;
  702. if (!lock)
  703. return -EINVAL;
  704. if (in_irq())
  705. WARN_ON(1);
  706. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  707. __hif_pm_runtime_allow_suspend(lock);
  708. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  709. return 0;
  710. }
  711. QDF_STATUS hif_rtpm_sync_resume(void)
  712. {
  713. struct device *dev;
  714. int pm_state;
  715. int ret;
  716. if (!hif_rtpm_enabled())
  717. return 0;
  718. dev = gp_hif_rtpm_ctx->dev;
  719. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  720. ret = __hif_rtpm_resume(dev);
  721. __hif_rtpm_mark_last_busy(dev);
  722. if (ret >= 0) {
  723. gp_hif_rtpm_ctx->stats.resume_count++;
  724. gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
  725. gp_hif_rtpm_ctx->stats.last_busy_ts =
  726. gp_hif_rtpm_ctx->stats.resume_ts;
  727. return QDF_STATUS_SUCCESS;
  728. }
  729. hif_err("pm_state: %d, err: %d", pm_state, ret);
  730. return QDF_STATUS_E_FAILURE;
  731. }
  732. void hif_rtpm_request_resume(void)
  733. {
  734. __hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
  735. hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
  736. }
  737. void hif_rtpm_check_and_request_resume(void)
  738. {
  739. hif_rtpm_suspend_lock();
  740. if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
  741. HIF_RTPM_STATE_SUSPENDING) {
  742. hif_rtpm_suspend_unlock();
  743. __hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
  744. gp_hif_rtpm_ctx->stats.request_resume_ts =
  745. qdf_get_log_timestamp();
  746. gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
  747. } else {
  748. __hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
  749. gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
  750. hif_rtpm_suspend_unlock();
  751. }
  752. }
  753. int hif_rtpm_get_monitor_wake_intr(void)
  754. {
  755. return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
  756. }
  757. void hif_rtpm_set_monitor_wake_intr(int val)
  758. {
  759. qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
  760. }
  761. void hif_rtpm_mark_last_busy(uint32_t id)
  762. {
  763. __hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
  764. gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
  765. gp_hif_rtpm_ctx->stats.last_busy_id = id;
  766. gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
  767. if (gp_hif_rtpm_ctx->clients[id]) {
  768. gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
  769. gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
  770. gp_hif_rtpm_ctx->stats.last_busy_ts;
  771. }
  772. }
  773. void hif_rtpm_set_client_job(uint32_t client_id)
  774. {
  775. int pm_state;
  776. if (!gp_hif_rtpm_ctx->clients[client_id])
  777. return;
  778. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  779. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  780. if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
  781. gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
  782. gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
  783. else
  784. qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
  785. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  786. }
/**
 * hif_rtpm_pending_job() - continue jobs when bus resumed
 *
 * Walks the pending_job bitmap and invokes each queued client callback.
 * Caller must hold runtime_lock; the lock is dropped around each callback
 * and reacquired afterwards — presumably so the callback itself may take
 * runtime_lock (e.g. via hif_rtpm_set_client_job()); TODO confirm.
 *
 * Return: Void
 */
static void hif_rtpm_pending_job(void)
{
	int i;

	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
			/* Bit cleared before the unlock, so the job cannot
			 * run twice even if re-queued concurrently.
			 */
			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
		}
	}
}
  804. #define PREVENT_LIST_STRING_LEN 200
/**
 * hif_rtpm_print_prevent_list() - Log what is blocking runtime suspend
 *
 * Prints the names on the prevent_list (runtime PM locks currently held)
 * and every client with a non-zero active usage count. Intended for the
 * suspend-rejected path (see hif_pre_runtime_suspend()).
 *
 * Return: Void
 */
void hif_rtpm_print_prevent_list(void)
{
	struct hif_rtpm_client *client;
	struct hif_pm_runtime_lock *ctx;
	char *str_buf;
	int i, prevent_list_count, len = 0;

	str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
	if (!str_buf)
		return;

	/* Build the name string under the list lock; log after dropping it */
	qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
	prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
	if (prevent_list_count) {
		list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
			/* qdf_scnprintf bounds each write; once the buffer
			 * fills, remaining size reaches 0 and further names
			 * are silently dropped.
			 */
			len += qdf_scnprintf(str_buf + len,
					     PREVENT_LIST_STRING_LEN - len,
					     "%s ", ctx->name);
	}
	qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);

	if (prevent_list_count)
		hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
			      prevent_list_count, str_buf);

	qdf_mem_free(str_buf);

	/* Also report clients holding a non-zero active usage count */
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (client && qdf_atomic_read(&client->active_count))
			hif_info_high("client: %d: %s- active count: %d", i,
				      hif_rtpm_id_to_string(i),
				      qdf_atomic_read(&client->active_count));
	}
}
  835. /**
  836. * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
  837. *
  838. * Return: True if no clients are active
  839. */
  840. static bool hif_rtpm_is_suspend_allowed(void)
  841. {
  842. if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
  843. return false;
  844. if (!hif_rtpm_read_usage_count())
  845. return true;
  846. return false;
  847. }
/**
 * hif_rtpm_suspend_lock() - Acquire the runtime-suspend spinlock
 *
 * Serializes pm_state checks against the suspend path; pair with
 * hif_rtpm_suspend_unlock().
 *
 * Return: Void
 */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
/**
 * hif_rtpm_suspend_unlock() - Release the runtime-suspend spinlock
 *
 * Counterpart of hif_rtpm_suspend_lock().
 *
 * Return: Void
 */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
/**
 * hif_rtpm_set_state(): utility function
 * @state: state to set
 *
 * Atomically stores @state into the global RTPM pm_state.
 *
 * Return: Void
 */
static inline
void hif_rtpm_set_state(enum hif_rtpm_state state)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
}
  867. int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  868. {
  869. if (!hif_can_suspend_link(hif_ctx)) {
  870. hif_err("Runtime PM not supported for link up suspend");
  871. return -EINVAL;
  872. }
  873. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  874. hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
  875. /* keep this after set suspending */
  876. if (!hif_rtpm_is_suspend_allowed()) {
  877. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  878. hif_rtpm_print_prevent_list();
  879. gp_hif_rtpm_ctx->stats.suspend_err_count++;
  880. gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
  881. hif_info_high("Runtime PM not allowed now");
  882. return -EINVAL;
  883. }
  884. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  885. return QDF_STATUS_SUCCESS;
  886. }
  887. void hif_process_runtime_suspend_success(void)
  888. {
  889. hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
  890. gp_hif_rtpm_ctx->stats.suspend_count++;
  891. gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
  892. }
  893. void hif_process_runtime_suspend_failure(void)
  894. {
  895. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  896. hif_rtpm_set_state(HIF_RTPM_STATE_ON);
  897. hif_rtpm_pending_job();
  898. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  899. gp_hif_rtpm_ctx->stats.suspend_err_count++;
  900. gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
  901. gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
  902. hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
  903. }
/**
 * hif_pre_runtime_resume() - Prepare for a runtime resume
 *
 * Clears the monitor-wake-interrupt flag and moves pm_state to
 * HIF_RTPM_STATE_RESUMING, both under runtime_lock.
 *
 * Return: Void
 */
void hif_pre_runtime_resume(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
/**
 * hif_process_runtime_resume_linkup() - Handle resume reaching link-up
 *
 * Moves pm_state to HIF_RTPM_STATE_RESUMING_LINKUP and runs any client
 * jobs deferred while suspended, all under runtime_lock (which
 * hif_rtpm_pending_job() drops around each callback).
 *
 * Return: Void
 */
void hif_process_runtime_resume_linkup(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
  918. void hif_process_runtime_resume_success(void)
  919. {
  920. hif_rtpm_set_state(HIF_RTPM_STATE_ON);
  921. gp_hif_rtpm_ctx->stats.resume_count++;
  922. gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
  923. gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
  924. hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
  925. }
  926. int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  927. {
  928. int errno;
  929. errno = hif_bus_suspend(hif_ctx);
  930. if (errno) {
  931. hif_err("Failed bus suspend: %d", errno);
  932. return errno;
  933. }
  934. hif_rtpm_set_monitor_wake_intr(1);
  935. errno = hif_bus_suspend_noirq(hif_ctx);
  936. if (errno) {
  937. hif_err("Failed bus suspend noirq: %d", errno);
  938. hif_rtpm_set_monitor_wake_intr(0);
  939. goto bus_resume;
  940. }
  941. return 0;
  942. bus_resume:
  943. QDF_BUG(!hif_bus_resume(hif_ctx));
  944. return errno;
  945. }
  946. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  947. {
  948. int errno;
  949. QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
  950. errno = hif_bus_resume(hif_ctx);
  951. if (errno)
  952. hif_err("Failed runtime resume: %d", errno);
  953. return errno;
  954. }
  955. void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  956. {
  957. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  958. struct CE_state *ce_state;
  959. if (!scn)
  960. return;
  961. if (scn->fastpath_mode_on) {
  962. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  963. return;
  964. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  965. qdf_spin_lock_bh(&ce_state->ce_index_lock);
  966. /*war_ce_src_ring_write_idx_set */
  967. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  968. ce_state->src_ring->write_index);
  969. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  970. Q_TARGET_ACCESS_END(scn);
  971. }
  972. }
  973. #endif /* FEATURE_RUNTIME_PM */