hif_runtime_pm.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333
  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/slab.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/if_arp.h>
  19. #include "hif_io32.h"
  20. #include "hif_runtime_pm.h"
  21. #include "hif.h"
  22. #include "target_type.h"
  23. #include "hif_main.h"
  24. #include "ce_main.h"
  25. #include "ce_api.h"
  26. #include "ce_internal.h"
  27. #include "ce_reg.h"
  28. #include "ce_bmi.h"
  29. #include "regtable.h"
  30. #include "hif_hw_version.h"
  31. #include <linux/debugfs.h>
  32. #include <linux/seq_file.h>
  33. #include "qdf_status.h"
  34. #include "qdf_atomic.h"
  35. #include "pld_common.h"
  36. #include "mp_dev.h"
  37. #include "hif_debug.h"
  38. #include "ce_tasklet.h"
  39. #include "targaddrs.h"
  40. #include "hif_exec.h"
  41. #define CNSS_RUNTIME_FILE "cnss_runtime_pm"
  42. #define CNSS_RUNTIME_FILE_PERM QDF_FILE_USR_READ
#ifdef FEATURE_RUNTIME_PM
/* Singleton runtime-PM context; all module state lives here. */
static struct hif_rtpm_ctx g_hif_rtpm_ctx;
/* Points at g_hif_rtpm_ctx between hif_rtpm_open() and hif_rtpm_close();
 * NULL otherwise. Most entry points check this before dereferencing.
 */
static struct hif_rtpm_ctx *gp_hif_rtpm_ctx;
  46. /**
  47. * hif_rtpm_id_to_string() - Convert dbgid to respective string
  48. * @id: debug id
  49. *
  50. * Debug support function to convert dbgid to string.
  51. * Please note to add new string in the array at index equal to
  52. * its enum value in wlan_rtpm_dbgid.
  53. *
  54. * Return: String of ID
  55. */
  56. static const char *hif_rtpm_id_to_string(enum hif_rtpm_client_id id)
  57. {
  58. static const char * const strings[] = {
  59. "HIF_RTPM_ID_RESERVED",
  60. "HIF_RTPM_HAL_REO_CMD",
  61. "HIF_RTPM_WMI",
  62. "HIF_RTPM_HTT",
  63. "HIF_RTPM_DP",
  64. "HIF_RTPM_RING_STATS",
  65. "HIF_RTPM_CE",
  66. "HIF_RTPM_FORCE_WAKE",
  67. "HIF_RTPM_ID_PM_QOS_NOTIFY",
  68. "HIF_RTPM_ID_WIPHY_SUSPEND",
  69. "HIF_RTPM_ID_MAX"
  70. };
  71. return strings[id];
  72. }
/**
 * hif_rtpm_read_usage_count() - Read device usage count
 *
 * Reads the kernel runtime-PM usage counter straight out of
 * dev->power; used by the stats dump and by the unbalanced
 * get/put sanity checks in hif_rtpm_put() and allow-suspend.
 *
 * Return: current usage count
 */
static inline int hif_rtpm_read_usage_count(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->dev->power.usage_count);
}
  82. /**
  83. * hif_rtpm_print(): print stats for runtimepm
  84. * @type: type of caller
  85. * @index: pointer to index to keep track of print position
  86. * @buf: pointer of buffer to print to
  87. * @fmt: format string
  88. *
  89. * debugging tool added to allow for unified API for debug/sys fs rtpm printing
  90. */
  91. static void
  92. hif_rtpm_print(enum hif_rtpm_fill_type type, int *index, void *buf,
  93. char *fmt, ...)
  94. {
  95. va_list args;
  96. va_start(args, fmt);
  97. if (type == HIF_RTPM_FILL_TYPE_SYSFS) {
  98. if (index)
  99. *index += vscnprintf((char *)buf + *index, PAGE_SIZE,
  100. fmt, args);
  101. } else if (type == HIF_RTPM_FILL_TYPE_DEBUGFS) {
  102. seq_vprintf((struct seq_file *)buf, fmt, args);
  103. }
  104. va_end(args);
  105. }
/* Print one named counter from the context's stats block, right-aligned
 * to match the rest of the dump. #undef'd after the last user below.
 */
#define HIF_RTPM_STATS(_type, _index, _s, _rtpm_ctx, _name) \
	hif_rtpm_print(_type, _index, _s, "%30s: %u\n", #_name, \
		       (_rtpm_ctx)->stats._name)

/**
 * hif_rtpm_log_debug_stats() - dump runtime-PM state and per-client stats
 * @s: destination; char buffer (sysfs) or seq_file (debugfs), per @type
 * @type: HIF_RTPM_FILL_TYPE_SYSFS or HIF_RTPM_FILL_TYPE_DEBUGFS
 *
 * Return: number of characters written into a sysfs buffer; for the
 * debugfs path the index is not advanced by seq_vprintf(), so the
 * value is only meaningful for sysfs callers.
 */
int hif_rtpm_log_debug_stats(void *s, enum hif_rtpm_fill_type type)
{
	int index = 0;
	struct hif_rtpm_client *client = NULL;
	struct hif_pm_runtime_lock *ctx;
	/* Indexed by enum hif_rtpm_state; keep in sync with that enum. */
	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
			"RESUMING_LINKUP", "SUSPENDING", "SUSPENDED"};
	int pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
	int i;

	hif_rtpm_print(type, &index, s, "%30s: %llu\n", "Current timestamp",
		       qdf_get_log_timestamp());

	hif_rtpm_print(type, &index, s, "%30s: %s\n", "Runtime PM state",
		       autopm_state[pm_state]);

	hif_rtpm_print(type, &index, s, "%30s: %llu\n", "Last Busy timestamp",
		       gp_hif_rtpm_ctx->stats.last_busy_ts);

	hif_rtpm_print(type, &index, s, "%30s: %ps\n", "Last Busy Marker",
		       gp_hif_rtpm_ctx->stats.last_busy_marker);

	/* DP and CE are the hot-path clients; dump their last-busy info
	 * explicitly. Both are registered unconditionally in hif_rtpm_open().
	 */
	hif_rtpm_print(type, &index, s, "Rx busy marker counts:\n");
	hif_rtpm_print(type, &index, s, "%30s: %u %llu\n",
		       hif_rtpm_id_to_string(HIF_RTPM_ID_DP),
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_cnt,
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_DP]->last_busy_ts);

	hif_rtpm_print(type, &index, s, "%30s: %u %llu\n",
		       hif_rtpm_id_to_string(HIF_RTPM_ID_CE),
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_cnt,
		       gp_hif_rtpm_ctx->clients[HIF_RTPM_ID_CE]->last_busy_ts);

	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, last_busy_id);

	if (pm_state == HIF_RTPM_STATE_SUSPENDED) {
		hif_rtpm_print(type, &index, s, "%30s: %llx us\n",
			       "Suspended Since",
			       gp_hif_rtpm_ctx->stats.suspend_ts);
	}

	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, resume_count);
	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, suspend_count);
	HIF_RTPM_STATS(type, &index, s, gp_hif_rtpm_ctx, suspend_err_count);

	hif_rtpm_print(type, &index, s, "%30s: %d\n", "PM Usage count",
		       hif_rtpm_read_usage_count());

	/* Per-client get/put table; unregistered slots are skipped. */
	hif_rtpm_print(type, &index, s,
		       "get  put  get-timestamp put-timestamp :DBGID_NAME\n");
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (!client)
			continue;
		hif_rtpm_print(type, &index, s, "%-10d ",
			       qdf_atomic_read(&client->get_count));
		hif_rtpm_print(type, &index, s, "%-10d ",
			       qdf_atomic_read(&client->put_count));
		hif_rtpm_print(type, &index, s, "0x%-10llx ", client->get_ts);
		hif_rtpm_print(type, &index, s, "0x%-10llx ", client->put_ts);
		hif_rtpm_print(type, &index, s, ":%-2d %-30s\n", i,
			       hif_rtpm_id_to_string(i));
	}
	hif_rtpm_print(type, &index, s, "\n");

	/* Walk the prevent-suspend list under its lock; early return keeps
	 * the unlock paired on the empty-list path.
	 */
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	if (list_empty(&gp_hif_rtpm_ctx->prevent_list)) {
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		return index;
	}

	hif_rtpm_print(type, &index, s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list) {
		hif_rtpm_print(type, &index, s, "%s", ctx->name);
		hif_rtpm_print(type, &index, s, " ");
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	hif_rtpm_print(type, &index, s, "\n");

	return index;
}
  176. /**
  177. * hif_rtpm_debugfs_show(): show debug stats for runtimepm
  178. * @s: file to print to
  179. * @data: unused
  180. *
  181. * debugging tool added to the debug fs for displaying runtimepm stats
  182. *
  183. * Return: 0
  184. */
  185. static int hif_rtpm_debugfs_show(struct seq_file *s, void *data)
  186. {
  187. return hif_rtpm_log_debug_stats((void *)s, HIF_RTPM_FILL_TYPE_DEBUGFS);
  188. }
  189. #undef HIF_RTPM_STATS
  190. /**
  191. * hif_rtpm_debugfs_open() - open a debug fs file to access the runtime pm stats
  192. * @inode:
  193. * @file:
  194. *
  195. * Return: linux error code of single_open.
  196. */
  197. static int hif_rtpm_debugfs_open(struct inode *inode, struct file *file)
  198. {
  199. return single_open(file, hif_rtpm_debugfs_show,
  200. inode->i_private);
  201. }
  202. static const struct file_operations hif_rtpm_fops = {
  203. .owner = THIS_MODULE,
  204. .open = hif_rtpm_debugfs_open,
  205. .release = single_release,
  206. .read = seq_read,
  207. .llseek = seq_lseek,
  208. };
  209. /**
  210. * hif_rtpm_debugfs_create() - creates runtimepm debugfs entry
  211. *
  212. * creates a debugfs entry to debug the runtime pm feature.
  213. */
  214. static void hif_rtpm_debugfs_create(void)
  215. {
  216. gp_hif_rtpm_ctx->pm_dentry = qdf_debugfs_create_entry(CNSS_RUNTIME_FILE,
  217. CNSS_RUNTIME_FILE_PERM,
  218. NULL,
  219. NULL,
  220. &hif_rtpm_fops);
  221. }
  222. /**
  223. * hif_rtpm_debugfs_remove() - removes runtimepm debugfs entry
  224. *
  225. * removes the debugfs entry to debug the runtime pm feature.
  226. */
  227. static void hif_rtpm_debugfs_remove(void)
  228. {
  229. qdf_debugfs_remove_file(gp_hif_rtpm_ctx->pm_dentry);
  230. }
/**
 * hif_rtpm_init() - Initialize Runtime PM
 * @dev: device structure
 * @delay: delay to be configured for auto suspend
 *
 * This function will init all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	/* Allow userspace/kernel to runtime-suspend the device again
	 * (reverses the forbid in hif_rtpm_exit()).
	 */
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	/* NOTE(review): drops a usage-count reference without suspending;
	 * presumably balances a get taken by the platform/bus layer before
	 * this point — confirm against the PLD/PCIe probe path.
	 */
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}
/**
 * hif_rtpm_exit() - Deinit/Exit Runtime PM
 * @dev: device structure
 *
 * This function will deinit all the Runtime PM config.
 *
 * Return: void
 */
static void hif_rtpm_exit(struct device *dev)
{
	/* Re-take the reference released by hif_rtpm_init() so the device
	 * can no longer runtime-suspend, mark it active, and forbid any
	 * further runtime PM until hif_rtpm_init() runs again.
	 */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_forbid(dev);
}
  263. static void hif_rtpm_alloc_last_busy_hist(void)
  264. {
  265. int i;
  266. for (i = 0; i < CE_COUNT_MAX; i++) {
  267. if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7) {
  268. gp_hif_rtpm_ctx->busy_hist[i] = NULL;
  269. continue;
  270. }
  271. gp_hif_rtpm_ctx->busy_hist[i] =
  272. qdf_mem_malloc(sizeof(struct hif_rtpm_last_busy_hist));
  273. if (!gp_hif_rtpm_ctx->busy_hist[i])
  274. return;
  275. }
  276. }
  277. static void hif_rtpm_free_last_busy_hist(void)
  278. {
  279. int i;
  280. for (i = 0; i < CE_COUNT_MAX; i++) {
  281. if (i != CE_ID_1 && i != CE_ID_2 && i != CE_ID_7)
  282. continue;
  283. qdf_mem_free(gp_hif_rtpm_ctx->busy_hist[i]);
  284. }
  285. }
/* Attach the runtime-PM context to the HIF layer: bind the singleton,
 * initialize locks/state, and register the always-present internal
 * clients (CE, FORCE_WAKE). Paired with hif_rtpm_close().
 */
void hif_rtpm_open(struct hif_softc *scn)
{
	gp_hif_rtpm_ctx = &g_hif_rtpm_ctx;
	gp_hif_rtpm_ctx->dev = scn->qdf_dev->dev;
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->runtime_suspend_lock);
	qdf_spinlock_create(&gp_hif_rtpm_ctx->prevent_list_lock);
	qdf_atomic_init(&gp_hif_rtpm_ctx->pm_state);
	/* State starts as NONE; hif_rtpm_start() moves it to ON. */
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	qdf_atomic_init(&gp_hif_rtpm_ctx->monitor_wake_intr);
	INIT_LIST_HEAD(&gp_hif_rtpm_ctx->prevent_list);
	gp_hif_rtpm_ctx->client_count = 0;
	gp_hif_rtpm_ctx->pending_job = 0;
	/* Internal clients, deregistered again in hif_rtpm_close() */
	hif_rtpm_register(HIF_RTPM_ID_CE, NULL);
	hif_rtpm_register(HIF_RTPM_ID_FORCE_WAKE, NULL);
	hif_rtpm_alloc_last_busy_hist();
	hif_info_high("Runtime PM attached");
}
static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock);

/**
 * hif_rtpm_sanitize_exit(): sanitize runtime PM gets/puts from driver
 *
 * Ensure all gets/puts are in sync before exiting runtime PM feature.
 * Also make sure all runtime PM locks are deinitialized properly.
 *
 * Return: void
 */
static void hif_rtpm_sanitize_exit(void)
{
	struct hif_pm_runtime_lock *ctx, *tmp;
	struct hif_rtpm_client *client;
	int i, active_count;

	/* Tear down any prevent-suspend locks still on the list. The
	 * _safe iterator is required because deinit unlinks the entry.
	 */
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &gp_hif_rtpm_ctx->prevent_list, list) {
		hif_runtime_lock_deinit(ctx);
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	/* check if get and put out of sync for all clients */
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		client = gp_hif_rtpm_ctx->clients[i];
		if (client) {
			if (qdf_atomic_read(&client->active_count)) {
				active_count =
					qdf_atomic_read(&client->active_count);
				hif_err("Client active: %u- %s", i,
					hif_rtpm_id_to_string(i));
				QDF_DEBUG_PANIC("Client active on exit!");
				/* Drop the leaked references so the kernel
				 * usage count is balanced even on release
				 * builds where the panic is a no-op.
				 */
				while (active_count--)
					__hif_rtpm_put_noidle(
							gp_hif_rtpm_ctx->dev);
			}
			/* All clients should have deregistered before this
			 * point (CE/FORCE_WAKE are handled in close); any
			 * survivor is a driver bug, but free it anyway.
			 */
			QDF_DEBUG_PANIC("Client not deinitialized");
			qdf_mem_free(client);
			gp_hif_rtpm_ctx->clients[i] = NULL;
		}
	}
}
/**
 * hif_rtpm_sanitize_ssr_exit() - Empty the suspend list on SSR
 *
 * API is used to empty the runtime pm prevent suspend list.
 * Unlike hif_rtpm_sanitize_exit(), the lock objects themselves are kept
 * alive: on subsystem restart their owners still hold references and
 * will reuse them after recovery.
 *
 * Return: void
 */
static void hif_rtpm_sanitize_ssr_exit(void)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_for_each_entry_safe(ctx, tmp,
				 &gp_hif_rtpm_ctx->prevent_list, list) {
		/* Unlinks the entry, hence the _safe iterator */
		__hif_pm_runtime_allow_suspend(ctx);
	}
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
}
/* Detach the runtime-PM context: free history, drop the internal clients,
 * sanitize any leftover client/lock state (SSR-aware), and reset the
 * singleton so gp_hif_rtpm_ctx reads as "not attached".
 */
void hif_rtpm_close(struct hif_softc *scn)
{
	hif_rtpm_free_last_busy_hist();
	hif_rtpm_deregister(HIF_RTPM_ID_CE);
	hif_rtpm_deregister(HIF_RTPM_ID_FORCE_WAKE);

	/* During recovery the lock objects must survive; otherwise do a
	 * full teardown of clients and locks.
	 */
	hif_is_recovery_in_progress(scn) ?
		hif_rtpm_sanitize_ssr_exit() :
		hif_rtpm_sanitize_exit();

	qdf_mem_set(gp_hif_rtpm_ctx, sizeof(*gp_hif_rtpm_ctx), 0);
	gp_hif_rtpm_ctx = NULL;
	hif_info_high("Runtime PM context detached");
}
/* Enable runtime PM if the ini allows it and the con-mode supports it,
 * then configure the kernel autosuspend machinery and the debugfs entry.
 * Paired with hif_rtpm_stop().
 */
void hif_rtpm_start(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);

	gp_hif_rtpm_ctx->enable_rpm = scn->hif_config.enable_runtime_pm;
	if (!gp_hif_rtpm_ctx->enable_rpm) {
		hif_info_high("RUNTIME PM is disabled in ini");
		return;
	}

	/* Factory-test, epping and monitor modes never runtime-suspend */
	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE) {
		hif_info("RUNTIME PM is disabled for FTM/EPPING/MONITOR mode");
		return;
	}

	hif_info_high("Enabling RUNTIME PM, Delay: %d ms",
		      scn->hif_config.runtime_pm_delay);

	/* Publish state before arming the kernel side so concurrent
	 * get/put callers see ON once autosuspend is live.
	 */
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_ON);
	hif_rtpm_init(gp_hif_rtpm_ctx->dev, scn->hif_config.runtime_pm_delay);
	/* cfg_delay remembers the ini default for later restore */
	gp_hif_rtpm_ctx->cfg_delay = scn->hif_config.runtime_pm_delay;
	gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
	hif_rtpm_debugfs_create();
}
/* Disable runtime PM: mirrors the enable checks in hif_rtpm_start(),
 * forbids further runtime suspend, resumes the device synchronously and
 * removes the debugfs entry.
 */
void hif_rtpm_stop(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);

	if (!gp_hif_rtpm_ctx->enable_rpm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
	    mode == QDF_GLOBAL_MONITOR_MODE)
		return;

	hif_rtpm_exit(gp_hif_rtpm_ctx->dev);

	/* Device must be fully awake before the state flips back to NONE */
	hif_rtpm_sync_resume();

	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, HIF_RTPM_STATE_NONE);
	hif_rtpm_debugfs_remove();
}
  407. QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rtpm_cbk)(void))
  408. {
  409. struct hif_rtpm_client *client;
  410. if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
  411. hif_err("Runtime PM context NULL");
  412. return QDF_STATUS_E_FAILURE;
  413. }
  414. if (id >= HIF_RTPM_ID_MAX || gp_hif_rtpm_ctx->clients[id]) {
  415. hif_err("Invalid client %d", id);
  416. return QDF_STATUS_E_INVAL;
  417. }
  418. client = qdf_mem_malloc(sizeof(struct hif_rtpm_client));
  419. if (!client)
  420. return QDF_STATUS_E_NOMEM;
  421. client->hif_rtpm_cbk = hif_rtpm_cbk;
  422. qdf_atomic_init(&client->active_count);
  423. qdf_atomic_init(&client->get_count);
  424. qdf_atomic_init(&client->put_count);
  425. gp_hif_rtpm_ctx->clients[id] = client;
  426. gp_hif_rtpm_ctx->client_count++;
  427. return QDF_STATUS_SUCCESS;
  428. }
/* Deregister runtime-PM client @id. If the client still holds references
 * (get without matching put), log, panic on debug builds, and drop the
 * leaked references so the kernel usage count stays balanced.
 *
 * Return: QDF_STATUS_SUCCESS, or E_FAILURE/E_INVAL on bad context/id.
 */
QDF_STATUS hif_rtpm_deregister(uint32_t id)
{
	struct hif_rtpm_client *client;
	int active_count;

	if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
		hif_err("Runtime PM context NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];
	if (qdf_atomic_read(&client->active_count)) {
		active_count = qdf_atomic_read(&client->active_count);
		hif_err("Client: %u-%s Runtime PM active",
			id, hif_rtpm_id_to_string(id));
		hif_err("last get called: 0x%llx, get count: %d, put count: %d",
			client->get_ts, qdf_atomic_read(&client->get_count),
			qdf_atomic_read(&client->put_count));
		QDF_DEBUG_PANIC("Get and PUT call out of sync!");
		/* Balance the device usage count for release builds where
		 * the panic above is compiled out.
		 */
		while (active_count--)
			__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
	}

	qdf_mem_free(client);
	gp_hif_rtpm_ctx->clients[id] = NULL;
	return QDF_STATUS_SUCCESS;
}
  457. QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
  458. {
  459. if (delay < HIF_RTPM_DELAY_MIN || delay > HIF_RTPM_DELAY_MAX) {
  460. hif_err("Invalid delay value %d ms", delay);
  461. return QDF_STATUS_E_INVAL;
  462. }
  463. __hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev, delay);
  464. gp_hif_rtpm_ctx->delay = delay;
  465. hif_info_high("RTPM delay set: %d ms", delay);
  466. return QDF_STATUS_SUCCESS;
  467. }
  468. QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
  469. {
  470. if (gp_hif_rtpm_ctx->delay == gp_hif_rtpm_ctx->cfg_delay) {
  471. hif_info_rl("RTPM delay already default: %d",
  472. gp_hif_rtpm_ctx->delay);
  473. return QDF_STATUS_E_ALREADY;
  474. }
  475. __hif_rtpm_set_autosuspend_delay(gp_hif_rtpm_ctx->dev,
  476. gp_hif_rtpm_ctx->cfg_delay);
  477. gp_hif_rtpm_ctx->delay = gp_hif_rtpm_ctx->cfg_delay;
  478. hif_info_rl("RTPM delay set: %d ms", gp_hif_rtpm_ctx->delay);
  479. return QDF_STATUS_SUCCESS;
  480. }
/* Return the currently configured autosuspend delay in milliseconds. */
int hif_rtpm_get_autosuspend_delay(void)
{
	return gp_hif_rtpm_ctx->delay;
}
  485. int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
  486. {
  487. struct hif_pm_runtime_lock *context;
  488. if (qdf_unlikely(!gp_hif_rtpm_ctx)) {
  489. hif_err("Runtime PM context NULL");
  490. return QDF_STATUS_E_FAILURE;
  491. }
  492. hif_debug("Initializing Runtime PM wakelock %s", name);
  493. context = qdf_mem_malloc(sizeof(*context));
  494. if (!context)
  495. return -ENOMEM;
  496. context->name = name ? name : "Default";
  497. lock->lock = context;
  498. return 0;
  499. }
/* Free a prevent-suspend lock created by hif_runtime_lock_init(). If the
 * lock is still active it is first released (allow-suspend) so the device
 * usage count stays balanced.
 */
void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *lock)
{
	if (!lock) {
		hif_err("Runtime PM lock already freed");
		return;
	}

	hif_debug("Deinitializing Runtime PM wakelock %s", lock->name);

	/* Context may already be detached during driver unload; in that
	 * case there is no list to unlink from, just free the memory.
	 */
	if (gp_hif_rtpm_ctx) {
		qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
		__hif_pm_runtime_allow_suspend(lock);
		qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	}

	qdf_mem_free(lock);
}
/**
 * hif_rtpm_enabled() - To check if Runtime PM is enabled
 *
 * This function will check if Runtime PM is enabled or not.
 * Falls back to querying the kernel runtime-PM state of the device when
 * the ini flag is off.
 *
 * Return: true if runtime PM is enabled, false otherwise
 */
static bool hif_rtpm_enabled(void)
{
	if (qdf_unlikely(!gp_hif_rtpm_ctx))
		return false;

	if (gp_hif_rtpm_ctx->enable_rpm)
		return true;

	return __hif_rtpm_enabled(gp_hif_rtpm_ctx->dev);
}
  529. QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id)
  530. {
  531. struct hif_rtpm_client *client = NULL;
  532. int ret = QDF_STATUS_E_FAILURE;
  533. int pm_state;
  534. if (!hif_rtpm_enabled())
  535. return QDF_STATUS_SUCCESS;
  536. if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
  537. QDF_DEBUG_PANIC("Invalid client, id: %u", id);
  538. return -QDF_STATUS_E_INVAL;
  539. }
  540. client = gp_hif_rtpm_ctx->clients[id];
  541. if (type != HIF_RTPM_GET_ASYNC) {
  542. switch (type) {
  543. case HIF_RTPM_GET_FORCE:
  544. ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
  545. break;
  546. case HIF_RTPM_GET_SYNC:
  547. ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);
  548. break;
  549. case HIF_RTPM_GET_NORESUME:
  550. __hif_rtpm_get_noresume(gp_hif_rtpm_ctx->dev);
  551. ret = 0;
  552. break;
  553. default:
  554. QDF_DEBUG_PANIC("Invalid call type");
  555. return QDF_STATUS_E_BADMSG;
  556. }
  557. if (ret < 0 && ret != -EINPROGRESS) {
  558. hif_err("pm_state: %d ret: %d",
  559. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  560. ret);
  561. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  562. } else {
  563. ret = QDF_STATUS_SUCCESS;
  564. }
  565. goto out;
  566. }
  567. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  568. if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP) {
  569. ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);
  570. /* Get will return 1 if the device is already active,
  571. * just return success in that case
  572. */
  573. if (ret > 0) {
  574. ret = QDF_STATUS_SUCCESS;
  575. } else if (ret == 0 || ret == -EINPROGRESS) {
  576. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  577. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  578. if (pm_state >= HIF_RTPM_STATE_RESUMING) {
  579. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  580. gp_hif_rtpm_ctx->stats.request_resume_ts =
  581. qdf_get_log_timestamp();
  582. gp_hif_rtpm_ctx->stats.request_resume_id = id;
  583. ret = QDF_STATUS_E_FAILURE;
  584. } else {
  585. ret = QDF_STATUS_SUCCESS;
  586. }
  587. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  588. } else if (ret < 0) {
  589. hif_err("pm_state: %d ret: %d",
  590. qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
  591. ret);
  592. __hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
  593. }
  594. } else if (pm_state >= HIF_RTPM_STATE_RESUMING) {
  595. /* Do not log in performance path */
  596. if (id != HIF_RTPM_ID_DP)
  597. hif_info_high("request RTPM resume by %d- %s",
  598. id, hif_rtpm_id_to_string(id));
  599. __hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
  600. gp_hif_rtpm_ctx->stats.request_resume_ts =
  601. qdf_get_log_timestamp();
  602. gp_hif_rtpm_ctx->stats.request_resume_id = id;
  603. return QDF_STATUS_E_FAILURE;
  604. }
  605. out:
  606. if (QDF_IS_STATUS_SUCCESS(ret)) {
  607. qdf_atomic_inc(&client->active_count);
  608. qdf_atomic_inc(&client->get_count);
  609. client->get_ts = qdf_get_log_timestamp();
  610. }
  611. return ret;
  612. }
/* Release a runtime-PM reference taken via hif_rtpm_get() for client @id.
 * @type selects the kernel put flavor (autosuspend, noidle, or
 * synchronous suspend). Unbalanced puts are detected and refused.
 *
 * Return: QDF_STATUS_SUCCESS; E_INVAL for a bad id; E_BADMSG for a bad
 * type; E_CANCELED when the put would be unbalanced.
 */
QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
{
	struct hif_rtpm_client *client;
	int usage_count;

	if (!hif_rtpm_enabled())
		return QDF_STATUS_SUCCESS;

	if (id >= HIF_RTPM_ID_MAX || !gp_hif_rtpm_ctx->clients[id]) {
		hif_err("Invalid client, id: %u", id);
		return QDF_STATUS_E_INVAL;
	}

	client = gp_hif_rtpm_ctx->clients[id];

	usage_count = hif_rtpm_read_usage_count();
	/* With runtime PM disabled the baseline usage count is held at 2
	 * (NOTE(review): presumably one pld/bus reference plus the
	 * hif_rtpm_exit() get — confirm); a put from there would be
	 * unbalanced.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count || !qdf_atomic_read(&client->active_count)) {
		hif_info_high("Put without a Get operation, %u-%s",
			      id, hif_rtpm_id_to_string(id));
		return QDF_STATUS_E_CANCELED;
	}

	switch (type) {
	case HIF_RTPM_PUT_ASYNC:
		__hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_NOIDLE:
		__hif_rtpm_put_noidle(gp_hif_rtpm_ctx->dev);
		break;
	case HIF_RTPM_PUT_SYNC_SUSPEND:
		__hif_rtpm_put_sync_suspend(gp_hif_rtpm_ctx->dev);
		break;
	default:
		QDF_DEBUG_PANIC("Invalid call type");
		return QDF_STATUS_E_BADMSG;
	}

	/* Restart the autosuspend timer and book-keep the release */
	__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
	qdf_atomic_dec(&client->active_count);
	qdf_atomic_inc(&client->put_count);
	client->put_ts = qdf_get_log_timestamp();
	gp_hif_rtpm_ctx->stats.last_busy_ts = client->put_ts;

	return QDF_STATUS_SUCCESS;
}
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 * reason
 * @lock: runtime_pm lock being acquired
 *
 * Caller must hold prevent_list_lock (list is modified here).
 *
 * Return: 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	/* Idempotent: an already-active lock holds its reference */
	if (lock->active)
		return 0;

	ret = __hif_rtpm_get(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	/* The lock stays listed/active even on get error — see above */
	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	return ret;
}
/**
 * __hif_pm_runtime_allow_suspend() - Allow Runtime suspend
 * @lock: runtime pm lock
 *
 * This function will allow runtime suspend, by decrementing
 * device's usage count. Caller must hold prevent_list_lock.
 *
 * Return: status
 * NOTE(review): mixes 0/-errno from __hif_rtpm_put_auto() with
 * QDF_STATUS_E_CANCELED on the unbalanced-put paths; callers only
 * appear to ignore the value — confirm before relying on it.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	int usage_count;

	/* Nothing to release if this lock never took a reference */
	if (gp_hif_rtpm_ctx->prevent_cnt == 0 || !lock->active)
		return ret;

	usage_count = hif_rtpm_read_usage_count();
	/*
	 * For runtime PM enabled case, the usage count should never be 0
	 * at this point. For runtime PM disabled case, it should never be
	 * 2 at this point. Catch unexpected PUT without GET here.
	 */
	if (usage_count == 2 && !gp_hif_rtpm_ctx->enable_rpm) {
		hif_err("Unexpected PUT when runtime PM is disabled");
		QDF_BUG(0);
		return QDF_STATUS_E_CANCELED;
	} else if (!usage_count) {
		hif_info_high("Put without a Get operation, %s", lock->name);
		return QDF_STATUS_E_CANCELED;
	}

	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
	ret = __hif_rtpm_put_auto(gp_hif_rtpm_ctx->dev);

	list_del(&lock->list);
	lock->active = false;
	gp_hif_rtpm_ctx->prevent_cnt--;
	gp_hif_rtpm_ctx->stats.allow_suspend++;
	return ret;
}
/* Public wrapper: take @lock's prevent-suspend reference under the list
 * lock. Must not be called from hard-IRQ context (spinlock is _bh).
 *
 * Return: 0, or -EINVAL when runtime PM is off or @lock is NULL.
 */
int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *lock)
{
	if (!hif_rtpm_enabled() || !lock)
		return -EINVAL;

	if (in_irq())
		WARN_ON(1);

	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	__hif_pm_runtime_prevent_suspend(lock);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	/* Log only when the get raced with an in-flight suspend */
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
		HIF_RTPM_STATE_SUSPENDING)
		hif_info_high("request RTPM resume by %s",
			      lock->name);

	return 0;
}
/**
 * __hif_pm_runtime_prevent_suspend_sync() - synchronized prevent runtime
 * suspend for a protocol reason
 * @lock: runtime_pm lock being acquired
 *
 * Like __hif_pm_runtime_prevent_suspend() but uses the synchronous get
 * (may block until the device resumes) and takes prevent_list_lock
 * itself.
 *
 * Return: 0 if successful.
 */
static
int __hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
{
	int ret = 0;

	if (lock->active)
		return 0;

	ret = __hif_rtpm_get_sync(gp_hif_rtpm_ctx->dev);

	/*
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't do runtime_put here as in later point allow
	 * suspend gets called with the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		gp_hif_rtpm_ctx->stats.runtime_get_err++;
		hif_err("pm_state: %d ret: %d",
			qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state),
			ret);
	}

	/* List the lock even on get error — allow-suspend balances later */
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
	list_add_tail(&lock->list, &gp_hif_rtpm_ctx->prevent_list);
	lock->active = true;
	gp_hif_rtpm_ctx->prevent_cnt++;
	gp_hif_rtpm_ctx->stats.prevent_suspend++;
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);

	return ret;
}
  774. int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *lock)
  775. {
  776. if (!hif_rtpm_enabled())
  777. return 0;
  778. if (!lock)
  779. return -EINVAL;
  780. if (in_irq())
  781. WARN_ON(1);
  782. __hif_pm_runtime_prevent_suspend_sync(lock);
  783. if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
  784. HIF_RTPM_STATE_SUSPENDING)
  785. hif_info_high("request RTPM resume by %s",
  786. lock->name);
  787. return 0;
  788. }
  789. int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *lock)
  790. {
  791. if (!hif_rtpm_enabled())
  792. return 0;
  793. if (!lock)
  794. return -EINVAL;
  795. if (in_irq())
  796. WARN_ON(1);
  797. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  798. __hif_pm_runtime_allow_suspend(lock);
  799. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->prevent_list_lock);
  800. return 0;
  801. }
  802. QDF_STATUS hif_rtpm_sync_resume(void)
  803. {
  804. struct device *dev;
  805. int pm_state;
  806. int ret;
  807. if (!hif_rtpm_enabled())
  808. return 0;
  809. dev = gp_hif_rtpm_ctx->dev;
  810. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  811. ret = __hif_rtpm_resume(dev);
  812. __hif_rtpm_mark_last_busy(dev);
  813. if (ret >= 0) {
  814. gp_hif_rtpm_ctx->stats.resume_count++;
  815. gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
  816. gp_hif_rtpm_ctx->stats.last_busy_ts =
  817. gp_hif_rtpm_ctx->stats.resume_ts;
  818. return QDF_STATUS_SUCCESS;
  819. }
  820. hif_err("pm_state: %d, err: %d", pm_state, ret);
  821. return QDF_STATUS_E_FAILURE;
  822. }
  823. void hif_rtpm_request_resume(void)
  824. {
  825. __hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
  826. hif_info_high("request RTPM resume %s", (char *)_RET_IP_);
  827. }
/* Request a resume if suspend is in progress/complete; otherwise just
 * refresh the last-busy timestamp.
 */
void hif_rtpm_check_and_request_resume(void)
{
	hif_rtpm_suspend_lock();
	if (qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state) >=
			HIF_RTPM_STATE_SUSPENDING) {
		/* Lock is dropped before the resume request; the stats
		 * writes below are therefore outside the lock.
		 */
		hif_rtpm_suspend_unlock();
		__hif_rtpm_request_resume(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.request_resume_ts =
						qdf_get_log_timestamp();
		gp_hif_rtpm_ctx->stats.request_resume_id = HIF_RTPM_ID_RESERVED;
	} else {
		/* Device active: only refresh last-busy bookkeeping */
		__hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
		gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
		hif_rtpm_suspend_unlock();
	}
}
/* Read the monitor-wake-interrupt atomic flag */
int hif_rtpm_get_monitor_wake_intr(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->monitor_wake_intr);
}
/* Set the monitor-wake-interrupt atomic flag to @val */
void hif_rtpm_set_monitor_wake_intr(int val)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->monitor_wake_intr, val);
}
/* Dump runtime-PM last-busy information: global last-busy marker,
 * CE/DP client counters, and per-CE last-busy timestamp history.
 */
void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn;
	struct hif_rtpm_ctx *rtpm_ctx = gp_hif_rtpm_ctx;
	struct hif_rtpm_last_busy_hist *hist;
	unsigned long cur_idx;
	int i;

	scn = HIF_GET_SOFTC(hif_ctx);
	if (!scn)
		return;

	hif_info_high("RTPM last busy ts:%llu client:%s from:%ps",
		      rtpm_ctx->stats.last_busy_ts,
		      hif_rtpm_id_to_string(rtpm_ctx->stats.last_busy_id),
		      rtpm_ctx->stats.last_busy_marker);

	/*Display CE and DP clients RTPM stats*/
	for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
		/* Only registered CE/DP clients are of interest here */
		if (!rtpm_ctx->clients[i] ||
		    (i != HIF_RTPM_ID_CE && i != HIF_RTPM_ID_DP))
			continue;
		hif_info_high("RTPM client:%s busy_ts:%llu get_ts:%llu put_ts:%llu get_cnt:%d put_cnt:%d",
			      hif_rtpm_id_to_string(i),
			      rtpm_ctx->clients[i]->last_busy_ts,
			      rtpm_ctx->clients[i]->get_ts,
			      rtpm_ctx->clients[i]->put_ts,
			      qdf_atomic_read(&rtpm_ctx->clients[i]->get_count),
			      qdf_atomic_read(&rtpm_ctx->clients[i]->put_count));
	}

	/* Print four samples from each CE's last-busy timestamp ring,
	 * spaced 4 slots apart starting at the current index.
	 */
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hist = gp_hif_rtpm_ctx->busy_hist[i];
		if (!hist)
			continue;
		cur_idx = hist->last_busy_idx;
		hif_info_high("RTPM CE-%u last busy_cnt:%lu cur_idx:%lu ts1:%llu ts2:%llu ts3:%llu ts4:%llu",
			      i, hist->last_busy_cnt, cur_idx,
			      hist->last_busy_ts[cur_idx & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 4) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 8) & HIF_RTPM_BUSY_HIST_MASK],
			      hist->last_busy_ts[(cur_idx + 12) & HIF_RTPM_BUSY_HIST_MASK]);
	}
}
  892. void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
  893. unsigned long ce_id)
  894. {
  895. struct hif_rtpm_last_busy_hist *hist;
  896. unsigned long idx;
  897. if (!scn || !gp_hif_rtpm_ctx->busy_hist[ce_id])
  898. return;
  899. hist = gp_hif_rtpm_ctx->busy_hist[ce_id];
  900. hist->last_busy_cnt++;
  901. hist->last_busy_idx++;
  902. idx = hist->last_busy_idx & HIF_RTPM_BUSY_HIST_MASK;
  903. hist->last_busy_ts[idx] = qdf_get_log_timestamp();
  904. }
  905. void hif_rtpm_mark_last_busy(uint32_t id)
  906. {
  907. __hif_rtpm_mark_last_busy(gp_hif_rtpm_ctx->dev);
  908. gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
  909. gp_hif_rtpm_ctx->stats.last_busy_id = id;
  910. gp_hif_rtpm_ctx->stats.last_busy_marker = (void *)_RET_IP_;
  911. if (gp_hif_rtpm_ctx->clients[id]) {
  912. gp_hif_rtpm_ctx->clients[id]->last_busy_cnt++;
  913. gp_hif_rtpm_ctx->clients[id]->last_busy_ts =
  914. gp_hif_rtpm_ctx->stats.last_busy_ts;
  915. }
  916. }
  917. void hif_rtpm_set_client_job(uint32_t client_id)
  918. {
  919. int pm_state;
  920. if (!gp_hif_rtpm_ctx->clients[client_id])
  921. return;
  922. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  923. pm_state = qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
  924. if (pm_state <= HIF_RTPM_STATE_RESUMING_LINKUP &&
  925. gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk)
  926. gp_hif_rtpm_ctx->clients[client_id]->hif_rtpm_cbk();
  927. else
  928. qdf_set_bit(client_id, &gp_hif_rtpm_ctx->pending_job);
  929. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  930. }
/**
 * hif_rtpm_pending_job() - continue jobs when bus resumed
 *
 * Run deferred client callbacks recorded in the pending_job bitmap.
 * Caller must hold gp_hif_rtpm_ctx->runtime_lock (both call sites in
 * this file do); the lock is dropped around each callback invocation
 * and re-acquired afterwards, so the bitmap can change between
 * iterations.
 *
 * Return: Void
 */
static void hif_rtpm_pending_job(void)
{
	int i;

	for (i = 0; i < gp_hif_rtpm_ctx->client_count; i++) {
		if (qdf_test_and_clear_bit(i, &gp_hif_rtpm_ctx->pending_job)) {
			/* Drop the lock while calling out to the client */
			qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
			if (gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk)
				gp_hif_rtpm_ctx->clients[i]->hif_rtpm_cbk();
			qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
		}
	}
}
  948. #define PREVENT_LIST_STRING_LEN 200
  949. void hif_rtpm_print_prevent_list(void)
  950. {
  951. struct hif_rtpm_client *client;
  952. struct hif_pm_runtime_lock *ctx;
  953. char *str_buf;
  954. int i, prevent_list_count, len = 0;
  955. str_buf = qdf_mem_malloc(PREVENT_LIST_STRING_LEN);
  956. if (!str_buf)
  957. return;
  958. qdf_spin_lock(&gp_hif_rtpm_ctx->prevent_list_lock);
  959. prevent_list_count = gp_hif_rtpm_ctx->prevent_cnt;
  960. if (prevent_list_count) {
  961. list_for_each_entry(ctx, &gp_hif_rtpm_ctx->prevent_list, list)
  962. len += qdf_scnprintf(str_buf + len,
  963. PREVENT_LIST_STRING_LEN - len,
  964. "%s ", ctx->name);
  965. }
  966. qdf_spin_unlock(&gp_hif_rtpm_ctx->prevent_list_lock);
  967. if (prevent_list_count)
  968. hif_info_high("prevent_suspend_cnt %u, prevent_list: %s",
  969. prevent_list_count, str_buf);
  970. qdf_mem_free(str_buf);
  971. for (i = 0; i < HIF_RTPM_ID_MAX; i++) {
  972. client = gp_hif_rtpm_ctx->clients[i];
  973. if (client && qdf_atomic_read(&client->active_count))
  974. hif_info_high("client: %d: %s- active count: %d", i,
  975. hif_rtpm_id_to_string(i),
  976. qdf_atomic_read(&client->active_count));
  977. }
  978. }
  979. /**
  980. * hif_rtpm_is_suspend_allowed() - Reject suspend if client is active
  981. *
  982. * Return: True if no clients are active
  983. */
  984. static bool hif_rtpm_is_suspend_allowed(void)
  985. {
  986. if (!gp_hif_rtpm_ctx || !gp_hif_rtpm_ctx->enable_rpm)
  987. return false;
  988. if (!hif_rtpm_read_usage_count())
  989. return true;
  990. return false;
  991. }
/* Acquire the runtime-suspend spinlock (irqsave variant; the saved
 * flags are presumably kept inside the qdf lock object — confirm
 * against the qdf_spin_lock_irqsave definition).
 */
void hif_rtpm_suspend_lock(void)
{
	qdf_spin_lock_irqsave(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
/* Release the runtime-suspend spinlock taken by hif_rtpm_suspend_lock() */
void hif_rtpm_suspend_unlock(void)
{
	qdf_spin_unlock_irqrestore(&gp_hif_rtpm_ctx->runtime_suspend_lock);
}
/**
 * hif_rtpm_set_state(): utility function
 * @state: state to set
 *
 * Atomically store @state into the runtime-PM state variable.
 *
 * Return: Void
 */
static inline
void hif_rtpm_set_state(enum hif_rtpm_state state)
{
	qdf_atomic_set(&gp_hif_rtpm_ctx->pm_state, state);
}
/* Read the current runtime-PM state (enum hif_rtpm_state value) */
int hif_rtpm_get_state(void)
{
	return qdf_atomic_read(&gp_hif_rtpm_ctx->pm_state);
}
  1015. int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  1016. {
  1017. if (!hif_can_suspend_link(hif_ctx)) {
  1018. hif_err("Runtime PM not supported for link up suspend");
  1019. return -EINVAL;
  1020. }
  1021. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  1022. hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDING);
  1023. /* keep this after set suspending */
  1024. if (!hif_rtpm_is_suspend_allowed()) {
  1025. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  1026. hif_rtpm_print_prevent_list();
  1027. gp_hif_rtpm_ctx->stats.suspend_err_count++;
  1028. gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
  1029. hif_info_high("Runtime PM not allowed now");
  1030. return -EINVAL;
  1031. }
  1032. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  1033. return QDF_STATUS_SUCCESS;
  1034. }
/* Finalize a successful runtime suspend: enter SUSPENDED state and
 * record suspend statistics.
 */
void hif_process_runtime_suspend_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_SUSPENDED);
	gp_hif_rtpm_ctx->stats.suspend_count++;
	gp_hif_rtpm_ctx->stats.suspend_ts = qdf_get_log_timestamp();
}
  1041. void hif_process_runtime_suspend_failure(void)
  1042. {
  1043. qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  1044. hif_rtpm_set_state(HIF_RTPM_STATE_ON);
  1045. hif_rtpm_pending_job();
  1046. qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
  1047. gp_hif_rtpm_ctx->stats.suspend_err_count++;
  1048. gp_hif_rtpm_ctx->stats.suspend_err_ts = qdf_get_log_timestamp();
  1049. gp_hif_rtpm_ctx->stats.last_busy_ts = qdf_get_log_timestamp();
  1050. hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
  1051. }
/* Prepare for runtime resume: clear the wake-interrupt monitor flag and
 * enter the RESUMING state under runtime_lock.
 */
void hif_pre_runtime_resume(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING);
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
/* Link is back up during resume: enter RESUMING_LINKUP and run jobs
 * clients deferred while the bus was down.
 */
void hif_process_runtime_resume_linkup(void)
{
	qdf_spin_lock_bh(&gp_hif_rtpm_ctx->runtime_lock);
	hif_rtpm_set_state(HIF_RTPM_STATE_RESUMING_LINKUP);
	/* Requires runtime_lock held; drops/retakes it per callback */
	hif_rtpm_pending_job();
	qdf_spin_unlock_bh(&gp_hif_rtpm_ctx->runtime_lock);
}
/* Finalize a successful runtime resume: enter ON state and record
 * resume statistics.
 */
void hif_process_runtime_resume_success(void)
{
	hif_rtpm_set_state(HIF_RTPM_STATE_ON);
	gp_hif_rtpm_ctx->stats.resume_count++;
	gp_hif_rtpm_ctx->stats.resume_ts = qdf_get_log_timestamp();
	/* NOTE(review): hif_rtpm_mark_last_busy() below immediately
	 * overwrites last_busy_ts with a fresh timestamp, so this
	 * assignment looks redundant — confirm before removing.
	 */
	gp_hif_rtpm_ctx->stats.last_busy_ts = gp_hif_rtpm_ctx->stats.resume_ts;
	hif_rtpm_mark_last_busy(HIF_RTPM_ID_RESERVED);
}
  1074. int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  1075. {
  1076. int errno;
  1077. errno = hif_bus_suspend(hif_ctx);
  1078. if (errno) {
  1079. hif_err("Failed bus suspend: %d", errno);
  1080. return errno;
  1081. }
  1082. hif_rtpm_set_monitor_wake_intr(1);
  1083. errno = hif_bus_suspend_noirq(hif_ctx);
  1084. if (errno) {
  1085. hif_err("Failed bus suspend noirq: %d", errno);
  1086. hif_rtpm_set_monitor_wake_intr(0);
  1087. goto bus_resume;
  1088. }
  1089. return 0;
  1090. bus_resume:
  1091. QDF_BUG(!hif_bus_resume(hif_ctx));
  1092. return errno;
  1093. }
  1094. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  1095. {
  1096. int errno;
  1097. QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
  1098. errno = hif_bus_resume(hif_ctx);
  1099. if (errno)
  1100. hif_err("Failed runtime resume: %d", errno);
  1101. return errno;
  1102. }
  1103. void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  1104. {
  1105. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1106. struct CE_state *ce_state;
  1107. if (!scn)
  1108. return;
  1109. if (scn->fastpath_mode_on) {
  1110. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  1111. return;
  1112. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  1113. qdf_spin_lock_bh(&ce_state->ce_index_lock);
  1114. /*war_ce_src_ring_write_idx_set */
  1115. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  1116. ce_state->src_ring->write_index);
  1117. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  1118. Q_TARGET_ACCESS_END(scn);
  1119. }
  1120. }
  1121. #endif /* FEATURE_RUNTIME_PM */