/* scheduler_api.c */
  1. /*
  2. * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <scheduler_api.h>
  19. #include <scheduler_core.h>
  20. #include <qdf_atomic.h>
  21. QDF_STATUS scheduler_disable(void)
  22. {
  23. struct scheduler_ctx *sched_ctx;
  24. sched_debug("Disabling Scheduler");
  25. sched_ctx = scheduler_get_context();
  26. QDF_BUG(sched_ctx);
  27. if (!sched_ctx)
  28. return QDF_STATUS_E_INVAL;
  29. /* send shutdown signal to scheduler thread */
  30. qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
  31. qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
  32. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  33. /* wait for scheduler thread to shutdown */
  34. qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
  35. sched_ctx->sch_thread = NULL;
  36. /* flush any unprocessed scheduler messages */
  37. scheduler_queues_flush(sched_ctx);
  38. return QDF_STATUS_SUCCESS;
  39. }
  40. static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
  41. {
  42. char symbol[QDF_SYMBOL_LEN];
  43. if (sched->watchdog_callback)
  44. qdf_sprint_symbol(symbol, sched->watchdog_callback);
  45. sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
  46. sched->watchdog_callback ? symbol : "<null>",
  47. sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000);
  48. }
  49. #ifdef CONFIG_SLUB_DEBUG_ON
  50. static void scheduler_watchdog_timeout(void *arg)
  51. {
  52. struct scheduler_ctx *sched = arg;
  53. scheduler_watchdog_notify(sched);
  54. if (sched->sch_thread)
  55. qdf_print_thread_trace(sched->sch_thread);
  56. /* avoid crashing during shutdown */
  57. if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
  58. return;
  59. QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
  60. }
  61. #else
  62. static void scheduler_watchdog_timeout(void *arg)
  63. {
  64. scheduler_watchdog_notify((struct scheduler_ctx *)arg);
  65. }
  66. #endif
  67. QDF_STATUS scheduler_enable(void)
  68. {
  69. struct scheduler_ctx *sched_ctx;
  70. sched_debug("Enabling Scheduler");
  71. sched_ctx = scheduler_get_context();
  72. QDF_BUG(sched_ctx);
  73. if (!sched_ctx)
  74. return QDF_STATUS_E_INVAL;
  75. qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
  76. &sched_ctx->sch_event_flag);
  77. qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
  78. &sched_ctx->sch_event_flag);
  79. /* create the scheduler thread */
  80. sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
  81. "scheduler_thread");
  82. if (IS_ERR(sched_ctx->sch_thread)) {
  83. sched_err("Failed to create scheduler thread");
  84. return QDF_STATUS_E_RESOURCES;
  85. }
  86. sched_debug("Scheduler thread created");
  87. /* wait for the scheduler thread to startup */
  88. qdf_wake_up_process(sched_ctx->sch_thread);
  89. qdf_wait_single_event(&sched_ctx->sch_start_event, 0);
  90. sched_debug("Scheduler thread started");
  91. return QDF_STATUS_SUCCESS;
  92. }
/**
 * scheduler_init() - allocate and initialize the scheduler context
 *
 * Creates the global scheduler context, its message queues, the
 * start/shutdown/resume events, the thread lock and wait queue, and the
 * watchdog timer; finally registers the MC timer callback.  On any
 * failure, everything initialized so far is torn down in reverse order
 * via the goto-cleanup chain.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	/* software watchdog timer; fires scheduler_watchdog_timeout() if a
	 * message callback exceeds its allotted time
	 */
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	/* route expired MC timers through the scheduler thread */
	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

/* cleanup chain: each label undoes the step immediately above it */
shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
  149. QDF_STATUS scheduler_deinit(void)
  150. {
  151. QDF_STATUS status;
  152. struct scheduler_ctx *sched_ctx;
  153. sched_debug("Deinitializing Scheduler");
  154. sched_ctx = scheduler_get_context();
  155. QDF_BUG(sched_ctx);
  156. if (!sched_ctx)
  157. return QDF_STATUS_E_INVAL;
  158. qdf_timer_free(&sched_ctx->watchdog_timer);
  159. qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
  160. qdf_event_destroy(&sched_ctx->resume_sch_event);
  161. qdf_event_destroy(&sched_ctx->sch_shutdown);
  162. qdf_event_destroy(&sched_ctx->sch_start_event);
  163. status = scheduler_queues_deinit(sched_ctx);
  164. if (QDF_IS_STATUS_ERROR(status))
  165. sched_err("Failed to deinit queues; status:%d", status);
  166. status = scheduler_destroy_ctx();
  167. if (QDF_IS_STATUS_ERROR(status))
  168. sched_err("Failed to destroy context; status:%d", status);
  169. return QDF_STATUS_SUCCESS;
  170. }
/**
 * scheduler_post_msg_by_priority() - duplicate and enqueue a message
 * @qid: destination module/queue id
 * @msg: message to post; copied, so the caller's copy may live on stack
 * @is_high_priority: if true, enqueue at the head of the target queue
 *
 * Validates the context and message, redirects legacy WMA traffic to the
 * target_if queue, duplicates the message, enqueues the copy, and wakes
 * the scheduler thread.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_INVAL,
 *         QDF_STATUS_E_FAILURE, or QDF_STATUS_E_NOMEM on error
 */
QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* no thread means nobody will ever service the queue */
	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	/* reserved must be zeroed (fresh msg) or carry the SYS cookie */
	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used by both legacy WMA as well as new UMAC components which
	 * directly populate callback handlers in the message body.
	 * 1) WMA legacy messages should not have a callback
	 * 2) New target_if messages need to have a valid callback
	 * Clear the callback handler for legacy WMA messages such that if
	 * someone sends a legacy WMA message from the stack with an
	 * uninitialized callback, it is handled properly. Also change the
	 * legacy WMA message queue id to the target_if queue so that it is
	 * always handled in the right order.
	 */
	if (QDF_MODULE_ID_WMA == qid) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		qid = QDF_MODULE_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	/* sentinel value set at deregistration marks the queue as gone */
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized ignore msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", qid);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	/* the queue owns a private copy; scheduler_core frees it later */
	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	/* flag pending work and kick the scheduler thread awake */
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}
  231. QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
  232. scheduler_msg_process_fn_t callback)
  233. {
  234. struct scheduler_mq_ctx *ctx;
  235. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  236. sched_enter();
  237. QDF_BUG(sched_ctx);
  238. if (!sched_ctx)
  239. return QDF_STATUS_E_FAILURE;
  240. if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
  241. sched_err("Already registered max %d no of message queues",
  242. SCHEDULER_NUMBER_OF_MSG_QUEUE);
  243. return QDF_STATUS_E_FAILURE;
  244. }
  245. ctx = &sched_ctx->queue_ctx;
  246. ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
  247. ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
  248. ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
  249. sched_ctx->sch_last_qidx++;
  250. sched_exit();
  251. return QDF_STATUS_SUCCESS;
  252. }
  253. QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
  254. {
  255. struct scheduler_mq_ctx *ctx;
  256. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  257. uint8_t qidx;
  258. sched_enter();
  259. QDF_BUG(sched_ctx);
  260. if (!sched_ctx)
  261. return QDF_STATUS_E_FAILURE;
  262. ctx = &sched_ctx->queue_ctx;
  263. qidx = ctx->scheduler_msg_qid_to_qidx[qid];
  264. ctx->scheduler_msg_process_fn[qidx] = NULL;
  265. sched_ctx->sch_last_qidx--;
  266. ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
  267. sched_exit();
  268. return QDF_STATUS_SUCCESS;
  269. }
  270. void scheduler_resume(void)
  271. {
  272. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  273. if (sched_ctx)
  274. qdf_event_set(&sched_ctx->resume_sch_event);
  275. }
  276. void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
  277. {
  278. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  279. if (sched_ctx)
  280. sched_ctx->hdd_callback = callback;
  281. }
  282. void scheduler_wake_up_controller_thread(void)
  283. {
  284. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  285. if (sched_ctx)
  286. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  287. }
  288. void scheduler_set_event_mask(uint32_t event_mask)
  289. {
  290. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  291. if (sched_ctx)
  292. qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
  293. }
  294. void scheduler_clear_event_mask(uint32_t event_mask)
  295. {
  296. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  297. if (sched_ctx)
  298. qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
  299. }
  300. QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
  301. {
  302. QDF_STATUS status;
  303. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  304. QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
  305. QDF_BUG(msg);
  306. if (!msg)
  307. return QDF_STATUS_E_FAILURE;
  308. QDF_BUG(sched_ctx);
  309. if (!sched_ctx)
  310. return QDF_STATUS_E_FAILURE;
  311. target_if_msg_handler = msg->callback;
  312. /* Target_If is a special message queue in phase 3 convergence beacause
  313. * its used by both legacy WMA and as well as new UMAC components. New
  314. * UMAC components directly pass their message handlers as callback in
  315. * message body.
  316. * 1) All Legacy WMA messages do not contain message callback so invoke
  317. * registered legacy WMA handler. Scheduler message posting APIs
  318. * makes sure legacy WMA messages do not have callbacks.
  319. * 2) For new messages which have valid callbacks invoke their callbacks
  320. * directly.
  321. */
  322. if (!target_if_msg_handler)
  323. status = sched_ctx->legacy_wma_handler(msg);
  324. else
  325. status = target_if_msg_handler(msg);
  326. return status;
  327. }
  328. QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
  329. {
  330. QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
  331. QDF_BUG(msg);
  332. if (!msg)
  333. return QDF_STATUS_E_FAILURE;
  334. os_if_msg_handler = msg->callback;
  335. QDF_BUG(os_if_msg_handler);
  336. if (!os_if_msg_handler)
  337. return QDF_STATUS_E_FAILURE;
  338. os_if_msg_handler(msg);
  339. return QDF_STATUS_SUCCESS;
  340. }
  341. QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
  342. {
  343. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  344. qdf_mc_timer_callback_t timer_callback;
  345. QDF_BUG(msg);
  346. if (!msg)
  347. return QDF_STATUS_E_FAILURE;
  348. QDF_BUG(sched_ctx);
  349. if (!sched_ctx)
  350. return QDF_STATUS_E_FAILURE;
  351. /* legacy sys message handler? */
  352. if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
  353. return sched_ctx->legacy_sys_handler(msg);
  354. timer_callback = msg->callback;
  355. QDF_BUG(timer_callback);
  356. if (!timer_callback)
  357. return QDF_STATUS_E_FAILURE;
  358. timer_callback(msg->bodyptr);
  359. return QDF_STATUS_SUCCESS;
  360. }
  361. QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
  362. {
  363. QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);
  364. QDF_BUG(msg);
  365. if (!msg)
  366. return QDF_STATUS_E_FAILURE;
  367. scan_q_msg_handler = msg->callback;
  368. QDF_BUG(scan_q_msg_handler);
  369. if (!scan_q_msg_handler)
  370. return QDF_STATUS_E_FAILURE;
  371. scan_q_msg_handler(msg);
  372. return QDF_STATUS_SUCCESS;
  373. }
  374. QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
  375. wma_callback)
  376. {
  377. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  378. QDF_BUG(sched_ctx);
  379. if (!sched_ctx)
  380. return QDF_STATUS_E_FAILURE;
  381. sched_ctx->legacy_wma_handler = wma_callback;
  382. return QDF_STATUS_SUCCESS;
  383. }
  384. QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
  385. sys_callback)
  386. {
  387. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  388. QDF_BUG(sched_ctx);
  389. if (!sched_ctx)
  390. return QDF_STATUS_E_FAILURE;
  391. sched_ctx->legacy_sys_handler = sys_callback;
  392. return QDF_STATUS_SUCCESS;
  393. }
  394. QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
  395. {
  396. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  397. QDF_BUG(sched_ctx);
  398. if (!sched_ctx)
  399. return QDF_STATUS_E_FAILURE;
  400. sched_ctx->legacy_wma_handler = NULL;
  401. return QDF_STATUS_SUCCESS;
  402. }
  403. QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
  404. {
  405. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  406. QDF_BUG(sched_ctx);
  407. if (!sched_ctx)
  408. return QDF_STATUS_E_FAILURE;
  409. sched_ctx->legacy_sys_handler = NULL;
  410. return QDF_STATUS_SUCCESS;
  411. }
/**
 * scheduler_msg_flush_noop() - no-op flush callback
 * @msg: message being flushed (unused)
 *
 * Installed on messages whose bodyptr is not owned by the scheduler
 * (e.g. MC timer user data), so queue flushing must not free it.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}
/**
 * scheduler_mc_timer_callback() - OS timer expiry entry for MC timers
 * @data: the expired qdf_mc_timer_t, cast to unsigned long
 *
 * Runs in OS timer context. Transitions the timer's state machine under
 * its spinlock, snapshots the callback/user data, and serializes the
 * actual callback invocation to the scheduler thread by posting a
 * SYS_MSG_ID_MC_TIMER message to the SYS queue.
 */
void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	/* state transitions must be atomic w.r.t. start/stop/destroy */
	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated; this is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	/* only a RUNNING->STOPPED transition carries a callback to deliver */
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data, do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_msg(QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}