scheduler_api.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548
  1. /*
  2. * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include <scheduler_api.h>
  27. #include <scheduler_core.h>
  28. static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
  29. {
  30. int i;
  31. /* Here each of the MC thread MQ shall be drained and returned to the
  32. * Core. Before returning a wrapper to the Core, the Scheduler message
  33. * shall be freed first
  34. */
  35. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  36. ("Flushing scheduler message queue"));
  37. if (!sched_ctx) {
  38. QDF_ASSERT(0);
  39. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  40. "%s: sched_ctx is NULL", __func__);
  41. return;
  42. }
  43. for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
  44. scheduler_cleanup_queues(sched_ctx, i);
  45. }
  46. static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
  47. {
  48. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  49. "%s: invoked", __func__);
  50. if (!sched_ctx) {
  51. QDF_ASSERT(0);
  52. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  53. "%s: sched_ctx == NULL", __func__);
  54. return QDF_STATUS_E_FAILURE;
  55. }
  56. /* shut down scheduler thread */
  57. qdf_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
  58. qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
  59. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  60. /* Wait for MC to exit */
  61. qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
  62. sched_ctx->sch_thread = 0;
  63. /* Clean up message queues of MC thread */
  64. scheduler_flush_mqs(sched_ctx);
  65. /* Deinit all the queues */
  66. scheduler_queues_deinit(sched_ctx);
  67. return QDF_STATUS_SUCCESS;
  68. }
  69. static QDF_STATUS scheduler_open(struct scheduler_ctx *sched_ctx)
  70. {
  71. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  72. "%s: Opening the QDF Scheduler", __func__);
  73. /* Sanity checks */
  74. if (!sched_ctx) {
  75. QDF_ASSERT(0);
  76. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  77. "%s: Null params being passed", __func__);
  78. return QDF_STATUS_E_FAILURE;
  79. }
  80. /* Initialize the helper events and event queues */
  81. qdf_event_create(&sched_ctx->sch_start_event);
  82. qdf_event_create(&sched_ctx->sch_shutdown);
  83. qdf_event_create(&sched_ctx->resume_sch_event);
  84. qdf_spinlock_create(&sched_ctx->sch_thread_lock);
  85. qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
  86. sched_ctx->sch_event_flag = 0;
  87. /* Create the Scheduler Main Controller thread */
  88. sched_ctx->sch_thread = qdf_create_thread(scheduler_thread,
  89. sched_ctx, "scheduler_thread");
  90. if (IS_ERR(sched_ctx->sch_thread)) {
  91. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
  92. "%s: Could not Create QDF Main Thread Controller",
  93. __func__);
  94. scheduler_queues_deinit(sched_ctx);
  95. return QDF_STATUS_E_RESOURCES;
  96. }
  97. /* start the thread here */
  98. qdf_wake_up_process(sched_ctx->sch_thread);
  99. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  100. "%s: QDF Main Controller thread Created", __func__);
  101. /*
  102. * Now make sure all threads have started before we exit.
  103. * Each thread should normally ACK back when it starts.
  104. */
  105. qdf_wait_single_event(&sched_ctx->sch_start_event, 0);
  106. /* We're good now: Let's get the ball rolling!!! */
  107. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  108. "%s: Scheduler thread has started", __func__);
  109. return QDF_STATUS_SUCCESS;
  110. }
  111. QDF_STATUS scheduler_init(void)
  112. {
  113. QDF_STATUS status = QDF_STATUS_SUCCESS;
  114. struct scheduler_ctx *sched_ctx;
  115. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  116. FL("Opening Scheduler"));
  117. status = scheduler_create_ctx();
  118. if (QDF_STATUS_SUCCESS != status) {
  119. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  120. FL("can't create scheduler ctx"));
  121. return status;
  122. }
  123. sched_ctx = scheduler_get_context();
  124. status = scheduler_queues_init(sched_ctx);
  125. if (QDF_STATUS_SUCCESS != status) {
  126. QDF_ASSERT(0);
  127. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  128. FL("Queue init failed"));
  129. scheduler_destroy_ctx();
  130. return status;
  131. }
  132. status = scheduler_open(sched_ctx);
  133. if (!QDF_IS_STATUS_SUCCESS(status)) {
  134. /* Critical Error ... Cannot proceed further */
  135. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
  136. "Failed to open QDF Scheduler");
  137. QDF_ASSERT(0);
  138. scheduler_queues_deinit(sched_ctx);
  139. scheduler_destroy_ctx();
  140. }
  141. qdf_register_mc_timer_callback(scheduler_mc_timer_callback);
  142. return QDF_STATUS_SUCCESS;
  143. }
  144. QDF_STATUS scheduler_deinit(void)
  145. {
  146. QDF_STATUS status = QDF_STATUS_SUCCESS;
  147. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  148. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  149. FL("Closing Scheduler"));
  150. status = scheduler_close(sched_ctx);
  151. if (QDF_STATUS_SUCCESS != status) {
  152. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  153. FL("Scheduler close failed"));
  154. return status;
  155. }
  156. return scheduler_destroy_ctx();
  157. }
  158. QDF_STATUS scheduler_post_msg_by_priority(CDS_MQ_ID qid,
  159. struct scheduler_msg *pMsg, bool is_high_priority)
  160. {
  161. uint8_t qidx;
  162. struct scheduler_mq_type *target_mq = NULL;
  163. struct scheduler_msg_wrapper *msg_wrapper = NULL;
  164. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  165. if (!sched_ctx || !pMsg) {
  166. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  167. "%s: Null params or global sch context is null",
  168. __func__);
  169. QDF_ASSERT(0);
  170. return QDF_STATUS_E_FAILURE;
  171. }
  172. /* Target_If is a special message queue in phase 3 convergence beacause
  173. * its used by both legacy WMA and as well as new UMAC components which
  174. * directly populate callback handlers in message body.
  175. * 1) WMA legacy messages should not have callback
  176. * 2) New target_if message needs to have valid callback
  177. * Clear callback handler for legacy WMA messages such that in case
  178. * if someone is sending legacy WMA message from stack which has
  179. * uninitialized callback then its handled properly. Also change
  180. * legacy WMA message queue id to target_if queue such that its always
  181. * handled in right order.
  182. */
  183. if (CDS_MQ_ID_WMA == qid) {
  184. pMsg->callback = NULL;
  185. /* change legacy WMA message id to new target_if mq id */
  186. qid = CDS_MQ_ID_TARGET_IF;
  187. }
  188. qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
  189. if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
  190. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  191. FL("Scheduler is deinitialized ignore msg"));
  192. return QDF_STATUS_E_FAILURE;
  193. }
  194. if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
  195. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  196. FL("callback not registered for qid[%d]"), qid);
  197. QDF_ASSERT(0);
  198. return QDF_STATUS_E_FAILURE;
  199. }
  200. target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
  201. QDF_ASSERT(target_mq);
  202. if (target_mq == NULL) {
  203. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  204. "%s: target_mq == NULL", __func__);
  205. return QDF_STATUS_E_FAILURE;
  206. }
  207. /* Try and get a free Msg wrapper */
  208. msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
  209. if (NULL == msg_wrapper) {
  210. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  211. FL("message wrapper empty"));
  212. return QDF_STATUS_E_RESOURCES;
  213. }
  214. /* Copy the message now */
  215. qdf_mem_copy((void *)msg_wrapper->msg_buf,
  216. (void *)pMsg, sizeof(struct scheduler_msg));
  217. if (is_high_priority)
  218. scheduler_mq_put_front(target_mq, msg_wrapper);
  219. else
  220. scheduler_mq_put(target_mq, msg_wrapper);
  221. qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
  222. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  223. return QDF_STATUS_SUCCESS;
  224. }
  225. QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
  226. scheduler_msg_process_fn_t callback)
  227. {
  228. struct scheduler_mq_ctx *ctx;
  229. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  230. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  231. FL("Enter"));
  232. if (!sched_ctx) {
  233. QDF_ASSERT(0);
  234. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  235. FL("sched_ctx is NULL"));
  236. return QDF_STATUS_E_FAILURE;
  237. }
  238. ctx = &sched_ctx->queue_ctx;
  239. ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
  240. ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
  241. ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
  242. sched_ctx->sch_last_qidx++;
  243. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  244. FL("Exit"));
  245. return QDF_STATUS_SUCCESS;
  246. }
  247. QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
  248. {
  249. struct scheduler_mq_ctx *ctx;
  250. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  251. uint8_t qidx;
  252. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  253. FL("Enter"));
  254. if (!sched_ctx) {
  255. QDF_ASSERT(0);
  256. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  257. FL("sched_ctx is NULL"));
  258. return QDF_STATUS_E_FAILURE;
  259. }
  260. ctx = &sched_ctx->queue_ctx;
  261. qidx = ctx->scheduler_msg_qid_to_qidx[qid];
  262. ctx->scheduler_msg_process_fn[qidx] = NULL;
  263. ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
  264. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  265. FL("Exit"));
  266. return QDF_STATUS_SUCCESS;
  267. }
  268. void scheduler_resume_complete(void)
  269. {
  270. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  271. if (sched_ctx)
  272. qdf_event_set(&sched_ctx->resume_sch_event);
  273. }
  274. void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
  275. {
  276. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  277. if (sched_ctx)
  278. sched_ctx->hdd_callback = callback;
  279. }
  280. void scheduler_wake_up_controller_thread(void)
  281. {
  282. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  283. if (sched_ctx)
  284. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  285. }
  286. void scheduler_set_event_mask(uint32_t event_mask)
  287. {
  288. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  289. if (sched_ctx)
  290. qdf_set_bit(event_mask, &sched_ctx->sch_event_flag);
  291. }
  292. void scheduler_clear_event_mask(uint32_t event_mask)
  293. {
  294. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  295. if (sched_ctx)
  296. qdf_clear_bit(event_mask, &sched_ctx->sch_event_flag);
  297. }
  298. QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
  299. {
  300. QDF_STATUS status;
  301. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  302. QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
  303. if (NULL == msg || NULL == sched_ctx) {
  304. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  305. QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
  306. msg, sched_ctx);
  307. return QDF_STATUS_E_FAILURE;
  308. }
  309. target_if_msg_handler = msg->callback;
  310. /* Target_If is a special message queue in phase 3 convergence beacause
  311. * its used by both legacy WMA and as well as new UMAC components. New
  312. * UMAC components directly pass their message handlers as callback in
  313. * message body.
  314. * 1) All Legacy WMA messages do not contain message callback so invoke
  315. * registered legacy WMA handler. Scheduler message posting APIs
  316. * makes sure legacy WMA messages do not have callbacks.
  317. * 2) For new messages which have valid callbacks invoke their callbacks
  318. * directly.
  319. */
  320. if (NULL == target_if_msg_handler)
  321. status = sched_ctx->legacy_wma_handler(msg);
  322. else
  323. status = target_if_msg_handler(msg);
  324. return status;
  325. }
  326. QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
  327. {
  328. QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
  329. if (NULL == msg) {
  330. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  331. QDF_TRACE_LEVEL_ERROR, FL("Msg is NULL"));
  332. return QDF_STATUS_E_FAILURE;
  333. }
  334. os_if_msg_handler = msg->callback;
  335. if (NULL == os_if_msg_handler) {
  336. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  337. QDF_TRACE_LEVEL_ERROR, FL("Msg callback is NULL"));
  338. QDF_ASSERT(0);
  339. return QDF_STATUS_E_FAILURE;
  340. }
  341. os_if_msg_handler(msg);
  342. return QDF_STATUS_SUCCESS;
  343. }
  344. QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
  345. {
  346. QDF_STATUS status;
  347. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  348. qdf_mc_timer_callback_t timer_q_msg_handler;
  349. if (NULL == msg || NULL == sched_ctx) {
  350. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  351. QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
  352. msg, sched_ctx);
  353. return QDF_STATUS_E_FAILURE;
  354. }
  355. timer_q_msg_handler = msg->callback;
  356. /* Timer message handler */
  357. if (SYS_MSG_COOKIE == msg->reserved &&
  358. SYS_MSG_ID_MC_TIMER == msg->type) {
  359. if (timer_q_msg_handler) {
  360. status = QDF_STATUS_SUCCESS;
  361. timer_q_msg_handler(msg->bodyptr);
  362. } else {
  363. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  364. QDF_TRACE_LEVEL_ERROR, FL("Timer cb is null"));
  365. status = QDF_STATUS_E_FAILURE;
  366. }
  367. return status;
  368. } else {
  369. /* Legacy sys message handler */
  370. status = sched_ctx->legacy_sys_handler(msg);
  371. return status;
  372. }
  373. }
  374. QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
  375. wma_callback)
  376. {
  377. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  378. if (NULL == sched_ctx) {
  379. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  380. QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
  381. return QDF_STATUS_E_FAILURE;
  382. }
  383. sched_ctx->legacy_wma_handler = wma_callback;
  384. return QDF_STATUS_SUCCESS;
  385. }
  386. QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
  387. sys_callback)
  388. {
  389. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  390. if (NULL == sched_ctx) {
  391. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  392. QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
  393. return QDF_STATUS_E_FAILURE;
  394. }
  395. sched_ctx->legacy_sys_handler = sys_callback;
  396. return QDF_STATUS_SUCCESS;
  397. }
/**
 * scheduler_mc_timer_callback() - OS timer expiry entry point for MC timers
 * @data: the qdf_mc_timer_t that fired, passed as an unsigned long
 *
 * Runs in timer (softirq) context. Under the timer's spinlock it
 * validates/advances the timer state machine, snapshots the callback,
 * user data and type, then serializes the actual callback invocation to
 * the scheduler controller thread by posting a SYS_MSG_ID_MC_TIMER
 * message to the SYS queue.
 */
void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg;
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_ASSERT(timer);

	if (timer == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s Null pointer passed in!", __func__);
		return;
	}

	/* the state machine and the fields we copy out are protected by
	 * the timer's own spinlock
	 */
	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer,
		 * MC timer got started and expired, but the time content have
		 * not been updated this is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exits from this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_STATUS_SUCCESS != status) {
		/* NOTE(review): timer->state is read here after the lock is
		 * released, so the traced value may already have changed —
		 * harmless for logging but worth confirming
		 */
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "TIMER callback called in a wrong state=%d",
			  timer->state);
		return;
	}

	qdf_try_allowing_sleep(type);

	if (callback == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: No TIMER callback, Couldn't enqueue timer to any queue",
			  __func__);
		QDF_ASSERT(0);
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	if (scheduler_post_msg(CDS_MQ_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
		return;
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: Could not enqueue timer to timer queue", __func__);
}