scheduler_api.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572
  1. /*
  2. * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include <scheduler_api.h>
  27. #include <scheduler_core.h>
  28. #include <qdf_atomic.h>
  29. /* Debug variable to detect if controller thread is stuck */
  30. static qdf_atomic_t scheduler_msg_post_fail_count;
  31. static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
  32. {
  33. int i;
  34. /* Here each of the MC thread MQ shall be drained and returned to the
  35. * Core. Before returning a wrapper to the Core, the Scheduler message
  36. * shall be freed first
  37. */
  38. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  39. ("Flushing scheduler message queue"));
  40. if (!sched_ctx) {
  41. QDF_ASSERT(0);
  42. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  43. "%s: sched_ctx is NULL", __func__);
  44. return;
  45. }
  46. for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
  47. scheduler_cleanup_queues(sched_ctx, i);
  48. }
/**
 * scheduler_close() - stop the controller thread and tear down its queues
 * @sched_ctx: scheduler context to close
 *
 * Signals the MC thread to shut down, wakes it, waits for it to exit,
 * then flushes and de-initializes all message queues.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when
 *         sched_ctx is NULL.
 */
static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
{
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: invoked", __func__);
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: sched_ctx == NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	/* shut down scheduler thread: both flags must be set so the
	 * thread's wait condition is satisfied and it sees the shutdown
	 */
	qdf_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
	/* Wait for MC to exit (0 == wait forever; thread sets sch_shutdown
	 * on its way out)
	 */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = 0;
	/* Clean up message queues of MC thread */
	scheduler_flush_mqs(sched_ctx);
	/* Deinit all the queues */
	scheduler_queues_deinit(sched_ctx);
	return QDF_STATUS_SUCCESS;
}
  72. static QDF_STATUS scheduler_open(struct scheduler_ctx *sched_ctx)
  73. {
  74. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  75. "%s: Opening the QDF Scheduler", __func__);
  76. /* Sanity checks */
  77. if (!sched_ctx) {
  78. QDF_ASSERT(0);
  79. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  80. "%s: Null params being passed", __func__);
  81. return QDF_STATUS_E_FAILURE;
  82. }
  83. /* Initialize the helper events and event queues */
  84. qdf_event_create(&sched_ctx->sch_start_event);
  85. qdf_event_create(&sched_ctx->sch_shutdown);
  86. qdf_event_create(&sched_ctx->resume_sch_event);
  87. qdf_spinlock_create(&sched_ctx->sch_thread_lock);
  88. qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
  89. sched_ctx->sch_event_flag = 0;
  90. /* Create the Scheduler Main Controller thread */
  91. sched_ctx->sch_thread = qdf_create_thread(scheduler_thread,
  92. sched_ctx, "scheduler_thread");
  93. if (IS_ERR(sched_ctx->sch_thread)) {
  94. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
  95. "%s: Could not Create QDF Main Thread Controller",
  96. __func__);
  97. scheduler_queues_deinit(sched_ctx);
  98. return QDF_STATUS_E_RESOURCES;
  99. }
  100. /* start the thread here */
  101. qdf_wake_up_process(sched_ctx->sch_thread);
  102. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  103. "%s: QDF Main Controller thread Created", __func__);
  104. /*
  105. * Now make sure all threads have started before we exit.
  106. * Each thread should normally ACK back when it starts.
  107. */
  108. qdf_wait_single_event(&sched_ctx->sch_start_event, 0);
  109. /* We're good now: Let's get the ball rolling!!! */
  110. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  111. "%s: Scheduler thread has started", __func__);
  112. return QDF_STATUS_SUCCESS;
  113. }
  114. QDF_STATUS scheduler_init(void)
  115. {
  116. QDF_STATUS status = QDF_STATUS_SUCCESS;
  117. struct scheduler_ctx *sched_ctx;
  118. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  119. FL("Opening Scheduler"));
  120. status = scheduler_create_ctx();
  121. if (QDF_STATUS_SUCCESS != status) {
  122. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  123. FL("can't create scheduler ctx"));
  124. return status;
  125. }
  126. sched_ctx = scheduler_get_context();
  127. status = scheduler_queues_init(sched_ctx);
  128. if (QDF_STATUS_SUCCESS != status) {
  129. QDF_ASSERT(0);
  130. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  131. FL("Queue init failed"));
  132. scheduler_destroy_ctx();
  133. return status;
  134. }
  135. status = scheduler_open(sched_ctx);
  136. if (!QDF_IS_STATUS_SUCCESS(status)) {
  137. /* Critical Error ... Cannot proceed further */
  138. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
  139. "Failed to open QDF Scheduler");
  140. QDF_ASSERT(0);
  141. scheduler_queues_deinit(sched_ctx);
  142. scheduler_destroy_ctx();
  143. }
  144. qdf_register_mc_timer_callback(scheduler_mc_timer_callback);
  145. return QDF_STATUS_SUCCESS;
  146. }
  147. QDF_STATUS scheduler_deinit(void)
  148. {
  149. QDF_STATUS status = QDF_STATUS_SUCCESS;
  150. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  151. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
  152. FL("Closing Scheduler"));
  153. status = scheduler_close(sched_ctx);
  154. if (QDF_STATUS_SUCCESS != status) {
  155. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  156. FL("Scheduler close failed"));
  157. return status;
  158. }
  159. return scheduler_destroy_ctx();
  160. }
  161. QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
  162. struct scheduler_msg *pMsg, bool is_high_priority)
  163. {
  164. uint8_t qidx;
  165. uint32_t msg_wrapper_fail_count;
  166. struct scheduler_mq_type *target_mq = NULL;
  167. struct scheduler_msg_wrapper *msg_wrapper = NULL;
  168. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  169. if (!sched_ctx || !pMsg) {
  170. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  171. "%s: Null params or global sch context is null",
  172. __func__);
  173. QDF_ASSERT(0);
  174. return QDF_STATUS_E_FAILURE;
  175. }
  176. /* Target_If is a special message queue in phase 3 convergence beacause
  177. * its used by both legacy WMA and as well as new UMAC components which
  178. * directly populate callback handlers in message body.
  179. * 1) WMA legacy messages should not have callback
  180. * 2) New target_if message needs to have valid callback
  181. * Clear callback handler for legacy WMA messages such that in case
  182. * if someone is sending legacy WMA message from stack which has
  183. * uninitialized callback then its handled properly. Also change
  184. * legacy WMA message queue id to target_if queue such that its always
  185. * handled in right order.
  186. */
  187. if (QDF_MODULE_ID_WMA == qid) {
  188. pMsg->callback = NULL;
  189. /* change legacy WMA message id to new target_if mq id */
  190. qid = QDF_MODULE_ID_TARGET_IF;
  191. }
  192. qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
  193. if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
  194. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  195. FL("Scheduler is deinitialized ignore msg"));
  196. return QDF_STATUS_E_FAILURE;
  197. }
  198. if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
  199. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  200. FL("callback not registered for qid[%d]"), qid);
  201. QDF_ASSERT(0);
  202. return QDF_STATUS_E_FAILURE;
  203. }
  204. target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
  205. QDF_ASSERT(target_mq);
  206. if (target_mq == NULL) {
  207. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  208. "%s: target_mq == NULL", __func__);
  209. return QDF_STATUS_E_FAILURE;
  210. }
  211. /* Try and get a free Msg wrapper */
  212. msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
  213. if (NULL == msg_wrapper) {
  214. msg_wrapper_fail_count =
  215. qdf_atomic_inc_return(&scheduler_msg_post_fail_count);
  216. /* log only 1st failure to avoid over running log buffer */
  217. if (1 == msg_wrapper_fail_count) {
  218. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  219. QDF_TRACE_LEVEL_ERROR,
  220. FL("Scheduler message wrapper empty"));
  221. }
  222. if (SCHEDULER_WRAPPER_MAX_FAIL_COUNT == msg_wrapper_fail_count)
  223. QDF_BUG(0);
  224. return QDF_STATUS_E_RESOURCES;
  225. }
  226. qdf_atomic_set(&scheduler_msg_post_fail_count, 0);
  227. /* Copy the message now */
  228. qdf_mem_copy((void *)msg_wrapper->msg_buf,
  229. (void *)pMsg, sizeof(struct scheduler_msg));
  230. if (is_high_priority)
  231. scheduler_mq_put_front(target_mq, msg_wrapper);
  232. else
  233. scheduler_mq_put(target_mq, msg_wrapper);
  234. qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
  235. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  236. return QDF_STATUS_SUCCESS;
  237. }
  238. QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
  239. scheduler_msg_process_fn_t callback)
  240. {
  241. struct scheduler_mq_ctx *ctx;
  242. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  243. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  244. FL("Enter"));
  245. if (!sched_ctx) {
  246. QDF_ASSERT(0);
  247. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  248. FL("sched_ctx is NULL"));
  249. return QDF_STATUS_E_FAILURE;
  250. }
  251. if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
  252. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  253. QDF_TRACE_LEVEL_ERROR,
  254. FL("Already registered max %d no of message queues"),
  255. SCHEDULER_NUMBER_OF_MSG_QUEUE);
  256. return QDF_STATUS_E_FAILURE;
  257. }
  258. ctx = &sched_ctx->queue_ctx;
  259. ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
  260. ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
  261. ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
  262. sched_ctx->sch_last_qidx++;
  263. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  264. FL("Exit"));
  265. return QDF_STATUS_SUCCESS;
  266. }
  267. QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
  268. {
  269. struct scheduler_mq_ctx *ctx;
  270. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  271. uint8_t qidx;
  272. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  273. FL("Enter"));
  274. if (!sched_ctx) {
  275. QDF_ASSERT(0);
  276. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  277. FL("sched_ctx is NULL"));
  278. return QDF_STATUS_E_FAILURE;
  279. }
  280. ctx = &sched_ctx->queue_ctx;
  281. qidx = ctx->scheduler_msg_qid_to_qidx[qid];
  282. ctx->scheduler_msg_process_fn[qidx] = NULL;
  283. sched_ctx->sch_last_qidx--;
  284. ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
  285. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
  286. FL("Exit"));
  287. return QDF_STATUS_SUCCESS;
  288. }
  289. void scheduler_resume(void)
  290. {
  291. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  292. if (sched_ctx)
  293. qdf_event_set(&sched_ctx->resume_sch_event);
  294. }
  295. void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
  296. {
  297. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  298. if (sched_ctx)
  299. sched_ctx->hdd_callback = callback;
  300. }
  301. void scheduler_wake_up_controller_thread(void)
  302. {
  303. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  304. if (sched_ctx)
  305. qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
  306. }
  307. void scheduler_set_event_mask(uint32_t event_mask)
  308. {
  309. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  310. if (sched_ctx)
  311. qdf_set_bit(event_mask, &sched_ctx->sch_event_flag);
  312. }
  313. void scheduler_clear_event_mask(uint32_t event_mask)
  314. {
  315. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  316. if (sched_ctx)
  317. qdf_clear_bit(event_mask, &sched_ctx->sch_event_flag);
  318. }
  319. QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
  320. {
  321. QDF_STATUS status;
  322. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  323. QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
  324. if (NULL == msg || NULL == sched_ctx) {
  325. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  326. QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
  327. msg, sched_ctx);
  328. return QDF_STATUS_E_FAILURE;
  329. }
  330. target_if_msg_handler = msg->callback;
  331. /* Target_If is a special message queue in phase 3 convergence beacause
  332. * its used by both legacy WMA and as well as new UMAC components. New
  333. * UMAC components directly pass their message handlers as callback in
  334. * message body.
  335. * 1) All Legacy WMA messages do not contain message callback so invoke
  336. * registered legacy WMA handler. Scheduler message posting APIs
  337. * makes sure legacy WMA messages do not have callbacks.
  338. * 2) For new messages which have valid callbacks invoke their callbacks
  339. * directly.
  340. */
  341. if (NULL == target_if_msg_handler)
  342. status = sched_ctx->legacy_wma_handler(msg);
  343. else
  344. status = target_if_msg_handler(msg);
  345. return status;
  346. }
  347. QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
  348. {
  349. QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
  350. if (NULL == msg) {
  351. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  352. QDF_TRACE_LEVEL_ERROR, FL("Msg is NULL"));
  353. return QDF_STATUS_E_FAILURE;
  354. }
  355. os_if_msg_handler = msg->callback;
  356. if (NULL == os_if_msg_handler) {
  357. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  358. QDF_TRACE_LEVEL_ERROR, FL("Msg callback is NULL"));
  359. QDF_ASSERT(0);
  360. return QDF_STATUS_E_FAILURE;
  361. }
  362. os_if_msg_handler(msg);
  363. return QDF_STATUS_SUCCESS;
  364. }
  365. QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
  366. {
  367. QDF_STATUS status;
  368. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  369. qdf_mc_timer_callback_t timer_q_msg_handler;
  370. if (NULL == msg || NULL == sched_ctx) {
  371. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  372. QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
  373. msg, sched_ctx);
  374. return QDF_STATUS_E_FAILURE;
  375. }
  376. timer_q_msg_handler = msg->callback;
  377. /* Timer message handler */
  378. if (SYS_MSG_COOKIE == msg->reserved &&
  379. SYS_MSG_ID_MC_TIMER == msg->type) {
  380. if (timer_q_msg_handler) {
  381. status = QDF_STATUS_SUCCESS;
  382. timer_q_msg_handler(msg->bodyptr);
  383. } else {
  384. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  385. QDF_TRACE_LEVEL_ERROR, FL("Timer cb is null"));
  386. status = QDF_STATUS_E_FAILURE;
  387. }
  388. return status;
  389. } else {
  390. /* Legacy sys message handler */
  391. status = sched_ctx->legacy_sys_handler(msg);
  392. return status;
  393. }
  394. }
  395. QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
  396. wma_callback)
  397. {
  398. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  399. if (NULL == sched_ctx) {
  400. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  401. QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
  402. return QDF_STATUS_E_FAILURE;
  403. }
  404. sched_ctx->legacy_wma_handler = wma_callback;
  405. return QDF_STATUS_SUCCESS;
  406. }
  407. QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
  408. sys_callback)
  409. {
  410. struct scheduler_ctx *sched_ctx = scheduler_get_context();
  411. if (NULL == sched_ctx) {
  412. QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
  413. QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
  414. return QDF_STATUS_E_FAILURE;
  415. }
  416. sched_ctx->legacy_sys_handler = sys_callback;
  417. return QDF_STATUS_SUCCESS;
  418. }
  419. void scheduler_mc_timer_callback(unsigned long data)
  420. {
  421. qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
  422. struct scheduler_msg msg;
  423. QDF_STATUS status;
  424. qdf_mc_timer_callback_t callback = NULL;
  425. void *user_data = NULL;
  426. QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;
  427. QDF_ASSERT(timer);
  428. if (timer == NULL) {
  429. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  430. "%s Null pointer passed in!", __func__);
  431. return;
  432. }
  433. qdf_spin_lock_irqsave(&timer->platform_info.spinlock);
  434. switch (timer->state) {
  435. case QDF_TIMER_STATE_STARTING:
  436. /* we are in this state because someone just started the timer,
  437. * MC timer got started and expired, but the time content have
  438. * not been updated this is a rare race condition!
  439. */
  440. timer->state = QDF_TIMER_STATE_STOPPED;
  441. status = QDF_STATUS_E_ALREADY;
  442. break;
  443. case QDF_TIMER_STATE_STOPPED:
  444. status = QDF_STATUS_E_ALREADY;
  445. break;
  446. case QDF_TIMER_STATE_UNUSED:
  447. status = QDF_STATUS_E_EXISTS;
  448. break;
  449. case QDF_TIMER_STATE_RUNNING:
  450. /* need to go to stop state here because the call-back function
  451. * may restart timer (to emulate periodic timer)
  452. */
  453. timer->state = QDF_TIMER_STATE_STOPPED;
  454. /* copy the relevant timer information to local variables;
  455. * once we exits from this critical section, the timer content
  456. * may be modified by other tasks
  457. */
  458. callback = timer->callback;
  459. user_data = timer->user_data;
  460. type = timer->type;
  461. status = QDF_STATUS_SUCCESS;
  462. break;
  463. default:
  464. QDF_ASSERT(0);
  465. status = QDF_STATUS_E_FAULT;
  466. break;
  467. }
  468. qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);
  469. if (QDF_STATUS_SUCCESS != status) {
  470. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  471. "TIMER callback called in a wrong state=%d",
  472. timer->state);
  473. return;
  474. }
  475. qdf_try_allowing_sleep(type);
  476. if (callback == NULL) {
  477. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  478. "%s: No TIMER callback, Couldn't enqueue timer to any queue",
  479. __func__);
  480. QDF_ASSERT(0);
  481. return;
  482. }
  483. /* serialize to scheduler controller thread */
  484. msg.type = SYS_MSG_ID_MC_TIMER;
  485. msg.reserved = SYS_MSG_COOKIE;
  486. msg.callback = callback;
  487. msg.bodyptr = user_data;
  488. msg.bodyval = 0;
  489. if (scheduler_post_msg(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
  490. return;
  491. QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
  492. "%s: Could not enqueue timer to timer queue", __func__);
  493. }