scheduler_api.c

/*
 * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <scheduler_api.h>
#include <scheduler_core.h>

static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
{
	int i;

	/* Drain each of the MC thread message queues and return the wrappers
	 * to the core free queue. The scheduler message carried by a wrapper
	 * is freed before the wrapper is returned.
	 */
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
		  ("Flushing scheduler message queue"));

	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: sched_ctx is NULL", __func__);
		return;
	}

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_cleanup_queues(sched_ctx, i);
}

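/**
 * scheduler_close() - shut down the scheduler MC thread and its queues
 * @sched_ctx: scheduler context to close
 *
 * Signals the scheduler thread to shut down, wakes it, waits for it to
 * exit, then flushes and de-initializes all of its message queues.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */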
static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
{
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: invoked", __func__);

	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: sched_ctx == NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* shut down scheduler thread */
	qdf_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* Wait for MC thread to exit */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = 0;

	/* Clean up message queues of MC thread */
	scheduler_flush_mqs(sched_ctx);

	/* Deinit all the queues */
	scheduler_queues_deinit(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

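/**
 * scheduler_open() - create helper events and start the scheduler thread
 * @sched_ctx: scheduler context to open
 *
 * Initializes the start/shutdown/resume events, lock and wait queue, spawns
 * the scheduler controller thread and blocks until the thread signals that
 * it has started.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE or
 *         QDF_STATUS_E_RESOURCES on error
 */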
static QDF_STATUS scheduler_open(struct scheduler_ctx *sched_ctx)
{
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Opening the QDF Scheduler", __func__);

	/* Sanity checks */
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Initialize the helper events and event queues */
	qdf_event_create(&sched_ctx->sch_start_event);
	qdf_event_create(&sched_ctx->sch_shutdown);
	qdf_event_create(&sched_ctx->resume_sch_event);
	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;

	/* Create the Scheduler Main Controller thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread,
						  sched_ctx, "scheduler_thread");
	if (IS_ERR(sched_ctx->sch_thread)) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
			  "%s: Could not Create QDF Main Thread Controller",
			  __func__);
		scheduler_queues_deinit(sched_ctx);
		return QDF_STATUS_E_RESOURCES;
	}

	/* start the thread here */
	qdf_wake_up_process(sched_ctx->sch_thread);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: QDF Main Controller thread Created", __func__);

	/*
	 * Now make sure the scheduler thread has started before we exit.
	 * The thread ACKs back by setting sch_start_event once it is running.
	 */
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	/* We're good now: Let's get the ball rolling!!! */
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: Scheduler thread has started", __func__);

	return QDF_STATUS_SUCCESS;
}

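/**
 * scheduler_init() - create the scheduler context, queues and MC thread
 *
 * Allocates the global scheduler context, initializes its message queues,
 * opens the scheduler (which starts the controller thread) and registers
 * the MC timer callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */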
QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct scheduler_ctx *sched_ctx;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Opening Scheduler"));

	status = scheduler_create_ctx();
	if (QDF_STATUS_SUCCESS != status) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("can't create scheduler ctx"));
		return status;
	}

	sched_ctx = scheduler_get_context();
	status = scheduler_queues_init(sched_ctx);
	if (QDF_STATUS_SUCCESS != status) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("Queue init failed"));
		scheduler_destroy_ctx();
		return status;
	}

	status = scheduler_open(sched_ctx);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		/* Critical error: cannot proceed further */
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
			  "Failed to open QDF Scheduler");
		QDF_ASSERT(0);
		scheduler_queues_deinit(sched_ctx);
		scheduler_destroy_ctx();
		return status;
	}

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;
}

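/**
 * scheduler_deinit() - shut down the scheduler and destroy its context
 *
 * Closes the scheduler (stopping the controller thread and flushing its
 * message queues) and then destroys the global scheduler context.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */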
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
		  FL("Closing Scheduler"));

	status = scheduler_close(sched_ctx);
	if (QDF_STATUS_SUCCESS != status) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("Scheduler close failed"));
		return status;
	}

	return scheduler_destroy_ctx();
}

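/**
 * scheduler_post_msg_by_priority() - post a message to a scheduler queue
 * @qid: message queue id to post to
 * @pMsg: message to post; its contents are copied into a free msg wrapper
 * @is_high_priority: true to queue the message at the head of the queue
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */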
QDF_STATUS scheduler_post_msg_by_priority(CDS_MQ_ID qid,
		struct scheduler_msg *pMsg, bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq = NULL;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (!sched_ctx || !pMsg) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: Null params or global sch context is null",
			  __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components, which
	 * populate callback handlers directly in the message body:
	 * 1) legacy WMA messages must not carry a callback
	 * 2) new target_if messages must carry a valid callback
	 * Clear the callback handler for legacy WMA messages so that a legacy
	 * WMA message posted from the stack with an uninitialized callback is
	 * still handled properly. Also remap the legacy WMA queue id to the
	 * target_if queue so that such messages are always handled in the
	 * right order.
	 */
	if (CDS_MQ_ID_WMA == qid) {
		pMsg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		qid = CDS_MQ_ID_TARGET_IF;
	}

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("Scheduler is deinitialized ignore msg"));
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("callback not registered for qid[%d]"), qid);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
	QDF_ASSERT(target_mq);
	if (target_mq == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: target_mq == NULL", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Try and get a free Msg wrapper */
	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
	if (NULL == msg_wrapper) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("message wrapper empty"));
		return QDF_STATUS_E_RESOURCES;
	}

	/* Copy the message now */
	qdf_mem_copy((void *)msg_wrapper->msg_buf,
		     (void *)pMsg, sizeof(struct scheduler_msg));

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, msg_wrapper);
	else
		scheduler_mq_put(target_mq, msg_wrapper);

	qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
		scheduler_msg_process_fn_t callback)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
		  FL("Enter"));

	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("sched_ctx is NULL"));
		return QDF_STATUS_E_FAILURE;
	}

	if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("Already registered max %d no of message queues"),
			  SCHEDULER_NUMBER_OF_MSG_QUEUE);
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
	sched_ctx->sch_last_qidx++;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
		  FL("Exit"));

	return QDF_STATUS_SUCCESS;
}

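/*
 * Minimal usage sketch (illustrative only; the module id, message type,
 * handler name and payload below are hypothetical, not part of this file):
 *
 *	static QDF_STATUS my_mq_handler(struct scheduler_msg *msg)
 *	{
 *		// dispatch on msg->type; free msg->bodyptr as needed
 *		return QDF_STATUS_SUCCESS;
 *	}
 *
 *	scheduler_register_module(QDF_MODULE_ID_SYS, my_mq_handler);
 *
 *	struct scheduler_msg msg = {0};
 *	msg.type = MY_MSG_TYPE;
 *	msg.bodyptr = payload;
 *	scheduler_post_msg(CDS_MQ_ID_SYS, &msg);
 */
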
QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
{
	struct scheduler_mq_ctx *ctx;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	uint8_t qidx;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  FL("Enter"));

	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("sched_ctx is NULL"));
		return QDF_STATUS_E_FAILURE;
	}

	ctx = &sched_ctx->queue_ctx;
	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
	ctx->scheduler_msg_process_fn[qidx] = NULL;
	/* invalidate the qid -> qidx mapping for this module */
	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  FL("Exit"));

	return QDF_STATUS_SUCCESS;
}

void scheduler_resume(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_event_set(&sched_ctx->resume_sch_event);
}

void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		sched_ctx->hdd_callback = callback;
}

void scheduler_wake_up_controller_thread(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
}

void scheduler_set_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_set_bit(event_mask, &sched_ctx->sch_event_flag);
}

void scheduler_clear_event_mask(uint32_t event_mask)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (sched_ctx)
		qdf_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}

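/**
 * scheduler_target_if_mq_handler() - handle a message from the target_if queue
 * @msg: message to handle
 *
 * Messages that carry a callback in the message body (new UMAC components)
 * are dispatched to that callback; messages without a callback (legacy WMA)
 * are passed to the registered legacy WMA handler.
 *
 * Return: QDF_STATUS returned by the invoked handler, or
 *         QDF_STATUS_E_FAILURE on bad parameters
 */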
QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);

	if (NULL == msg || NULL == sched_ctx) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
			  msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	target_if_msg_handler = msg->callback;

	/* Target_If is a special message queue in phase 3 convergence because
	 * it is used both by legacy WMA and by new UMAC components. New UMAC
	 * components pass their message handlers directly as the callback in
	 * the message body:
	 * 1) legacy WMA messages never carry a message callback, so invoke the
	 *    registered legacy WMA handler; the scheduler message posting APIs
	 *    make sure legacy WMA messages do not have callbacks.
	 * 2) for new messages that carry a valid callback, invoke the callback
	 *    directly.
	 */
	if (NULL == target_if_msg_handler)
		status = sched_ctx->legacy_wma_handler(msg);
	else
		status = target_if_msg_handler(msg);

	return status;
}

QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	if (NULL == msg) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR, FL("Msg is NULL"));
		return QDF_STATUS_E_FAILURE;
	}

	os_if_msg_handler = msg->callback;
	if (NULL == os_if_msg_handler) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR, FL("Msg callback is NULL"));
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

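/**
 * scheduler_timer_q_mq_handler() - handle a message posted to the timer queue
 * @msg: message to handle
 *
 * MC timer expiry messages (identified by SYS_MSG_COOKIE/SYS_MSG_ID_MC_TIMER)
 * are dispatched to the timer callback carried in the message; all other
 * messages are passed to the registered legacy SYS handler.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */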
QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_q_msg_handler;

	if (NULL == msg || NULL == sched_ctx) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
			  msg, sched_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	timer_q_msg_handler = msg->callback;

	/* Timer message handler */
	if (SYS_MSG_COOKIE == msg->reserved &&
	    SYS_MSG_ID_MC_TIMER == msg->type) {
		if (timer_q_msg_handler) {
			status = QDF_STATUS_SUCCESS;
			timer_q_msg_handler(msg->bodyptr);
		} else {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				  QDF_TRACE_LEVEL_ERROR,
				  FL("Timer cb is null"));
			status = QDF_STATUS_E_FAILURE;
		}
		return status;
	} else {
		/* Legacy sys message handler */
		status = sched_ctx->legacy_sys_handler(msg);
		return status;
	}
}

QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("scheduler context is null"));
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	if (NULL == sched_ctx) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("scheduler context is null"));
		return QDF_STATUS_E_FAILURE;
	}

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

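/**
 * scheduler_mc_timer_callback() - timer expiry handler run in timer context
 * @data: opaque value carrying the expired qdf_mc_timer_t
 *
 * Validates the timer state under its spinlock, snapshots the user callback
 * and data, and serializes the expiry to the scheduler controller thread by
 * posting a SYS_MSG_ID_MC_TIMER message to the SYS queue.
 */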
void scheduler_mc_timer_callback(unsigned long data)
{
	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
	struct scheduler_msg msg;
	QDF_STATUS status;
	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_ASSERT(timer);
	if (timer == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s Null pointer passed in!", __func__);
		return;
	}

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);
	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the timer;
		 * the MC timer got started and expired, but the timer content
		 * has not been updated yet. This is a rare race condition!
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;
	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;
	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;
	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stopped state here because the callback
		 * function may restart the timer (to emulate a periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content may
		 * be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;
	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}
	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_STATUS_SUCCESS != status) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "TIMER callback called in a wrong state=%d",
			  timer->state);
		return;
	}

	qdf_try_allowing_sleep(type);

	if (callback == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: No TIMER callback, Couldn't enqueue timer to any queue",
			  __func__);
		QDF_ASSERT(0);
		return;
	}

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	if (scheduler_post_msg(CDS_MQ_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
		return;
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: Could not enqueue timer to timer queue", __func__);
}