scheduler_core.c

/*
 * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <scheduler_core.h>
#include <osdep.h>

static struct scheduler_ctx *gp_sched_ctx;
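
/**
 * scheduler_create_ctx() - allocate the global scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE if the
 *	context has already been allocated or the allocation fails
 */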
QDF_STATUS scheduler_create_ctx(void)
{
	if (gp_sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("gp_sched_ctx is already allocated"));
		return QDF_STATUS_E_FAILURE;
	}

	gp_sched_ctx = qdf_mem_malloc(sizeof(struct scheduler_ctx));
	if (!gp_sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("failed to allocate memory for gp_sched_ctx"));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
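
/**
 * scheduler_destroy_ctx() - free the global scheduler context, if any
 *
 * Return: QDF_STATUS_SUCCESS in all cases
 */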
QDF_STATUS scheduler_destroy_ctx(void)
{
	if (gp_sched_ctx)
		qdf_mem_free(gp_sched_ctx);
	gp_sched_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}
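
/**
 * scheduler_get_context() - get the global scheduler context
 *
 * Return: pointer to the scheduler context, or NULL if it has not been
 *	created yet
 */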
struct scheduler_ctx *scheduler_get_context(void)
{
	return gp_sched_ctx;
}
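
/**
 * scheduler_all_queues_init() - initialize the free message queue and
 *	every scheduler message queue
 * @sched_ctx: scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */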
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
	if (QDF_STATUS_SUCCESS != status)
		return status;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG,
		  FL("free msg queue init complete"));

	/* initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("exit"));

	return status;
}
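
/**
 * scheduler_all_queues_deinit() - deinitialize the free message queue
 *	and every scheduler message queue
 * @sched_ctx: scheduler context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if
 *	sched_ctx is NULL
 */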
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG,
		  FL("free msg queue deinit complete"));

	/* deinitialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("exit"));

	return status;
}
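
/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: message queue to initialize
 *
 * Creates the queue lock and the backing list, sized to
 * SCHEDULER_CORE_MAX_MESSAGES entries.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if
 *	msg_q is NULL
 */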
QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("Enter"));
	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* initialize the queue lock */
	qdf_spinlock_create(&msg_q->mq_lock);
	/* initialize the backing list */
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("Exit"));

	return QDF_STATUS_SUCCESS;
}
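
/**
 * scheduler_mq_deinit() - deinitialize a scheduler message queue
 * @msg_q: message queue to deinitialize
 *
 * Currently a no-op beyond validating @msg_q; the lock and list are
 * not explicitly torn down here.
 *
 * Return: none
 */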
void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return;
	}
}
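
/**
 * scheduler_mq_put() - add a message wrapper to the tail of a queue
 * @msg_q: message queue
 * @msg_wrapper: message wrapper to enqueue
 *
 * Return: none
 */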
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (msg_q == NULL || msg_wrapper == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}
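
/**
 * scheduler_mq_put_front() - add a message wrapper to the head of a queue
 * @msg_q: message queue
 * @msg_wrapper: message wrapper to enqueue
 *
 * Return: none
 */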
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg_wrapper *msg_wrapper)
{
	if (msg_q == NULL || msg_wrapper == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}
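
/**
 * scheduler_mq_get() - remove and return the message wrapper at the
 *	head of a queue
 * @msg_q: message queue
 *
 * Return: the dequeued message wrapper, or NULL if the queue is empty
 *	or msg_q is NULL
 */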
struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	qdf_list_node_t *listptr;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		return NULL;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	if (qdf_list_empty(&msg_q->mq_list)) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_WARN,
			  "%s: scheduler message queue is empty", __func__);
	} else {
		listptr = msg_q->mq_list.anchor.next;
		msg_wrapper = qdf_container_of(listptr,
					       struct scheduler_msg_wrapper,
					       msg_node);
		qdf_list_remove_node(&msg_q->mq_list, listptr);
	}
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return msg_wrapper;
}
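
/**
 * scheduler_is_mq_empty() - check whether a message queue is empty
 * @msg_q: message queue
 *
 * Return: true if the queue is empty or msg_q is NULL, false otherwise
 */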
bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
{
	bool is_empty;

	if (msg_q == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed", __func__);
		/* a queue that does not exist has nothing to dequeue */
		return true;
	}

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	is_empty = qdf_list_empty(&msg_q->mq_list);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	return is_empty;
}
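
/**
 * scheduler_queues_deinit() - deinitialize all scheduler queues
 * @sched_ctx: scheduler context
 *
 * Return: QDF_STATUS of the underlying queue deinitialization
 */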
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}
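
/**
 * scheduler_queues_init() - initialize all scheduler queues and
 *	populate the free message queue
 * @sched_ctx: scheduler context
 *
 * After the queues are initialized, each preallocated message wrapper
 * is bound to its message buffer and placed on the free message queue.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */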
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	int i;

	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("Enter"));
	if (!sched_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null sched_ctx passed", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_STATUS_SUCCESS != status) {
		scheduler_all_queues_deinit(sched_ctx);
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
			  FL("Failed to initialize the msg queues"));
		return status;
	}
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG,
		  FL("queue init complete"));

	/* attach a message buffer to each wrapper and seed the free queue */
	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
		sched_ctx->queue_ctx.msg_wrappers[i].msg_buf =
			&sched_ctx->queue_ctx.msg_buffers[i];
		qdf_init_list_head(
			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
				 &sched_ctx->queue_ctx.msg_wrappers[i]);
	}
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG, FL("Exit"));

	return status;
}
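
/**
 * scheduler_core_return_msg() - return a message wrapper to the free
 *	message queue
 * @sch_ctx: scheduler context
 * @msg_wrapper: message wrapper being returned
 *
 * Return: none
 */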
static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
				      struct scheduler_msg_wrapper *msg_wrapper)
{
	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null sch_ctx passed", __func__);
		return;
	}

	QDF_ASSERT(NULL != msg_wrapper);
	if (msg_wrapper == NULL) {
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("msg_wrapper is NULL"));
		return;
	}

	/* return the message to the free message queue */
	qdf_init_list_head(&msg_wrapper->msg_node);
	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
}
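
/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: scheduler context
 * @shutdown: set to true if a shutdown event is detected
 *
 * Services the queues in strict priority order, restarting from the
 * highest priority queue (index 0) after every processed message, and
 * honoring shutdown and suspend indications between messages.
 *
 * Return: none
 */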
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  FL("sch_ctx is null"));
		return;
	}

	/* start with the highest priority queue: timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* check if the scheduler thread needs to shut down */
		if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK,
				 &sch_ctx->sch_event_flag)) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: scheduler thread signaled to shutdown",
				  __func__);
			*shutdown = true;
			/* check for any suspend indication */
			if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						   &sch_ctx->sch_event_flag)) {
				/* unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}
			break;
		}
		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
			/* check the next queue */
			i++;
			continue;
		}
		msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (msg_wrapper == NULL) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: msg_wrapper is NULL", __func__);
			QDF_ASSERT(0);
			return;
		}
		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			status = sch_ctx->queue_ctx.scheduler_msg_process_fn[i](
					msg_wrapper->msg_buf);
			if (QDF_IS_STATUS_ERROR(status)) {
				QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Failed processing Qid[%d] message"),
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);
			}
		}
		/* return the message to the core even if no handler ran,
		 * so the wrapper is not leaked
		 */
		scheduler_core_return_msg(sch_ctx, msg_wrapper);
		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* check for any suspend indication */
	if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
				   &sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* wait for the resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}
	/* nothing left to process; return and wait on the wait queue */
}
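
/**
 * scheduler_thread() - entry point of the scheduler thread
 * @arg: scheduler context, passed as an opaque pointer
 *
 * Sleeps on the scheduler wait queue until a message is posted or a
 * suspend is requested, then processes the queues until signaled to
 * shut down.
 *
 * Return: 0 in all cases
 */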
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int ret_wait_status = 0;
	bool shutdown = false;

	if (arg == NULL) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null arg passed", __func__);
		return 0;
	}

	qdf_set_user_nice(current, -2);

	/* ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_DEBUG,
		  "%s: scheduler thread %d (%s) starting up", __func__,
		  current->pid, current->comm);

	while (!shutdown) {
		/* this implements the execution model algorithm */
		ret_wait_status = qdf_wait_queue_interruptible(
				sch_ctx->sch_wait_queue,
				qdf_test_bit(MC_POST_EVENT_MASK,
					     &sch_ctx->sch_event_flag) ||
				qdf_test_bit(MC_SUSPEND_EVENT_MASK,
					     &sch_ctx->sch_event_flag));
		if (ret_wait_status == -ERESTARTSYS) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: wait_event_interruptible returned -ERESTARTSYS",
				  __func__);
			QDF_BUG(0);
		}
		qdf_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* if we get here the scheduler thread must exit */
	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
		  "%s: scheduler thread exiting", __func__);
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}
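
/**
 * scheduler_cleanup_queues() - flush all messages from a queue
 * @sch_ctx: scheduler context
 * @idx: index of the message queue to flush
 *
 * Frees the body of every queued message and returns each wrapper to
 * the free message queue.
 *
 * Return: none
 */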
void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
{
	struct scheduler_msg_wrapper *msg_wrapper = NULL;

	if (!sch_ctx) {
		QDF_ASSERT(0);
		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
			  "%s: null sch_ctx passed", __func__);
		return;
	}

	while ((msg_wrapper =
			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
		if (msg_wrapper->msg_buf != NULL) {
			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
				  QDF_TRACE_LEVEL_INFO,
				  "%s: freeing MC WMA message of type %d",
				  __func__, msg_wrapper->msg_buf->type);
			if (msg_wrapper->msg_buf->bodyptr)
				qdf_mem_free(msg_wrapper->msg_buf->bodyptr);
			msg_wrapper->msg_buf->bodyptr = NULL;
			msg_wrapper->msg_buf->bodyval = 0;
			msg_wrapper->msg_buf->type = 0;
		}
		scheduler_core_return_msg(sch_ctx, msg_wrapper);
	}
}