scheduler_core.c

/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

#define SCHEDULER_HISTORY_HEADER "|Callback "\
				 "|Message Type" \
				 "|Queue Duration(us)|Queue Depth" \
				 "|Run Duration(us)|"

#define SCHEDULER_HISTORY_LINE "--------------------------------------" \
			       "--------------------------------------" \
			       "--------------------------------------"

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: Id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;
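
/**
 * sched_history_queue() - record queue metrics on a message at enqueue time
 * @queue: the message queue the message is being added to
 * @msg: the message being queued
 *
 * Return: None
 */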
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}
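
/**
 * sched_history_start() - record the start of message execution
 * @msg: the message about to be executed
 *
 * Fills the current history slot with the message's queue and run timing
 * metrics.
 *
 * Return: None
 */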
static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}
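
/**
 * sched_history_stop() - record the end of message execution
 *
 * Computes the run duration for the current history slot and advances the
 * circular history index.
 *
 * Return: None
 */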
static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}
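
/**
 * sched_history_print() - print the scheduler message history, oldest first
 *
 * Return: None
 */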
void sched_history_print(void)
{
	struct sched_history_item *history, *item;
	uint32_t history_idx;
	uint32_t idx, index;

	history = qdf_mem_malloc(sizeof(*history) * WLAN_SCHED_HISTORY_SIZE);
	if (!history) {
		sched_err("Mem alloc failed");
		return;
	}

	qdf_mem_copy(history, &sched_history,
		     (sizeof(*history) * WLAN_SCHED_HISTORY_SIZE));
	history_idx = sched_history_index;

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
	sched_nofl_fatal(SCHEDULER_HISTORY_HEADER);
	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	for (idx = 0; idx < WLAN_SCHED_HISTORY_SIZE; idx++) {
		index = (history_idx + idx) % WLAN_SCHED_HISTORY_SIZE;
		item = history + index;

		if (!item->callback)
			continue;

		sched_nofl_fatal("%40pF|%12d|%18d|%11d|%16d|",
				 item->callback, item->type_id,
				 item->queue_duration_us,
				 item->queue_depth,
				 item->run_duration_us);
	}

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	qdf_mem_free(history);
}

#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }
void sched_history_print(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */
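
/**
 * scheduler_create_ctx() - initialize the scheduler message pool and set up
 *	the global scheduler context pointer
 *
 * Return: QDF_STATUS_SUCCESS
 */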
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}
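
/**
 * scheduler_mq_init() - initialize a scheduler message queue
 * @msg_q: the message queue to initialize
 *
 * Creates the queue's spinlock and its backing list.
 *
 * Return: QDF_STATUS_SUCCESS
 */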
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;
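
/**
 * scheduler_all_queues_init() - initialize all scheduler message queues and
 *	reset the qid-to-qidx map to invalid values
 * @sched_ctx: the global scheduler context
 *
 * Return: QDF_STATUS of operation
 */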
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
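
/**
 * scheduler_mq_put() - add a message to the back of a message queue
 * @msg_q: the message queue
 * @msg: the message to enqueue
 *
 * Return: None
 */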
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}
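
/**
 * scheduler_mq_get() - remove and return the message at the front of a queue
 * @msg_q: the message queue
 *
 * Return: pointer to the dequeued message, or NULL if the queue is empty
 */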
struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}
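
/**
 * scheduler_core_msg_dup() - duplicate a message into pool-backed storage
 * @msg: the message to duplicate
 *
 * Increments the global scheduler queue depth; if the queue stays full for
 * more than SCHEDULER_WRAPPER_MAX_FAIL_COUNT consecutive attempts, a debug
 * panic is triggered.
 *
 * Return: pointer to the duplicated message, or NULL on failure
 */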
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}
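
/**
 * scheduler_thread_process_queues() - drain the scheduler message queues
 * @sch_ctx: the global scheduler context
 * @shutdown: set to true if a shutdown was signaled while processing
 *
 * Services the queues in priority order (index 0 first), restarting from the
 * highest priority queue after every processed message, and handles any
 * pending suspend indication before returning.
 *
 * Return: None
 */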
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with highest priority queue : timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					sch_ctx->timeout);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
					  &sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);

		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return; /* Nothing to process; wait on the wait queue */
}
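
/**
 * scheduler_thread() - entry point of the scheduler controller thread
 * @arg: the global scheduler context, passed as an opaque pointer
 *
 * Waits for post/suspend events and processes the message queues until
 * shutdown is signaled.
 *
 * Return: 0
 */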
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);

	return 0;
}
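
/**
 * scheduler_flush_single_queue() - flush all messages from a message queue
 * @mq: the message queue to flush
 *
 * For each message, the flush callback is invoked if one is set; otherwise
 * any bodyptr payload is freed. The message is then returned to the pool.
 *
 * Return: None
 */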
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}
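
/**
 * scheduler_queues_flush() - flush all of the scheduler's message queues
 * @sched_ctx: the global scheduler context
 *
 * Return: None
 */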
void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}
}