
qcacmn: Use qdf_flex_mem for scheduler messages

Currently, the scheduler thread keeps a large, pre-allocated array of
messages for use in message posting. The vast majority of the time,
however, the scheduler thread has zero or one messages pending in the
queue. This leads to a huge memory overhead for nominal driver
operation.

Replace the current pre-allocated scheduler message pool with a
hybrid static/dynamic approach.
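
As an illustration only, here is a minimal sketch of the qdf_flex_mem usage pattern this change adopts, built from the calls introduced in this commit (the pool and function names below are hypothetical):

/* a pool backed by a small static segment; allocations beyond it fall back
 * to dynamic memory, avoiding a large always-resident array
 */
DEFINE_QDF_FLEX_MEM_POOL(example_pool, sizeof(struct scheduler_msg), 0);

static void example_pool_usage(void)
{
	struct scheduler_msg *msg;

	qdf_flex_mem_init(&example_pool);

	msg = qdf_flex_mem_alloc(&example_pool);
	if (msg)
		qdf_flex_mem_free(&example_pool, msg);

	/* prune unused, dynamically grown segments back to the system */
	qdf_flex_mem_release(&example_pool);

	qdf_flex_mem_deinit(&example_pool);
}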

Change-Id: Ie942bacfef43edf142a9f35ad0309069096cda90
CRs-Fixed: 2204172
Dustin Brown, 7 years ago
parent
commit c7ee85c4a8

+ 1 - 0
qdf/src/qdf_flex_mem.c

@@ -195,3 +195,4 @@ void qdf_flex_mem_release(struct qdf_flex_mem_pool *pool)
 	qdf_spin_unlock_bh(&pool->lock);
 }
 qdf_export_symbol(qdf_flex_mem_release);
+

+ 2 - 0
scheduler/inc/scheduler_api.h

@@ -67,6 +67,7 @@ typedef enum {
  *   like PSOC, PDEV, VDEV and PEER. A component needs to populate flush
  *   callback in message body pointer for those messages which have taken ref
  *   count for above mentioned common objects.
+ * @node: list node for queue membership
  */
 struct scheduler_msg {
 	uint16_t type;
@@ -75,6 +76,7 @@ struct scheduler_msg {
 	void *bodyptr;
 	void *callback;
 	void *flush_callback;
+	qdf_list_node_t node;
 };
 
 typedef QDF_STATUS (*scheduler_msg_process_fn_t) (struct scheduler_msg  *msg);
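
With the list node embedded in the message itself, a scheduler message can be linked directly onto a qdf_list with no separate wrapper object. A rough sketch of the resulting enqueue/dequeue pattern, using only qdf_list calls that appear later in this commit (function names are illustrative):

static void example_enqueue(qdf_list_t *queue, struct scheduler_msg *msg)
{
	/* the message carries its own list linkage */
	qdf_list_insert_back(queue, &msg->node);
}

static struct scheduler_msg *example_dequeue(qdf_list_t *queue)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	status = qdf_list_remove_front(queue, &node);
	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	/* recover the containing message from its embedded node */
	return qdf_container_of(node, struct scheduler_msg, node);
}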

+ 36 - 59
scheduler/inc/scheduler_core.h

@@ -63,29 +63,13 @@ struct scheduler_mq_type {
 	QDF_MODULE_ID qid;
 };
 
-/**
- * struct scheduler_msg_wrapper - scheduler message wrapper
- * @msg_node: message node
- * @msg_buf: message buffer pointer
- */
-struct scheduler_msg_wrapper {
-	qdf_list_node_t msg_node;
-	struct scheduler_msg *msg_buf;
-};
-
 /**
  * struct scheduler_mq_ctx - scheduler message queue context
- * @msg_buffers: array of message buffers
- * @msg_wrappers: array of message wrappers
- * @free_msg_q: free message queue
  * @sch_msg_q: scheduler message queue
  * @scheduler_msg_qid_to_qidx: message qid to qidx mapping
  * @scheduler_msg_process_fn: array of message queue handler function pointers
  */
 struct scheduler_mq_ctx {
-	struct scheduler_msg msg_buffers[SCHEDULER_CORE_MAX_MESSAGES];
-	struct scheduler_msg_wrapper msg_wrappers[SCHEDULER_CORE_MAX_MESSAGES];
-	struct scheduler_mq_type free_msg_q;
 	struct scheduler_mq_type sch_msg_q[SCHEDULER_NUMBER_OF_MSG_QUEUE];
 	uint8_t scheduler_msg_qid_to_qidx[QDF_MODULE_ID_MAX];
 	QDF_STATUS (*scheduler_msg_process_fn[SCHEDULER_NUMBER_OF_MSG_QUEUE])
@@ -128,6 +112,23 @@ struct scheduler_ctx {
 	void *watchdog_callback;
 };
 
+/**
+ * scheduler_core_msg_dup() - duplicate the given scheduler message
+ * @msg: the message to duplicate
+ *
+ * Note: Duplicated messages must be freed using scheduler_core_msg_free().
+ *
+ * Return: pointer to the duplicated message
+ */
+struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg);
+
+/**
+ * scheduler_core_msg_free() - free the given scheduler message
+ * @msg: the duplicated message to free
+ *
+ * Return: None
+ */
+void scheduler_core_msg_free(struct scheduler_msg *msg);
 
 /**
  * scheduler_get_context() - to get scheduler context
@@ -137,6 +138,7 @@ struct scheduler_ctx {
  * Return: Pointer to scheduler context
  */
 struct scheduler_ctx *scheduler_get_context(void);
+
 /**
  * scheduler_thread() - spawned thread will execute this routine
  * @arg: pointer to scheduler context
@@ -147,17 +149,6 @@ struct scheduler_ctx *scheduler_get_context(void);
  */
 int scheduler_thread(void *arg);
 
-/**
- * scheduler_cleanup_queues() - to clean up the given module's queue
- * @sch_ctx: pointer to scheduler context
- * @idx: index of the queue which needs to be cleanup.
- *
- * This routine  is used to clean the module's queue provided by
- * user through idx field
- *
- * Return: none
- */
-void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx);
 /**
  * scheduler_create_ctx() - to create scheduler context
  *
@@ -174,28 +165,11 @@ QDF_STATUS scheduler_create_ctx(void);
  * Return: QDF_STATUS based on success or failure
  */
 QDF_STATUS scheduler_destroy_ctx(void);
-/**
- * scheduler_mq_init() - initialize scheduler message queue
- * @msg_q: Pointer to the message queue
- *
- * This function initializes the Message queue.
- *
- * Return: qdf status
- */
-QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q);
-/**
- * scheduler_mq_deinit() - de-initialize scheduler message queue
- * @msg_q: Pointer to the message queue
- *
- * This function de-initializes scheduler message queue
- *
- *  Return: none
- */
-void scheduler_mq_deinit(struct scheduler_mq_type *msg_q);
+
 /**
  * scheduler_mq_put() - put message in the back of queue
  * @msg_q: Pointer to the message queue
- * @msg_wrapper: pointer to message wrapper
+ * @msg: the message to enqueue
  *
  * This function is used to put message in back of provided message
  * queue
@@ -203,11 +177,11 @@ void scheduler_mq_deinit(struct scheduler_mq_type *msg_q);
  *  Return: none
  */
 void scheduler_mq_put(struct scheduler_mq_type *msg_q,
-			struct scheduler_msg_wrapper *msg_wrapper);
+		      struct scheduler_msg *msg);
 /**
  * scheduler_mq_put_front() - put message in the front of queue
  * @msg_q: Pointer to the message queue
- * @msg_wrapper: pointer to message wrapper
+ * @msg: the message to enqueue
  *
  * This function is used to put message in front of provided message
  * queue
@@ -215,7 +189,7 @@ void scheduler_mq_put(struct scheduler_mq_type *msg_q,
  *  Return: none
  */
 void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
-			struct scheduler_msg_wrapper *msg_wrapper);
+			    struct scheduler_msg *msg);
 /**
  * scheduler_mq_get() - to get message from message queue
  * @msg_q: Pointer to the message queue
@@ -224,16 +198,8 @@ void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
  *
  *  Return: none
  */
-struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q);
-/**
- * scheduler_is_mq_empty() - to check if message queue is empty
- * @msg_q: Pointer to the message queue
- *
- * This function is used to check if message queue is empty
- *
- * Return: true or false
- */
-bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q);
+struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q);
+
 /**
  * scheduler_queues_init() - to initialize all the modules' queues
  * @sched_ctx: pointer to scheduler context
@@ -243,6 +209,7 @@ bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q);
  * Return: QDF_STATUS based on success of failure
  */
 QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx);
+
 /**
  * scheduler_queues_deinit() - to de-initialize all the modules' queues
  * @sched_ctx: pointer to scheduler context
@@ -252,4 +219,14 @@ QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx);
  * Return: QDF_STATUS based on success of failure
  */
 QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *gp_sch_ctx);
+
+/**
+ * scheduler_queues_flush() - flush all of the scheduler queues
+ * @sched_ctx: pointer to scheduler context
+ *
+ * This routine is used to flush all of the modules' message queues
+ *
+ * Return: none
+ */
+void scheduler_queues_flush(struct scheduler_ctx *sched_ctx);
 #endif
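
Taken together, these declarations describe the message lifecycle the scheduler now follows: duplicate the caller's message into pool-backed storage, queue the copy, and free it once it has been processed. A simplified sketch of that flow (locking and queue setup elided; the authoritative implementation is in scheduler_core.c below):

static QDF_STATUS example_post(struct scheduler_mq_type *mq,
			       struct scheduler_msg *msg)
{
	struct scheduler_msg *copy;

	/* copy the caller's stack message into pool-backed storage */
	copy = scheduler_core_msg_dup(msg);
	if (!copy)
		return QDF_STATUS_E_NOMEM;

	scheduler_mq_put(mq, copy);

	return QDF_STATUS_SUCCESS;
}

static void example_process(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;

	while ((msg = scheduler_mq_get(mq))) {
		/* ... dispatch msg to its handler ... */
		scheduler_core_msg_free(msg);
	}
}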

+ 26 - 60
scheduler/src/scheduler_api.c

@@ -20,28 +20,6 @@
 #include <scheduler_core.h>
 #include <qdf_atomic.h>
 
-/* Debug variable to detect if controller thread is stuck */
-static qdf_atomic_t scheduler_msg_post_fail_count;
-
-static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
-{
-	int i;
-
-	/* Here each of the MC thread MQ shall be drained and returned to the
-	 * Core. Before returning a wrapper to the Core, the Scheduler message
-	 * shall be freed first
-	 */
-	sched_info("Flushing scheduler message queue");
-
-	QDF_ASSERT(sched_ctx);
-	if (!sched_ctx) {
-		sched_err("sched_ctx is NULL");
-		return;
-	}
-	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
-		scheduler_cleanup_queues(sched_ctx, i);
-}
-
 QDF_STATUS scheduler_disable(void)
 {
 	struct scheduler_ctx *sched_ctx;
@@ -65,7 +43,7 @@ QDF_STATUS scheduler_disable(void)
 	sched_ctx->sch_thread = NULL;
 
 	/* flush any unprocessed scheduler messages */
-	scheduler_flush_mqs(sched_ctx);
+	scheduler_queues_flush(sched_ctx);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -247,19 +225,20 @@ QDF_STATUS scheduler_deinit(void)
 }
 
 QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
-		struct scheduler_msg *pMsg, bool is_high_priority)
+					  struct scheduler_msg *msg,
+					  bool is_high_priority)
 {
 	uint8_t qidx;
-	uint32_t msg_wrapper_fail_count;
-	struct scheduler_mq_type *target_mq = NULL;
-	struct scheduler_msg_wrapper *msg_wrapper = NULL;
-	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+	struct scheduler_mq_type *target_mq;
+	struct scheduler_msg *queue_msg;
+	struct scheduler_ctx *sched_ctx;
 
-	if (!pMsg) {
-		sched_err("pMsg is null");
+	if (!msg) {
+		sched_err("msg is null");
 		return QDF_STATUS_E_INVAL;
 	}
 
+	sched_ctx = scheduler_get_context();
 	if (!sched_ctx) {
 		sched_err("sched_ctx is null");
 		return QDF_STATUS_E_INVAL;
@@ -270,9 +249,9 @@ QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	if ((0 != pMsg->reserved) && (SYS_MSG_COOKIE != pMsg->reserved)) {
-		sched_err("Un-initialized message pointer.. please initialize it");
-		QDF_BUG(0);
+	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
+		sched_err("Uninitialized scheduler message. Please initialize it");
+		QDF_DEBUG_PANIC();
 		return QDF_STATUS_E_FAILURE;
 	}
 
@@ -288,7 +267,7 @@ QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
 	 * handled in right order.
 	 */
 	if (QDF_MODULE_ID_WMA == qid) {
-		pMsg->callback = NULL;
+		msg->callback = NULL;
 		/* change legacy WMA message id to new target_if mq id */
 		qid = QDF_MODULE_ID_TARGET_IF;
 	}
@@ -306,36 +285,15 @@ QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
 	}
 
 	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
-	QDF_ASSERT(target_mq);
-	if (target_mq == NULL) {
-		sched_err("target_mq == NULL");
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	/* Try and get a free Msg wrapper */
-	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
-	if (NULL == msg_wrapper) {
-		msg_wrapper_fail_count =
-			qdf_atomic_inc_return(&scheduler_msg_post_fail_count);
-		/* log only 1st failure to avoid over running log buffer */
-		if (msg_wrapper_fail_count == 1)
-			sched_err("Scheduler message wrapper empty");
 
-		if (SCHEDULER_WRAPPER_MAX_FAIL_COUNT == msg_wrapper_fail_count)
-			QDF_BUG(0);
-
-		return QDF_STATUS_E_RESOURCES;
-	}
-	qdf_atomic_set(&scheduler_msg_post_fail_count, 0);
-
-	/* Copy the message now */
-	qdf_mem_copy((void *)msg_wrapper->msg_buf,
-			(void *)pMsg, sizeof(struct scheduler_msg));
+	queue_msg = scheduler_core_msg_dup(msg);
+	if (!queue_msg)
+		return QDF_STATUS_E_NOMEM;
 
 	if (is_high_priority)
-		scheduler_mq_put_front(target_mq, msg_wrapper);
+		scheduler_mq_put_front(target_mq, queue_msg);
 	else
-		scheduler_mq_put(target_mq, msg_wrapper);
+		scheduler_mq_put(target_mq, queue_msg);
 
 	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
 	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
@@ -601,6 +559,11 @@ QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
 	return QDF_STATUS_SUCCESS;
 }
 
+static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 void scheduler_mc_timer_callback(unsigned long data)
 {
 	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
@@ -682,6 +645,9 @@ void scheduler_mc_timer_callback(unsigned long data)
 	msg.bodyptr = user_data;
 	msg.bodyval = 0;
 
+	/* bodyptr points to user data, do not free it during msg flush */
+	msg.flush_callback = scheduler_msg_flush_noop;
+
 	if (scheduler_post_msg(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
 		return;
 	sched_err("Could not enqueue timer to timer queue");

+ 108 - 173
scheduler/src/scheduler_core.c

@@ -18,12 +18,21 @@
 
 #include <scheduler_core.h>
 #include <qdf_atomic.h>
+#include "qdf_flex_mem.h"
 
 static struct scheduler_ctx g_sched_ctx;
 static struct scheduler_ctx *gp_sched_ctx;
 
+#ifndef WLAN_SCHED_REDUCTION_LIMIT
+#define WLAN_SCHED_REDUCTION_LIMIT 0
+#endif
+
+DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
+			 WLAN_SCHED_REDUCTION_LIMIT);
+
 QDF_STATUS scheduler_create_ctx(void)
 {
+	qdf_flex_mem_init(&sched_pool);
 	gp_sched_ctx = &g_sched_ctx;
 
 	return QDF_STATUS_SUCCESS;
@@ -32,15 +41,41 @@ QDF_STATUS scheduler_create_ctx(void)
 QDF_STATUS scheduler_destroy_ctx(void)
 {
 	gp_sched_ctx = NULL;
+	qdf_flex_mem_deinit(&sched_pool);
 
 	return QDF_STATUS_SUCCESS;
 }
 
 struct scheduler_ctx *scheduler_get_context(void)
 {
+	QDF_BUG(gp_sched_ctx);
+
 	return gp_sched_ctx;
 }
 
+static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
+{
+	sched_enter();
+
+	qdf_spinlock_create(&msg_q->mq_lock);
+	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
+
+	sched_exit();
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
+{
+	sched_enter();
+
+	qdf_list_destroy(&msg_q->mq_list);
+	qdf_spinlock_destroy(&msg_q->mq_lock);
+
+	sched_exit();
+}
+
+static qdf_atomic_t __sched_queue_depth;
 
 static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
 {
@@ -55,11 +90,7 @@ static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
-	if (QDF_STATUS_SUCCESS != status)
-		return status;
-
-	sched_debug("free msg queue init complete");
+	qdf_atomic_set(&__sched_queue_depth, 0);
 
 	/* Initialize all message queues */
 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
@@ -78,7 +109,6 @@ static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
 	return status;
 }
 
-
 static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
 {
 	int i;
@@ -91,10 +121,6 @@ static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);
-
-	sched_debug("free msg queue inited");
-
 	/* De-Initialize all message queues */
 	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
 		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);
@@ -109,108 +135,35 @@ static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
 	return QDF_STATUS_SUCCESS;
 }
 
-QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
-{
-	sched_enter();
-
-	if (!msg_q) {
-		sched_err("msg_q is null");
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	/* Now initialize the lock */
-	qdf_spinlock_create(&msg_q->mq_lock);
-
-	/* Now initialize the List data structure */
-	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
-
-	sched_exit();
-
-	return QDF_STATUS_SUCCESS;
-}
-
-void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
-{
-	if (!msg_q)
-		sched_err("msg_q is null");
-}
-
 void scheduler_mq_put(struct scheduler_mq_type *msg_q,
-		      struct scheduler_msg_wrapper *msg_wrapper)
+		      struct scheduler_msg *msg)
 {
-	if (!msg_q) {
-		sched_err("msg_q is null");
-		return;
-	}
-
-	if (!msg_wrapper) {
-		sched_err("msg_wrapper is null");
-		return;
-	}
-
 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
-	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
+	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
 }
 
 void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
-			    struct scheduler_msg_wrapper *msg_wrapper)
+			    struct scheduler_msg *msg)
 {
-	if (!msg_q) {
-		sched_err("msg_q is null");
-		return;
-	}
-
-	if (!msg_wrapper) {
-		sched_err("msg_wrapper is null");
-		return;
-	}
-
 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
-	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
+	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
 }
 
-struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
+struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
 {
-	qdf_list_node_t *listptr;
-	struct scheduler_msg_wrapper *msg_wrapper = NULL;
-
-	if (!msg_q) {
-		sched_err("msg_q is null");
-		return NULL;
-	}
+	QDF_STATUS status;
+	qdf_list_node_t *node;
 
 	qdf_spin_lock_irqsave(&msg_q->mq_lock);
-	if (qdf_list_empty(&msg_q->mq_list)) {
-		sched_warn("Scheduler Message Queue is empty");
-	} else {
-		listptr = msg_q->mq_list.anchor.next;
-		msg_wrapper = (struct scheduler_msg_wrapper *)
-					qdf_container_of(listptr,
-						struct scheduler_msg_wrapper,
-						msg_node);
-		qdf_list_remove_node(&msg_q->mq_list, listptr);
-	}
+	status = qdf_list_remove_front(&msg_q->mq_list, &node);
 	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
 
-	return msg_wrapper;
-}
-
-bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
-{
-	bool is_empty;
-
-	if (!msg_q) {
-		sched_err("msg_q is null");
-		return true;
-	}
-
-	qdf_spin_lock_irqsave(&msg_q->mq_lock);
-	is_empty = qdf_list_empty(&msg_q->mq_list);
-	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
+	if (QDF_IS_STATUS_ERROR(status))
+		return NULL;
 
-	return is_empty;
+	return qdf_container_of(node, struct scheduler_msg, node);
 }
 
 QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
@@ -221,7 +174,6 @@ QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
 QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
 {
 	QDF_STATUS status;
-	int i;
 
 	sched_enter();
 
@@ -232,7 +184,7 @@ QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
 	}
 
 	status = scheduler_all_queues_init(sched_ctx);
-	if (QDF_STATUS_SUCCESS != status) {
+	if (QDF_IS_STATUS_ERROR(status)) {
 		scheduler_all_queues_deinit(sched_ctx);
 		sched_err("Failed to initialize the msg queues");
 		return status;
@@ -240,40 +192,43 @@ QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
 
 	sched_debug("Queue init passed");
 
-	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
-		(sched_ctx->queue_ctx.msg_wrappers[i]).msg_buf =
-			&(sched_ctx->queue_ctx.msg_buffers[i]);
-		qdf_init_list_head(
-			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
-		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
-			   &(sched_ctx->queue_ctx.msg_wrappers[i]));
-	}
-
 	sched_exit();
 
 	return QDF_STATUS_SUCCESS;
 }
 
-static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
-				      struct scheduler_msg_wrapper *msg_wrapper)
+struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
 {
-	if (!sch_ctx) {
-		sched_err("sch_ctx is null");
-		QDF_DEBUG_PANIC();
-		return;
-	}
+	struct scheduler_msg *dup;
 
-	QDF_ASSERT(msg_wrapper);
-	if (!msg_wrapper) {
-		sched_err("msg_wrapper is null");
-		return;
+	if (qdf_atomic_inc_return(&__sched_queue_depth) >
+	    SCHEDULER_CORE_MAX_MESSAGES)
+		goto buffer_full;
+
+	dup = qdf_flex_mem_alloc(&sched_pool);
+	if (!dup) {
+		sched_err("out of memory");
+		goto dec_queue_count;
 	}
 
-	/*
-	 * Return the message on the free message queue
-	 */
-	qdf_init_list_head(&msg_wrapper->msg_node);
-	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
+	qdf_mem_copy(dup, msg, sizeof(*dup));
+
+	return dup;
+
+buffer_full:
+	sched_err("Scheduler buffer is full");
+	QDF_DEBUG_PANIC();
+
+dec_queue_count:
+	qdf_atomic_dec(&__sched_queue_depth);
+
+	return NULL;
+}
+
+void scheduler_core_msg_free(struct scheduler_msg *msg)
+{
+	qdf_flex_mem_free(&sched_pool, msg);
+	qdf_atomic_dec(&__sched_queue_depth);
 }
 
 static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
@@ -281,7 +236,7 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
 {
 	int i;
 	QDF_STATUS status;
-	struct scheduler_msg_wrapper *msg_wrapper;
+	struct scheduler_msg *msg;
 
 	if (!sch_ctx) {
 		sched_err("sch_ctx is null");
@@ -309,23 +264,14 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
 			break;
 		}
 
-		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
+		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
+		if (!msg) {
 			/* check next queue */
 			i++;
 			continue;
 		}
 
-		msg_wrapper =
-			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
-		if (!msg_wrapper) {
-			sched_err("msg_wrapper is NULL");
-			QDF_ASSERT(0);
-			return;
-		}
-
 		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
-			struct scheduler_msg *msg = msg_wrapper->msg_buf;
-
 			sch_ctx->watchdog_msg_type = msg->type;
 			sch_ctx->watchdog_callback = msg->callback;
 			qdf_timer_start(&sch_ctx->watchdog_timer,
@@ -338,14 +284,11 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
 				sched_err("Failed processing Qid[%d] message",
 					  sch_ctx->queue_ctx.sch_msg_q[i].qid);
 
-			/* return message to the Core */
-			scheduler_core_return_msg(sch_ctx, msg_wrapper);
+			scheduler_core_msg_free(msg);
 		}
 
 		/* start again with highest priority queue at index 0 */
 		i = 0;
-
-		continue;
 	}
 
 	/* Check for any Suspend Indication */
@@ -410,47 +353,39 @@ int scheduler_thread(void *arg)
 	return 0;
 }
 
-void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
+static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
 {
-	struct scheduler_msg_wrapper *msg_wrapper;
-	QDF_STATUS (*scheduler_flush_callback) (struct scheduler_msg *);
+	struct scheduler_msg *msg;
+	QDF_STATUS (*flush_cb)(struct scheduler_msg *);
+
+	while ((msg = scheduler_mq_get(mq))) {
+		if (msg->flush_callback) {
+			sched_info("Calling flush callback; type: %x",
+				   msg->type);
+			flush_cb = msg->flush_callback;
+			flush_cb(msg);
+		} else if (msg->bodyptr) {
+			sched_info("Freeing scheduler msg bodyptr; type: %x",
+				   msg->type);
+			qdf_mem_free(msg->bodyptr);
+		}
 
-	if (!sch_ctx) {
-		sched_err("sch_ctx is null");
-		QDF_DEBUG_PANIC();
-		return;
+		scheduler_core_msg_free(msg);
 	}
+}
 
-	while ((msg_wrapper =
-			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
-		if (msg_wrapper->msg_buf) {
-			if ((QDF_MODULE_ID_SYS ==
-				sch_ctx->queue_ctx.sch_msg_q[idx].qid) &&
-			    (SYS_MSG_ID_MC_TIMER ==
-				msg_wrapper->msg_buf->type)) {
-				sched_debug("Timer is freed by each module, not here");
-				continue;
-			}
-			sched_info("Freeing MC MSG message type %d, module id:%d",
-				   msg_wrapper->msg_buf->type,
-				   sch_ctx->queue_ctx.sch_msg_q[idx].qid);
-			if (msg_wrapper->msg_buf->flush_callback) {
-				sched_debug("Flush callback called for type-%x",
-					    msg_wrapper->msg_buf->type);
-				scheduler_flush_callback =
-					msg_wrapper->msg_buf->flush_callback;
-				scheduler_flush_callback(msg_wrapper->msg_buf);
-			} else if (msg_wrapper->msg_buf->bodyptr) {
-				sched_debug("noflush cb given for type-%x",
-					    msg_wrapper->msg_buf->type);
-				qdf_mem_free(msg_wrapper->msg_buf->bodyptr);
-			}
+void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
+{
+	struct scheduler_mq_type *mq;
+	int i;
 
-			msg_wrapper->msg_buf->bodyptr = NULL;
-			msg_wrapper->msg_buf->bodyval = 0;
-			msg_wrapper->msg_buf->type = 0;
-		}
+	sched_info("Flushing scheduler message queues");
 
-		scheduler_core_return_msg(sch_ctx, msg_wrapper);
+	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
+		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
+		scheduler_flush_single_queue(mq);
 	}
+
+	qdf_flex_mem_release(&sched_pool);
 }
+