فهرست منبع

qcacmn: Add control path scheduler to common driver

Add control path converged scheduler functionality to common
driver.

Change-Id: I2087b985b4bed661c03e667dbcc082714add1266
CRs-Fixed: 1095867
Krunal Soni 8 سال پیش
والد
کامیت
66eabcfdcc

+ 15 - 12
init_deinit/dispatcher/src/dispatcher_init_deinit.c

@@ -19,6 +19,9 @@
 #include <qdf_types.h>
 #include <qdf_trace.h>
 #include <dispatcher_init_deinit.h>
+#ifdef NAPIER_CODE
+#include <scheduler_api.h>
+#endif
 
 /**
  * DOC: This file provides various init/deinit trigger point for new
@@ -71,18 +74,6 @@ static QDF_STATUS tdls_deinit(void)
 	return QDF_STATUS_SUCCESS;
 }
 
-
-static QDF_STATUS scheduler_init(void)
-{
-	return QDF_STATUS_SUCCESS;
-}
-
-static QDF_STATUS scheduler_deinit(void)
-{
-	return QDF_STATUS_SUCCESS;
-}
-
-
 static QDF_STATUS scm_psoc_open(void)
 {
 	return QDF_STATUS_SUCCESS;
@@ -146,6 +137,18 @@ static QDF_STATUS tdls_psoc_disable(void)
 	return QDF_STATUS_SUCCESS;
 }
 
+#ifndef NAPIER_CODE
+static QDF_STATUS scheduler_init(void)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static QDF_STATUS scheduler_deinit(void)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
 QDF_STATUS dispatcher_init(void)
 {
 	if (QDF_STATUS_SUCCESS != obj_manager_init())

+ 8 - 0
qdf/inc/qdf_event.h

@@ -57,6 +57,14 @@ QDF_STATUS qdf_event_destroy(qdf_event_t *event);
 
 QDF_STATUS qdf_wait_single_event(qdf_event_t *event,
 				 uint32_t timeout);
+/**
+ * qdf_event_complete_and_exit() - complete event and exit
+ * @event: Pointer to an event to complete and exit
+ * @reason_code: Reason code for exit
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_event_complete_and_exit(qdf_event_t *event, long reason_code);
 
 #ifdef __cplusplus
 }

+ 13 - 1
qdf/inc/qdf_list.h

@@ -54,6 +54,7 @@
 
 typedef __qdf_list_node_t qdf_list_node_t;
 typedef __qdf_list_t qdf_list_t;
+
 /* Function declarations */
 QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node);
 
@@ -66,9 +67,10 @@ QDF_STATUS qdf_list_peek_next(qdf_list_t *list,	qdf_list_node_t *node,
 			      qdf_list_node_t **node1);
 
 /**
- * qdf_list_create() - Initialize list head
+ * qdf_list_create() - Create qdf list and initialize list head
  * @list: object of list
  * @max_size: max size of the list
+ *
  * Return: none
  */
 static inline void qdf_list_create(__qdf_list_t *list, uint32_t max_size)
@@ -76,6 +78,16 @@ static inline void qdf_list_create(__qdf_list_t *list, uint32_t max_size)
 	__qdf_list_create(list, max_size);
 }
 
+/**
+ * qdf_init_list_head() - initialize list head
+ * @list_head: pointer to list head
+ *
+ * Return: none
+ */
+static inline void qdf_init_list_head(__qdf_list_node_t *list_head)
+{
+	__qdf_init_list_head(list_head);
+}
 
 /**
  * qdf_list_destroy() - Destroy the list

+ 1 - 0
qdf/inc/qdf_mc_timer.h

@@ -277,4 +277,5 @@ void qdf_timer_module_deinit(void);
  * Return: None
  */
 void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len);
+void qdf_register_mc_timer_callback(void (*callback) (unsigned long data));
 #endif /* __QDF_MC_TIMER_H */

+ 28 - 0
qdf/inc/qdf_threads.h

@@ -34,6 +34,7 @@
 #define __QDF_THREADS_H
 
 #include <qdf_types.h>
+#include <qdf_util.h>
 
 /* Function declarations and documenation */
 
@@ -43,4 +44,31 @@ void qdf_sleep_us(uint32_t us_interval);
 
 void qdf_busy_wait(uint32_t us_interval);
 
+/**
+ * qdf_set_user_nice() - set thread's nice value
+ * @thread: pointer to thread
+ * @nice: nice value
+ *
+ * Return: none
+ */
+void qdf_set_user_nice(qdf_thread_t *thread, long nice);
+
+/**
+ * qdf_create_thread() - create a kernel thread
+ * @thread_handler: entry function the new thread will run
+ * @data: opaque argument passed to @thread_handler
+ * @thread_name: name for the new thread
+ *
+ * Return: pointer to created kernel thread
+ */
+qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data,
+				const char thread_name[]);
+
+/**
+ * qdf_wake_up_process() - wake up given thread
+ * @thread: pointer to thread which needs to be woken up
+ *
+ * Return: 1 if the thread was woken up, 0 if it was already running
+ */
+int qdf_wake_up_process(qdf_thread_t *thread);
+
 #endif /* __QDF_THREADS_H */

+ 8 - 0
qdf/inc/qdf_types.h

@@ -239,6 +239,11 @@ typedef void (*qdf_timer_func_t)(void *);
  * @QDF_MODULE_ID_BMI: BMI module ID
  * @QDF_MODULE_ID_EPPING: EPPING module ID
  * @QDF_MODULE_ID_QVIT: QVIT module ID
+ * @QDF_MODULE_ID_DP: Data path module ID
+ * @QDF_MODULE_ID_SOC: SOC module ID
+ * @QDF_MODULE_ID_OS_IF: Scheduler OS interface queue module ID
+ * @QDF_MODULE_ID_TARGET_IF: Scheduler target interface queue module ID
+ * @QDF_MODULE_ID_SCHEDULER: Scheduler's module ID
  * @QDF_MODULE_ID_MAX: Max place holder module ID
  *
  * These are generic IDs that identify the various modules in the software
@@ -271,6 +276,9 @@ typedef enum {
 	QDF_MODULE_ID_QVIT = 23,
 	QDF_MODULE_ID_DP = 24,
 	QDF_MODULE_ID_SOC = 25,
+	QDF_MODULE_ID_OS_IF = 26,
+	QDF_MODULE_ID_TARGET_IF = 27,
+	QDF_MODULE_ID_SCHEDULER = 28,
 	QDF_MODULE_ID_MAX
 } QDF_MODULE_ID;
 

+ 39 - 0
qdf/inc/qdf_util.h

@@ -41,6 +41,9 @@
 #define QDF_MAX_AVAILABLE_CPU	1
 #endif
 
+typedef __qdf_thread_t qdf_thread_t;
+typedef __qdf_wait_queue_head_t qdf_wait_queue_head_t;
+
 /**
  * qdf_unlikely - Compiler-dependent macro denoting code likely to execute
  * @_expr: expression to be checked
@@ -111,6 +114,42 @@ static inline int qdf_status_to_os_return(QDF_STATUS status)
  */
 #define qdf_set_bit(nr, addr)    __qdf_set_bit(nr, addr)
 
+/**
+ * qdf_clear_bit() - clear bit in address
+ * @nr: bit number to be clear
+ * @addr: address buffer pointer
+ *
+ * Return: none
+ */
+#define qdf_clear_bit(nr, addr)    __qdf_clear_bit(nr, addr)
+
+/**
+ * qdf_test_bit() - test bit position in address
+ * @nr: bit number to be tested
+ * @addr: address buffer pointer
+ *
+ * Return: true if the bit is set, false otherwise
+ */
+#define qdf_test_bit(nr, addr)    __qdf_test_bit(nr, addr)
+
+/**
+ * qdf_test_and_clear_bit() - test and clear bit position in address
+ * @nr: bit number to be tested
+ * @addr: address buffer pointer
+ *
+ * Return: true if the bit was set before it was cleared, false otherwise
+ */
+#define qdf_test_and_clear_bit(nr, addr)    __qdf_test_and_clear_bit(nr, addr)
+
+#define qdf_wait_queue_interruptible(wait_queue, condition) \
+		__qdf_wait_queue_interruptible(wait_queue, condition)
+
+#define qdf_init_waitqueue_head(_q) __qdf_init_waitqueue_head(_q)
+
+#define qdf_wake_up_interruptible(_q) __qdf_wake_up_interruptible(_q)
+
+#define qdf_wake_up_completion(_q) __qdf_wake_up_completion(_q)
+
 /**
  * qdf_container_of - cast a member of a structure out to the containing
  * structure

+ 13 - 1
qdf/linux/src/i_qdf_list.h

@@ -47,9 +47,10 @@ typedef struct qdf_list_s {
 } __qdf_list_t;
 
 /**
- * __qdf_list_create() - Initialize list head
+ * __qdf_list_create() - Create qdf list and initialize list head
  * @list: object of list
  * @max_size: max size of the list
+ *
  * Return: none
  */
 static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size)
@@ -59,5 +60,16 @@ static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size)
 	list->max_size = max_size;
 }
 
+/**
+ * __qdf_init_list_head() - initialize list head
+ * @list_head: pointer to list head
+ *
+ * Return: none
+ */
+static inline void __qdf_init_list_head(__qdf_list_node_t *list_head)
+{
+	INIT_LIST_HEAD(list_head);
+}
+
 bool qdf_list_has_node(__qdf_list_t *list, __qdf_list_node_t *node);
 #endif

+ 29 - 3
qdf/linux/src/i_qdf_util.h

@@ -64,9 +64,19 @@
 #include <linux/byteorder/generic.h>
 #endif
 
-/*
- * Generic compiler-dependent macros if defined by the OS
- */
+typedef struct task_struct __qdf_thread_t;
+typedef wait_queue_head_t __qdf_wait_queue_head_t;
+
+/* Generic compiler-dependent macros if defined by the OS */
+#define __qdf_wait_queue_interruptible(wait_queue, condition) \
+		wait_event_interruptible(wait_queue, condition)
+
+#define __qdf_init_waitqueue_head(_q) init_waitqueue_head(_q)
+
+#define __qdf_wake_up_interruptible(_q) wake_up_interruptible(_q)
+
+#define __qdf_wake_up_completion(_q) complete(_q)
+
 #define __qdf_unlikely(_expr)   unlikely(_expr)
 #define __qdf_likely(_expr)     likely(_expr)
 
@@ -141,6 +151,22 @@ static inline void __qdf_set_bit(unsigned int nr, unsigned long *addr)
 	__set_bit(nr, addr);
 }
 
+static inline void __qdf_clear_bit(unsigned int nr, unsigned long *addr)
+{
+	__clear_bit(nr, addr);
+}
+
+static inline bool __qdf_test_bit(unsigned int nr, unsigned long *addr)
+{
+	return test_bit(nr, addr);
+}
+
+static inline bool __qdf_test_and_clear_bit(unsigned int nr,
+					unsigned long *addr)
+{
+	return __test_and_clear_bit(nr, addr);
+}
+
 /**
  * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast'
  * @mac_addr: pointer to the qdf MacAddress to set to broadcast

+ 32 - 0
qdf/linux/src/qdf_event.c

@@ -267,3 +267,35 @@ QDF_STATUS qdf_wait_single_event(qdf_event_t *event, uint32_t timeout)
 	return QDF_STATUS_SUCCESS;
 }
 EXPORT_SYMBOL(qdf_wait_single_event);
+
+QDF_STATUS qdf_event_complete_and_exit(qdf_event_t *event, long reason_code)
+{
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check for null pointer */
+	if (NULL == event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if cookie is same as that of initialized event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Uninitialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	complete_and_exit(&event->complete, reason_code);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_event_complete_and_exit);

+ 16 - 3
qdf/linux/src/qdf_mc_timer.c

@@ -37,11 +37,13 @@
 #include "qdf_list.h"
 #include "qdf_mem.h"
 #include <linux/export.h>
+#ifndef NAPIER_CODE
 #ifdef CONFIG_MCL
 #include <cds_mc_timer.h>
 #endif
-/* Preprocessor definitions and constants */
+#endif
 
+/* Preprocessor definitions and constants */
 #define LINUX_TIMER_COOKIE 0x12341234
 #define LINUX_INVALID_TIMER_COOKIE 0xfeedface
 #define TMR_INVALID_ID (0)
@@ -59,6 +61,13 @@
 static unsigned int persistent_timer_count;
 static qdf_mutex_t persistent_timer_count_lock;
 
+static void (*scheduler_timer_callback) (unsigned long data);
+void qdf_register_mc_timer_callback(void (*callback) (unsigned long data))
+{
+	scheduler_timer_callback = callback;
+}
+EXPORT_SYMBOL(qdf_register_mc_timer_callback);
+
 /* Function declarations and documenation */
 
 /**
@@ -287,7 +296,9 @@ QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer,
 		init_timer_deferrable(&(timer->platform_info.timer));
 	else
 		init_timer(&(timer->platform_info.timer));
-#ifdef CONFIG_MCL
+#ifdef NAPIER_CODE
+	timer->platform_info.timer.function = scheduler_timer_callback;
+#elif defined(CONFIG_MCL)
 	timer->platform_info.timer.function = cds_linux_timer_callback;
 #else
 	timer->platform_info.timer.function = NULL;
@@ -323,7 +334,9 @@ QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type,
 		init_timer_deferrable(&(timer->platform_info.timer));
 	else
 		init_timer(&(timer->platform_info.timer));
-#ifdef CONFIG_MCL
+#ifdef NAPIER_CODE
+	timer->platform_info.timer.function = scheduler_timer_callback;
+#elif defined(CONFIG_MCL)
 	timer->platform_info.timer.function = cds_linux_timer_callback;
 #else
 	timer->platform_info.timer.function = NULL;

+ 20 - 0
qdf/linux/src/qdf_threads.c

@@ -39,6 +39,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/export.h>
+#include <linux/kthread.h>
 
 /* Function declarations and documenation */
 
@@ -104,3 +105,22 @@ void qdf_busy_wait(uint32_t us_interval)
 	udelay(us_interval);
 }
 EXPORT_SYMBOL(qdf_busy_wait);
+
+void qdf_set_user_nice(qdf_thread_t *thread, long nice)
+{
+	set_user_nice(thread, nice);
+}
+EXPORT_SYMBOL(qdf_set_user_nice);
+
+qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data,
+				const char thread_name[])
+{
+	return kthread_create(thread_handler, data, thread_name);
+}
+EXPORT_SYMBOL(qdf_create_thread);
+
+int qdf_wake_up_process(qdf_thread_t *thread)
+{
+	return wake_up_process(thread);
+}
+EXPORT_SYMBOL(qdf_wake_up_process);

+ 294 - 0
sch/inc/scheduler_api.h

@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#if !defined(__SCHEDULER_API_H)
+#define __SCHEDULER_API_H
+
+
+#include <qdf_event.h>
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include <qdf_mc_timer.h>
+#include <qdf_status.h>
+#include <osdep.h>
+
+
+/* Controller thread various event masks
+ * MC_POST_EVENT_MASK: wake up thread after posting message
+ * MC_SUSPEND_EVENT_MASK: signal thread to suspend during kernel pm suspend
+ * MC_SHUTDOWN_EVENT_MASK: signal thread to shutdown and exit during unload
+ */
+#define MC_POST_EVENT_MASK               0x001
+#define MC_SUSPEND_EVENT_MASK            0x002
+#define MC_SHUTDOWN_EVENT_MASK           0x010
+
+/*
+ * Cookie for timer messages.  Note that anyone posting a timer message
+ * has to write the COOKIE in the reserved field of the message.  The
+ * timer queue handler relies on this COOKIE
+ */
+#define SYS_MSG_COOKIE      0xFACE
+
+
+/**
+ * enum CDS_MQ_ID - message queues enum
+ * @CDS_MQ_ID_SME: Legacy SME message queue ID
+ * @CDS_MQ_ID_PE: Legacy PE message queue ID
+ * @CDS_MQ_ID_WMA: Legacy WMA message queue ID
+ * @CDS_MQ_ID_SYS: Legacy SYS message queue ID
+ * @CDS_MQ_ID_OS_IF: OS IF(north interface) message queue ID
+ * @CDS_MQ_ID_TARGET_IF: Target IF(south interface) message queue ID
+ */
+typedef enum {
+	CDS_MQ_ID_SME = QDF_MODULE_ID_SME,
+	CDS_MQ_ID_PE = QDF_MODULE_ID_PE,
+	CDS_MQ_ID_WMA = QDF_MODULE_ID_WMA,
+	CDS_MQ_ID_SYS = QDF_MODULE_ID_SYS,
+	CDS_MQ_ID_OS_IF = QDF_MODULE_ID_OS_IF,
+	CDS_MQ_ID_TARGET_IF = QDF_MODULE_ID_TARGET_IF,
+} CDS_MQ_ID;
+
+
+typedef enum {
+	SYS_MSG_ID_MC_START,
+	SYS_MSG_ID_MC_THR_PROBE,
+	SYS_MSG_ID_MC_TIMER,
+	SYS_MSG_ID_MC_STOP,
+	SYS_MSG_ID_FTM_RSP,
+	SYS_MSG_ID_QVIT,
+
+} SYS_MSG_ID;
+
+/**
+ * struct scheduler_msg: scheduler message structure
+ * @type: message type
+ * @reserved: reserved field
+ * @bodyptr: message body pointer based on the type either a bodyptr pointer
+ *     into memory or bodyval as a 32 bit data is used. bodyptr is always a
+ *     freeable pointer, one should always make sure that bodyptr is always
+ *     freeable.
+ * Messages should use either bodyptr or bodyval; not both !!!
+ * @bodyval: message body val
+ * @callback: callback to be called by scheduler thread once message is posted
+ *   and scheduler thread has started processing the message.
+ */
+struct scheduler_msg {
+	uint16_t type;
+	uint16_t reserved;
+	void *bodyptr;
+	uint32_t bodyval;
+	void *callback;
+};
+
+typedef QDF_STATUS (*scheduler_msg_process_fn_t) (struct scheduler_msg  *msg);
+typedef void (*hdd_suspend_callback)(void);
+
+/**
+ * scheduler_init() - initialize control path scheduler
+ *
+ * This API initializes control path scheduler.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_init(void);
+
+/**
+ * scheduler_deinit() - de-initialize control path scheduler
+ *
+ * This API de-initializes control path scheduler.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_deinit(void);
+
+/**
+ * scheduler_register_module() - register input module/queue id
+ * @qid: queue id to get registered
+ * @callback: queue message to be called when a message is posted
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
+		scheduler_msg_process_fn_t callback);
+
+/**
+ * scheduler_deregister_module() - deregister input module/queue id
+ * @qid: queue id to get deregistered
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid);
+
+/**
+ * scheduler_post_msg_by_priority() - post messages by priority
+ * @qid: queue id to post message to
+ * @msg: message pointer
+ * @is_high_priority: set to true for high priority message else false
+ *
+ * IMPORTANT NOTE:
+ * 1) Legacy MCL modules continue posting messages to following legacy
+ *    message queue IDs:
+ *    a) CDS_MQ_ID_SME : SME module message queue
+ *    b) CDS_MQ_ID_PE : PE module message queue
+ *    c) CDS_MQ_ID_WMA : WMA module message queue
+ *    d) CDS_MQ_ID_SYS : SYS module message queue
+ * 2) All new components like SCM, P2P, TDLS, etc. needs to post messages
+ *    to following new message queue ids:
+ *    a) CDS_MQ_ID_OS_IF : North interface message queue for requests coming
+ *       from operating systems
+ *    b) CDS_MQ_ID_TARGET_IF : South interface message queue for messages
+ *       and events coming from target(firmware)
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_post_msg_by_priority(CDS_MQ_ID qid,
+		struct scheduler_msg *msg, bool is_high_priority);
+
+/**
+ * scheduler_post_msg() - post normal messages(no priority)
+ * @qid: queue id to post message to
+ * @msg: message pointer
+ *
+ * IMPORTANT NOTE:
+ * 1) Legacy MCL modules continue posting messages to following legacy
+ *    message queue IDs:
+ *    a) CDS_MQ_ID_SME : SME module message queue
+ *    b) CDS_MQ_ID_PE : PE module message queue
+ *    c) CDS_MQ_ID_WMA : WMA module message queue
+ *    d) CDS_MQ_ID_SYS : SYS module message queue
+ * 2) All new components like SCM, P2P, TDLS, etc. needs to post messages
+ *    to following new message queue ids:
+ *    a) CDS_MQ_ID_OS_IF : North interface message queue for requests coming
+ *       from operating systems
+ *    b) CDS_MQ_ID_TARGET_IF : South interface message queue for messages
+ *       and events coming from target(firmware)
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS scheduler_post_msg(CDS_MQ_ID qid,
+		struct scheduler_msg *msg)
+{
+	return scheduler_post_msg_by_priority(qid, msg, false);
+}
+
+/**
+ * scheduler_resume_complete() - resume scheduler thread
+ *
+ * Complete scheduler thread resume wait event such that scheduler
+ * thread can wake up and process message queues
+ *
+ * Return: none
+ */
+void scheduler_resume_complete(void);
+
+/**
+ * scheduler_register_hdd_suspend_callback() - suspend callback to hdd
+ * @callback: hdd callback to be called when controller thread is suspended
+ *
+ * Return: none
+ */
+void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback);
+
+/**
+ * scheduler_wake_up_controller_thread() - wake up controller thread
+ *
+ * Wake up controller thread to process a critical message.
+ *
+ * Return: none
+ */
+void scheduler_wake_up_controller_thread(void);
+
+/**
+ * scheduler_set_event_mask() - set given event mask
+ * @event_mask: event mask to set
+ *
+ * Set given event mask such that controller scheduler thread can do
+ * specified work after wake up.
+ *
+ * Return: none
+ */
+void scheduler_set_event_mask(uint32_t event_mask);
+
+/**
+ * scheduler_clear_event_mask() - clear given event mask
+ * @event_mask: event mask to set
+ *
+ * Return: none
+ */
+void scheduler_clear_event_mask(uint32_t event_mask);
+
+/**
+ * scheduler_target_if_mq_handler() - top level message queue handler for
+ *                                    target_if message queue
+ * @msg: pointer to actual message being handled
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg);
+
+/**
+ * scheduler_os_if_mq_handler() - top level message queue handler for
+ *                                os_if message queue
+ * @msg: pointer to actual message being handled
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg);
+
+/**
+ * scheduler_timer_q_mq_handler() - top level message queue handler for
+ *                                timer queue
+ * @msg: pointer to actual message being handled
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg);
+
+/**
+ * scheduler_register_wma_legacy_handler() - register legacy wma handler
+ * @callback: legacy wma handler to be called for WMA messages
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
+						callback);
+
+/**
+ * scheduler_register_sys_legacy_handler() - register legacy sys handler
+ * @callback: legacy sys handler to be called for sys messages
+ *
+ * Return: QDF status
+ */
+QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
+						callback);
+/**
+ * scheduler_mc_timer_callback() - timer callback, gets called at time out
+ * @data: unsigned long, holds the timer object.
+ *
+ * Return: None
+ */
+void scheduler_mc_timer_callback(unsigned long data);
+#endif

+ 208 - 0
sch/inc/scheduler_core.h

@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#if !defined(__SCHEDULER_CORE_H)
+#define __SCHEDULER_CORE_H
+
+#include <qdf_threads.h>
+#include <scheduler_api.h>
+#include <qdf_list.h>
+
+#define SCHEDULER_CORE_MAX_MESSAGES 8000
+#define SCHEDULER_NUMBER_OF_MSG_QUEUE 5
+
+/**
+ * QDF Message queue definition.
+ */
+struct scheduler_mq_type {
+	qdf_spinlock_t mq_lock;
+	qdf_list_t mq_list;
+	QDF_MODULE_ID qid;
+};
+
+struct scheduler_msg_wrapper {
+	/* Message node */
+	qdf_list_node_t msg_node;
+	/* message it is associated to */
+	struct scheduler_msg *msg_buf;
+};
+
+struct scheduler_mq_ctx {
+	/* Messages buffers */
+	struct scheduler_msg msg_buffers[SCHEDULER_CORE_MAX_MESSAGES];
+	struct scheduler_msg_wrapper msg_wrappers[SCHEDULER_CORE_MAX_MESSAGES];
+	struct scheduler_mq_type free_msg_q;
+	struct scheduler_mq_type sch_msg_q[SCHEDULER_NUMBER_OF_MSG_QUEUE];
+	uint8_t scheduler_msg_qid_to_qidx[QDF_MODULE_ID_MAX];
+	QDF_STATUS (*scheduler_msg_process_fn[SCHEDULER_NUMBER_OF_MSG_QUEUE])
+					(struct scheduler_msg *msg);
+};
+
+struct scheduler_ctx {
+	struct scheduler_mq_ctx queue_ctx;
+	/* Handle of Event for MC thread to signal startup */
+	qdf_event_t sch_start_event;
+	qdf_thread_t *sch_thread;
+	/* completion object for MC thread shutdown */
+	qdf_event_t sch_shutdown;
+	/* Wait queue for MC thread */
+	qdf_wait_queue_head_t sch_wait_queue;
+	unsigned long sch_event_flag;
+	/* Completion object to resume Mc thread */
+	qdf_event_t resume_sch_event;
+	/* lock to make sure that McThread suspend/resume mechanism is insync */
+	qdf_spinlock_t sch_thread_lock;
+	uint8_t sch_last_qidx;
+	hdd_suspend_callback hdd_callback;
+	scheduler_msg_process_fn_t legacy_wma_handler;
+	scheduler_msg_process_fn_t legacy_sys_handler;
+};
+
+
+/**
+ * scheduler_get_context() - to get scheduler context
+ *
+ * This routine is used retrieve scheduler context
+ *
+ * Return: Pointer to scheduler context
+ */
+struct scheduler_ctx *scheduler_get_context(void);
+/**
+ * scheduler_thread() - spawned thread will execute this routine
+ * @arg: pointer to scheduler context
+ *
+ * Newly created thread will use this routine to perform its duty
+ *
+ * Return: none
+ */
+int scheduler_thread(void *arg);
+
+/**
+ * scheduler_cleanup_queues() - to clean up the given module's queue
+ * @sch_ctx: pointer to scheduler context
+ * @idx: index of the queue which needs to be cleanup.
+ *
+ * This routine  is used to clean the module's queue provided by
+ * user through idx field
+ *
+ * Return: none
+ */
+void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx);
+/**
+ * scheduler_create_ctx() - to create scheduler context
+ *
+ * This routine is used to create scheduler context
+ *
+ * Return: QDF_STATUS based on success or failure
+ */
+QDF_STATUS scheduler_create_ctx(void);
+/**
+ * scheduler_destroy_ctx() - to destroy scheduler context
+ *
+ * This routine is used to destroy scheduler context
+ *
+ * Return: QDF_STATUS based on success or failure
+ */
+QDF_STATUS scheduler_destroy_ctx(void);
+/**
+ * scheduler_mq_init() - initialize scheduler message queue
+ * @msg_q: Pointer to the message queue
+ *
+ * This function initializes the Message queue.
+ *
+ * Return: qdf status
+ */
+QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q);
+/**
+ * scheduler_mq_deinit() - de-initialize scheduler message queue
+ * @msg_q: Pointer to the message queue
+ *
+ * This function de-initializes scheduler message queue
+ *
+ *  Return: none
+ */
+void scheduler_mq_deinit(struct scheduler_mq_type *msg_q);
+/**
+ * scheduler_mq_put() - put message in the back of queue
+ * @msg_q: Pointer to the message queue
+ * @msg_wrapper: pointer to message wrapper
+ *
+ * This function is used to put message in back of provided message
+ * queue
+ *
+ *  Return: none
+ */
+void scheduler_mq_put(struct scheduler_mq_type *msg_q,
+			struct scheduler_msg_wrapper *msg_wrapper);
+/**
+ * scheduler_mq_put_front() - put message in the front of queue
+ * @msg_q: Pointer to the message queue
+ * @msg_wrapper: pointer to message wrapper
+ *
+ * This function is used to put message in front of provided message
+ * queue
+ *
+ *  Return: none
+ */
+void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
+			struct scheduler_msg_wrapper *msg_wrapper);
+/**
+ * scheduler_mq_get() - to get message from message queue
+ * @msg_q: Pointer to the message queue
+ *
+ * This function is used to get message from given message queue
+ *
+ *  Return: none
+ */
+struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q);
+/**
+ * scheduler_is_mq_empty() - to check if message queue is empty
+ * @msg_q: Pointer to the message queue
+ *
+ * This function is used to check if message queue is empty
+ *
+ * Return: true or false
+ */
+bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q);
+/**
+ * scheduler_queues_init() - to initialize all the modules' queues
+ * @sched_ctx: pointer to scheduler context
+ *
+ * This function is used to initialize the queues for all the modules
+ *
+ * Return: QDF_STATUS based on success of failure
+ */
+QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx);
+/**
+ * scheduler_queues_deinit() - to de-initialize all the modules' queues
+ * @sched_ctx: pointer to scheduler context
+ *
+ * This function is used to de-initialize the queues for all the modules
+ *
+ * Return: QDF_STATUS based on success of failure
+ */
+QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *gp_sch_ctx);
+#endif

+ 548 - 0
sch/src/scheduler_api.c

@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <scheduler_api.h>
+#include <scheduler_core.h>
+
+static void scheduler_flush_mqs(struct scheduler_ctx *sched_ctx)
+{
+	int i;
+
+	/* Here each of the MC thread MQ shall be drained and returned to the
+	 * Core. Before returning a wrapper to the Core, the Scheduler message
+	 * shall be freed first
+	 */
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
+		  ("Flushing scheduler message queue"));
+
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: sched_ctx is NULL", __func__);
+		return;
+	}
+	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
+		scheduler_cleanup_queues(sched_ctx, i);
+}
+
+static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
+{
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
+			"%s: invoked", __func__);
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: sched_ctx == NULL", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+	/* shut down scheduler thread */
+	qdf_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
+	qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
+	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
+	/* Wait for MC to exit */
+	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
+	sched_ctx->sch_thread = 0;
+
+	/* Clean up message queues of MC thread */
+	scheduler_flush_mqs(sched_ctx);
+
+	/* Deinit all the queues */
+	scheduler_queues_deinit(sched_ctx);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static QDF_STATUS scheduler_open(struct scheduler_ctx *sched_ctx)
+{
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Opening the QDF Scheduler", __func__);
+	/* Sanity checks */
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+	/* Initialize the helper events and event queues */
+	qdf_event_create(&sched_ctx->sch_start_event);
+	qdf_event_create(&sched_ctx->sch_shutdown);
+	qdf_event_create(&sched_ctx->resume_sch_event);
+	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
+	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
+	sched_ctx->sch_event_flag = 0;
+	/* Create the Scheduler Main Controller thread */
+	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread,
+					sched_ctx, "scheduler_thread");
+	if (IS_ERR(sched_ctx->sch_thread)) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
+			  "%s: Could not Create QDF Main Thread Controller",
+			  __func__);
+		scheduler_queues_deinit(sched_ctx);
+		return QDF_STATUS_E_RESOURCES;
+	}
+	/* start the thread here */
+	qdf_wake_up_process(sched_ctx->sch_thread);
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		  "%s: QDF Main Controller thread Created", __func__);
+
+	/*
+	 * Now make sure all threads have started before we exit.
+	 * Each thread should normally ACK back when it starts.
+	 */
+	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);
+	/* We're good now: Let's get the ball rolling!!! */
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		  "%s: Scheduler thread has started", __func__);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * scheduler_init() - create the scheduler context, its message queues
+ *                    and the scheduler controller thread
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS scheduler_init(void)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct scheduler_ctx *sched_ctx;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
+			FL("Opening Scheduler"));
+	status = scheduler_create_ctx();
+	if (QDF_STATUS_SUCCESS != status) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("can't create scheduler ctx"));
+		return status;
+	}
+	sched_ctx = scheduler_get_context();
+	status = scheduler_queues_init(sched_ctx);
+	if (QDF_STATUS_SUCCESS != status) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("Queue init failed"));
+		scheduler_destroy_ctx();
+		return status;
+	}
+	status = scheduler_open(sched_ctx);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		/* Critical Error ...  Cannot proceed further */
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
+				"Failed to open QDF Scheduler");
+		QDF_ASSERT(0);
+		scheduler_queues_deinit(sched_ctx);
+		scheduler_destroy_ctx();
+		/* Propagate the failure: previously this path fell through,
+		 * registered the MC timer callback against a destroyed
+		 * context and returned success to the caller.
+		 */
+		return status;
+	}
+	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS scheduler_deinit(void)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO_HIGH,
+			FL("Closing Scheduler"));
+	status = scheduler_close(sched_ctx);
+	if (QDF_STATUS_SUCCESS != status) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("Scheduler close failed"));
+		return status;
+	}
+	return scheduler_destroy_ctx();
+}
+
+
+QDF_STATUS scheduler_post_msg_by_priority(CDS_MQ_ID qid,
+		struct scheduler_msg *pMsg, bool is_high_priority)
+{
+	uint8_t qidx;
+	struct scheduler_mq_type *target_mq = NULL;
+	struct scheduler_msg_wrapper *msg_wrapper = NULL;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (!sched_ctx || !pMsg) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: Null params or global sch context is null",
+				__func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* Target_If is a special message queue in phase 3 convergence because
+	 * it is used by both legacy WMA as well as new UMAC components which
+	 * directly populate callback handlers in message body.
+	 * 1) WMA legacy messages should not have callback
+	 * 2) New target_if message needs to have valid callback
+	 * Clear callback handler for legacy WMA messages such that in case
+	 * if someone is sending legacy WMA message from stack which has
+	 * uninitialized callback then its handled properly. Also change
+	 * legacy WMA message queue id to target_if queue such that its  always
+	 * handled in right order.
+	 */
+	if (CDS_MQ_ID_WMA == qid) {
+		pMsg->callback = NULL;
+		/* change legacy WMA message id to new target_if mq id */
+		qid = CDS_MQ_ID_TARGET_IF;
+	}
+
+	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
+	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("Scheduler is deinitialized ignore msg"));
+		return QDF_STATUS_E_FAILURE;
+	}
+	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("callback not registered for qid[%d]"), qid);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);
+	QDF_ASSERT(target_mq);
+	if (target_mq == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: target_mq == NULL", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* Try and get a free Msg wrapper */
+	msg_wrapper = scheduler_mq_get(&sched_ctx->queue_ctx.free_msg_q);
+
+	if (NULL == msg_wrapper) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  FL("message wrapper empty"));
+		return QDF_STATUS_E_RESOURCES;
+	}
+
+	/* Copy the message now */
+	qdf_mem_copy((void *)msg_wrapper->msg_buf,
+			(void *)pMsg, sizeof(struct scheduler_msg));
+
+	if (is_high_priority)
+		scheduler_mq_put_front(target_mq, msg_wrapper);
+	else
+		scheduler_mq_put(target_mq, msg_wrapper);
+
+	qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
+	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid,
+		scheduler_msg_process_fn_t callback)
+{
+	struct scheduler_mq_ctx *ctx;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		FL("Enter"));
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			FL("sched_ctx is NULL"));
+		return QDF_STATUS_E_FAILURE;
+	}
+	ctx = &sched_ctx->queue_ctx;
+	ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx;
+	ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid;
+	ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback;
+	sched_ctx->sch_last_qidx++;
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		FL("Exit"));
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * scheduler_deregister_module() - deregister a module's message callback
+ * @qid: message queue id whose registration is being removed
+ *
+ * Clears the processing callback for the queue and marks the qid->qidx
+ * mapping invalid so subsequent posts to this qid are rejected.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
+ */
+QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid)
+{
+	struct scheduler_mq_ctx *ctx;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+	uint8_t qidx;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		FL("Enter"));
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("sched_ctx is NULL"));
+		return QDF_STATUS_E_FAILURE;
+	}
+	ctx = &sched_ctx->queue_ctx;
+	qidx = ctx->scheduler_msg_qid_to_qidx[qid];
+	ctx->scheduler_msg_process_fn[qidx] = NULL;
+	/* Invalidate the mapping for this queue id; the original code
+	 * indexed the qid->qidx map with qidx, corrupting the mapping of
+	 * an unrelated queue id.
+	 */
+	ctx->scheduler_msg_qid_to_qidx[qid] = SCHEDULER_NUMBER_OF_MSG_QUEUE;
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		FL("Exit"));
+	return QDF_STATUS_SUCCESS;
+}
+
+void scheduler_resume_complete(void)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (sched_ctx)
+		qdf_event_set(&sched_ctx->resume_sch_event);
+}
+
+void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (sched_ctx)
+		sched_ctx->hdd_callback = callback;
+}
+void scheduler_wake_up_controller_thread(void)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (sched_ctx)
+		qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
+}
+void scheduler_set_event_mask(uint32_t event_mask)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (sched_ctx)
+		qdf_set_bit(event_mask, &sched_ctx->sch_event_flag);
+}
+
+void scheduler_clear_event_mask(uint32_t event_mask)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (sched_ctx)
+		qdf_clear_bit(event_mask, &sched_ctx->sch_event_flag);
+}
+
+QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
+{
+	QDF_STATUS status;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+	QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *);
+
+	if (NULL == msg || NULL == sched_ctx) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
+			msg, sched_ctx);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	target_if_msg_handler = msg->callback;
+
+	/* Target_If is a special message queue in phase 3 convergence because
+	 * it is used by both legacy WMA as well as new UMAC components. New
+	 * UMAC components directly pass their message handlers as callback in
+	 * message body.
+	 * 1) All Legacy WMA messages do not contain message callback so invoke
+	 *    registered legacy WMA handler. Scheduler message posting APIs
+	 *    makes sure legacy WMA messages do not have callbacks.
+	 * 2) For new messages which have valid callbacks invoke their callbacks
+	 *    directly.
+	 */
+	if (NULL == target_if_msg_handler)
+		status = sched_ctx->legacy_wma_handler(msg);
+	else
+		status = target_if_msg_handler(msg);
+
+	return status;
+}
+
+QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
+{
+	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);
+
+	if (NULL == msg) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("Msg is NULL"));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	os_if_msg_handler = msg->callback;
+
+	if (NULL == os_if_msg_handler) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("Msg callback is NULL"));
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+	os_if_msg_handler(msg);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
+{
+	QDF_STATUS status;
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+	qdf_mc_timer_callback_t timer_q_msg_handler;
+
+	if (NULL == msg || NULL == sched_ctx) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("msg %p sch %p"),
+			msg, sched_ctx);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	timer_q_msg_handler = msg->callback;
+
+	/* Timer message handler */
+	if (SYS_MSG_COOKIE == msg->reserved &&
+		SYS_MSG_ID_MC_TIMER == msg->type) {
+		if (timer_q_msg_handler) {
+			status = QDF_STATUS_SUCCESS;
+			timer_q_msg_handler(msg->bodyptr);
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+				QDF_TRACE_LEVEL_ERROR, FL("Timer cb is null"));
+			status = QDF_STATUS_E_FAILURE;
+		}
+		return status;
+	} else {
+		/* Legacy sys message handler */
+		status = sched_ctx->legacy_sys_handler(msg);
+		return status;
+	}
+}
+
+QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
+						wma_callback)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (NULL == sched_ctx) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	sched_ctx->legacy_wma_handler = wma_callback;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
+						sys_callback)
+{
+	struct scheduler_ctx *sched_ctx = scheduler_get_context();
+
+	if (NULL == sched_ctx) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+			QDF_TRACE_LEVEL_ERROR, FL("scheduler context is null"));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	sched_ctx->legacy_sys_handler = sys_callback;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+void scheduler_mc_timer_callback(unsigned long data)
+{
+	qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data;
+	struct scheduler_msg msg;
+	QDF_STATUS status;
+
+	qdf_mc_timer_callback_t callback = NULL;
+	void *user_data = NULL;
+	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;
+
+	QDF_ASSERT(timer);
+
+	if (timer == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s Null pointer passed in!", __func__);
+		return;
+	}
+
+	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);
+
+	switch (timer->state) {
+	case QDF_TIMER_STATE_STARTING:
+		/* we are in this state because someone just started the timer,
+		 * MC timer got started and expired, but the time content have
+		 * not been updated this is a rare race condition!
+		 */
+		timer->state = QDF_TIMER_STATE_STOPPED;
+		status = QDF_STATUS_E_ALREADY;
+		break;
+
+	case QDF_TIMER_STATE_STOPPED:
+		status = QDF_STATUS_E_ALREADY;
+		break;
+
+	case QDF_TIMER_STATE_UNUSED:
+		status = QDF_STATUS_E_EXISTS;
+		break;
+
+	case QDF_TIMER_STATE_RUNNING:
+		/* need to go to stop state here because the call-back function
+		 * may restart timer (to emulate periodic timer)
+		 */
+		timer->state = QDF_TIMER_STATE_STOPPED;
+		/* copy the relevant timer information to local variables;
+		 * once we exits from this critical section, the timer content
+		 * may be modified by other tasks
+		 */
+		callback = timer->callback;
+		user_data = timer->user_data;
+		type = timer->type;
+		status = QDF_STATUS_SUCCESS;
+		break;
+
+	default:
+		QDF_ASSERT(0);
+		status = QDF_STATUS_E_FAULT;
+		break;
+	}
+
+	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);
+
+	if (QDF_STATUS_SUCCESS != status) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "TIMER callback called in a wrong state=%d",
+			  timer->state);
+		return;
+	}
+
+	qdf_try_allowing_sleep(type);
+
+	if (callback == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: No TIMER callback, Couldn't enqueue timer to any queue",
+			  __func__);
+		QDF_ASSERT(0);
+		return;
+	}
+
+	/* serialize to scheduler controller thread */
+	msg.type = SYS_MSG_ID_MC_TIMER;
+	msg.reserved = SYS_MSG_COOKIE;
+	msg.callback = callback;
+	msg.bodyptr = user_data;
+	msg.bodyval = 0;
+
+	if (scheduler_post_msg(CDS_MQ_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
+		return;
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		  "%s: Could not enqueue timer to timer queue", __func__);
+}

+ 444 - 0
sch/src/scheduler_core.c

@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <scheduler_core.h>
+#include <osdep.h>
+
+struct scheduler_ctx *gp_sched_ctx = NULL;
+
+QDF_STATUS scheduler_create_ctx(void)
+{
+	if (gp_sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  FL("there is a already gp_sched_ctx mem allocated"));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	gp_sched_ctx = qdf_mem_malloc(sizeof(struct scheduler_ctx));
+	if (!gp_sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  FL("gp_sched_ctx can't alloc mememory"));
+		return QDF_STATUS_E_FAILURE;
+	}
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS scheduler_destroy_ctx(void)
+{
+	if (gp_sched_ctx)
+		qdf_mem_free(gp_sched_ctx);
+	gp_sched_ctx = NULL;
+	return QDF_STATUS_SUCCESS;
+}
+
+struct scheduler_ctx *scheduler_get_context(void)
+{
+	return gp_sched_ctx;
+}
+
+
+static QDF_STATUS scheduler_all_queues_init(
+			struct scheduler_ctx *sched_ctx)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	int i;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	status = scheduler_mq_init(&sched_ctx->queue_ctx.free_msg_q);
+	if (QDF_STATUS_SUCCESS != status)
+		return status;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+		QDF_TRACE_LEVEL_ERROR, FL("free msg queue init complete"));
+
+	/* Initialize all message queues */
+	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
+		status = scheduler_mq_init(
+				&sched_ctx->queue_ctx.sch_msg_q[i]);
+		if (QDF_STATUS_SUCCESS != status)
+			return status;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));
+
+	return status;
+}
+
+
+static QDF_STATUS scheduler_all_queues_deinit(
+		struct scheduler_ctx *sched_ctx)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	int i;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("enter"));
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	scheduler_mq_deinit(&sched_ctx->queue_ctx.free_msg_q);
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+		  QDF_TRACE_LEVEL_ERROR, FL("free msg queue inited"));
+
+	/* De-Initialize all message queues */
+	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
+		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("exit"));
+	return status;
+}
+
+QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
+{
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
+	if (msg_q == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+	/* Now initialize the lock */
+	qdf_spinlock_create(&msg_q->mq_lock);
+	/* Now initialize the List data structure */
+	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));
+
+	return QDF_STATUS_SUCCESS;
+}
+
+void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
+{
+	if (msg_q == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: NULL pointer passed", __func__);
+		return;
+	}
+}
+
+void scheduler_mq_put(struct scheduler_mq_type *msg_q,
+			struct scheduler_msg_wrapper *msg_wrapper)
+{
+	if (msg_q == NULL || msg_wrapper == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: NULL pointer passed", __func__);
+		return;
+	}
+	qdf_spin_lock_irqsave(&msg_q->mq_lock);
+	qdf_list_insert_back(&msg_q->mq_list, &msg_wrapper->msg_node);
+	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
+
+}
+
+void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
+			struct scheduler_msg_wrapper *msg_wrapper)
+{
+	if ((msg_q == NULL) || (msg_wrapper == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: NULL pointer passed", __func__);
+		return;
+	}
+	qdf_spin_lock_irqsave(&msg_q->mq_lock);
+	qdf_list_insert_front(&msg_q->mq_list, &msg_wrapper->msg_node);
+	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
+}
+
+struct scheduler_msg_wrapper *scheduler_mq_get(struct scheduler_mq_type *msg_q)
+{
+	qdf_list_node_t *listptr;
+	struct scheduler_msg_wrapper *msg_wrapper = NULL;
+
+	if (msg_q == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: NULL pointer passed", __func__);
+		return NULL;
+	}
+
+	qdf_spin_lock_irqsave(&msg_q->mq_lock);
+	if (qdf_list_empty(&msg_q->mq_list)) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_WARN,
+			  "%s: Scheduler Message Queue is empty", __func__);
+	} else {
+		listptr = msg_q->mq_list.anchor.next;
+		msg_wrapper = (struct scheduler_msg_wrapper *)
+					qdf_container_of(listptr,
+						struct scheduler_msg_wrapper,
+						msg_node);
+		qdf_list_remove_node(&msg_q->mq_list, listptr);
+	}
+	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
+	return msg_wrapper;
+
+}
+
+/**
+ * scheduler_is_mq_empty() - check whether a message queue is empty
+ * @msg_q: Pointer to the message queue
+ *
+ * Return: true if the queue is empty or @msg_q is NULL, false otherwise
+ */
+bool scheduler_is_mq_empty(struct scheduler_mq_type *msg_q)
+{
+	bool is_empty = false;
+
+	if (msg_q == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				"%s: NULL pointer passed", __func__);
+		/* Returning a QDF_STATUS enum from a bool function is
+		 * type-incorrect; report an invalid queue as empty (same
+		 * truthiness as the former QDF_STATUS_E_FAILURE return).
+		 */
+		return true;
+	}
+	qdf_spin_lock_irqsave(&msg_q->mq_lock);
+	is_empty = qdf_list_empty(&msg_q->mq_list) ? true : false;
+	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
+	return is_empty;
+}
+
+QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
+{
+	return scheduler_all_queues_deinit(sched_ctx);
+}
+
+QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
+{
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	int i;
+
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Enter"));
+	if (!sched_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+	status = scheduler_all_queues_init(sched_ctx);
+	if (QDF_STATUS_SUCCESS != status) {
+		scheduler_all_queues_deinit(sched_ctx);
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_FATAL,
+				FL("Failed to initialize the msg queues"));
+		return status;
+	}
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+		QDF_TRACE_LEVEL_ERROR, FL("Queue init passed"));
+
+	for (i = 0; i < SCHEDULER_CORE_MAX_MESSAGES; i++) {
+		(sched_ctx->queue_ctx.msg_wrappers[i]).msg_buf =
+			&(sched_ctx->queue_ctx.msg_buffers[i]);
+		qdf_init_list_head(
+			&sched_ctx->queue_ctx.msg_wrappers[i].msg_node);
+		scheduler_mq_put(&sched_ctx->queue_ctx.free_msg_q,
+			   &(sched_ctx->queue_ctx.msg_wrappers[i]));
+	}
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR, FL("Exit"));
+	return status;
+}
+
+/**
+ * scheduler_core_return_msg() - return a drained message wrapper to the
+ *                               free message queue
+ * @sch_ctx: scheduler context owning the free queue
+ * @msg_wrapper: wrapper to recycle
+ *
+ * Return: none
+ */
+static void scheduler_core_return_msg(struct scheduler_ctx *sch_ctx,
+			struct scheduler_msg_wrapper *msg_wrapper)
+{
+	if (!sch_ctx) {
+		QDF_ASSERT(0);
+		/* Fixed copy-paste error message that referenced CDS
+		 * context variables not involved in this check.
+		 */
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			"%s: sch_ctx is NULL", __func__);
+		return;
+	}
+
+	QDF_ASSERT(NULL != msg_wrapper);
+
+	if (msg_wrapper == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			FL("msg_wrapper == NULL in function"));
+		return;
+	}
+
+	/*
+	 * Return the message on the free message queue
+	 */
+	qdf_init_list_head(&msg_wrapper->msg_node);
+	scheduler_mq_put(&sch_ctx->queue_ctx.free_msg_q, msg_wrapper);
+}
+
+static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
+						bool *shutdown)
+{
+	int i;
+	QDF_STATUS vStatus = QDF_STATUS_E_FAILURE;
+	struct scheduler_msg_wrapper *pMsgWrapper = NULL;
+
+	if (!sch_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+				FL("sch_ctx null"));
+		return;
+	}
+
+	/* start with highest priority queue : timer queue at index 0 */
+	i = 0;
+	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
+		/* Check if MC needs to shutdown */
+		if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK,
+					&sch_ctx->sch_event_flag)) {
+			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+				QDF_TRACE_LEVEL_ERROR,
+				"%s: scheduler thread signaled to shutdown",
+				__func__);
+			*shutdown = true;
+			/* Check for any Suspend Indication */
+			if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
+						&sch_ctx->sch_event_flag)) {
+				/* Unblock anyone waiting on suspend */
+				if (gp_sched_ctx->hdd_callback)
+					gp_sched_ctx->hdd_callback();
+			}
+			break;
+		}
+		if (scheduler_is_mq_empty(&sch_ctx->queue_ctx.sch_msg_q[i])) {
+			/* check next queue */
+			i++;
+			continue;
+		}
+		pMsgWrapper =
+			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
+		if (pMsgWrapper == NULL) {
+			QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+				QDF_TRACE_LEVEL_ERROR,
+				"%s: pMsgWrapper is NULL", __func__);
+			QDF_ASSERT(0);
+			return;
+		}
+		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
+			vStatus = sch_ctx->queue_ctx.
+					scheduler_msg_process_fn[i](
+							pMsgWrapper->msg_buf);
+			if (QDF_IS_STATUS_ERROR(vStatus)) {
+				QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
+					QDF_TRACE_LEVEL_ERROR,
+					FL("Failed processing Qid[%d] message"),
+					sch_ctx->queue_ctx.sch_msg_q[i].qid);
+			}
+			/* return message to the Core */
+			scheduler_core_return_msg(sch_ctx, pMsgWrapper);
+		}
+
+		/* start again with highest priority queue at index 0 */
+		i = 0;
+		continue;
+	}
+	/* Check for any Suspend Indication */
+	if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
+			&sch_ctx->sch_event_flag)) {
+		qdf_spin_lock(&sch_ctx->sch_thread_lock);
+		qdf_event_reset(&sch_ctx->resume_sch_event);
+		/* controller thread suspend completion callback */
+		if (gp_sched_ctx->hdd_callback)
+			gp_sched_ctx->hdd_callback();
+		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
+		/* Wait for resume indication */
+		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
+	}
+
+	return;  /* Nothing to process wait on wait queue */
+}
+
+int scheduler_thread(void *arg)
+{
+	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
+	int retWaitStatus = 0;
+	bool shutdown = false;
+
+	if (arg == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Bad Args passed", __func__);
+		return 0;
+	}
+	qdf_set_user_nice(current, -2);
+
+	/* Ack back to the context from which the main controller thread
+	 * has been created
+	 */
+	qdf_event_set(&sch_ctx->sch_start_event);
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		  "%s: scheduler_thread %d (%s) starting up", __func__, current->pid,
+		  current->comm);
+
+	while (!shutdown) {
+		/* This implements the execution model algorithm */
+		retWaitStatus = qdf_wait_queue_interruptible(
+					sch_ctx->sch_wait_queue,
+					qdf_test_bit(MC_POST_EVENT_MASK,
+						&sch_ctx->sch_event_flag) ||
+					qdf_test_bit(MC_SUSPEND_EVENT_MASK,
+						&sch_ctx->sch_event_flag));
+
+		if (retWaitStatus == -ERESTARTSYS) {
+			QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+					"%s: wait_event_interruptible returned -ERESTARTSYS",
+					__func__);
+			QDF_BUG(0);
+		}
+		qdf_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
+		scheduler_thread_process_queues(sch_ctx, &shutdown);
+	}
+	/* If we get here the MC thread must exit */
+	QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+		  "%s: Scheduler thread exiting!!!!", __func__);
+	qdf_event_complete_and_exit(&sch_ctx->sch_shutdown, 0);
+	return 0;
+}
+
+void scheduler_cleanup_queues(struct scheduler_ctx *sch_ctx, int idx)
+{
+	struct scheduler_msg_wrapper *msg_wrapper = NULL;
+
+	if (!sch_ctx) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return;
+	}
+
+	while ((msg_wrapper =
+			scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[idx]))) {
+		if (msg_wrapper->msg_buf != NULL) {
+			QDF_TRACE(QDF_MODULE_ID_SCHEDULER, QDF_TRACE_LEVEL_INFO,
+				"%s: Freeing MC WMA MSG message type %d",
+				__func__, msg_wrapper->msg_buf->type);
+			if (msg_wrapper->msg_buf->bodyptr)
+				qdf_mem_free(
+					(void *)msg_wrapper->msg_buf->bodyptr);
+			msg_wrapper->msg_buf->bodyptr = NULL;
+			msg_wrapper->msg_buf->bodyval = 0;
+			msg_wrapper->msg_buf->type = 0;
+		}
+		scheduler_core_return_msg(sch_ctx, msg_wrapper);
+	}
+}