Browse Source

qcacmn: Restructure and add new files in wmi layer

Add new directory structure and new files for
tlv and non-tlv implementation.
wmi layer shall not use global context of other layer,
remove global context and unused methods.

CRs-Fixed: 978547
Change-Id: I230f59052d9db8e1660cc087b09e03641227f490
Govind Singh committed 9 years ago
commit
6b411b537b
4 files changed with 2917 additions and 0 deletions
  1. 1322 0
      wmi_tlv_helper.c
  2. 54 0
      wmi_tlv_platform.c
  3. 1503 0
      wmi_unified.c
  4. 38 0
      wmi_version_whitelist.c

+ 1322 - 0
wmi_tlv_helper.c

@@ -0,0 +1,1322 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "wmi_tlv_platform.c"
+#include "wmi_tlv_defs.h"
+#include "wmi_version.h"
+
+/* Sentinel tlv_order value: ask wmitlv_get_attributes() only for the
+ * total number of TLVs of a command/event, not for one TLV's attributes. */
+#define WMITLV_GET_ATTRIB_NUM_TLVS  0xFFFFFFFF
+
+/* Header ("ATTRB0") word layout: bits 0..23 = command/event ID,
+ * bits 24..31 = number of TLVs defined for that ID. */
+#define WMITLV_GET_CMDID(val) (val & 0x00FFFFFF)
+#define WMITLV_GET_NUM_TLVS(val) ((val >> 24) & 0xFF)
+
+/* Per-TLV ("ATTRB1") word layout: bits 0..11 = tag ID,
+ * bits 12..20 = structure size in bytes, bits 21..29 = array size,
+ * bit 30 = variable-length flag. */
+#define WMITLV_GET_TAGID(val) (val & 0x00000FFF)
+#define WMITLV_GET_TAG_STRUCT_SIZE(val) ((val >> 12) & 0x000001FF)
+#define WMITLV_GET_TAG_ARRAY_SIZE(val) ((val >> 21) & 0x000001FF)
+#define WMITLV_GET_TAG_VARIED(val) ((val >> 30) & 0x00000001)
+
+/* Builders for the two word formats decoded by the GET_* macros above. */
+#define WMITLV_SET_ATTRB0(id) ((WMITLV_GET_TAG_NUM_TLV_ATTRIB(id) << 24) | \
+				(id & 0x00FFFFFF))
+#define WMITLV_SET_ATTRB1(tagID, tagStructSize, tagArraySize, tagVaried) \
+	(((tagVaried&0x1)<<30) | ((tagArraySize&0x1FF)<<21) | \
+	((tagStructSize&0x1FF)<<12) | (tagID&0xFFF))
+
+/* Expanded once per TLV by WMITLV_TABLE(); emits one ATTRB1 word
+ * (note the trailing comma — the expansion builds an initializer list). */
+#define WMITLV_OP_SET_TLV_ATTRIB_macro(param_ptr, param_len, wmi_cmd_event_id, \
+	elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size)  \
+	WMITLV_SET_ATTRB1(elem_tlv_tag, sizeof(elem_struc_type), arr_size, var_len),
+
+/* For one command/event ID, emits its ATTRB0 header word followed by one
+ * ATTRB1 word per TLV. The resulting arrays are therefore flat sequences of
+ * variable-length records; see the walking logic in wmitlv_get_attributes(). */
+#define WMITLV_GET_CMD_EVT_ATTRB_LIST(id) \
+	WMITLV_SET_ATTRB0(id), \
+	WMITLV_TABLE(id,SET_TLV_ATTRIB, NULL, 0)
+
+/* Flattened attribute tables for all WMI commands and events. */
+A_UINT32 cmd_attr_list[] = {
+	WMITLV_ALL_CMD_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST)
+};
+
+A_UINT32 evt_attr_list[] = {
+	WMITLV_ALL_EVT_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST)
+};
+
+#ifdef NO_DYNAMIC_MEM_ALLOC
+/* Caller-provided static buffer (and its capacity, in TLV entries) used in
+ * place of heap allocation; set via wmitlv_set_static_param_tlv_buf(). */
+static wmitlv_cmd_param_info *g_wmi_static_cmd_param_info_buf;
+A_UINT32 g_wmi_static_max_cmd_param_tlvs;
+#endif
+
+
+/**
+ * wmitlv_set_static_param_tlv_buf() - register static TLV parameter buffer
+ * @param_tlv_buf: caller-allocated buffer to be used as the cmd_param_tlv
+ *                 structure when dynamic allocation is unavailable
+ * @max_tlvs_accomodated: max number of TLV entries the buffer can hold
+ *
+ * WMI TLV Helper function to set the static cmd_param_tlv structure
+ * and the number of TLVs that can be accommodated in the structure.
+ * This function should be used when dynamic memory allocation is not
+ * supported. When dynamic memory allocation is not supported by any
+ * component then the NO_DYNAMIC_MEM_ALLOC macro has to be defined in the
+ * respective tlv_platform.c file. And the respective component has to
+ * allocate a cmd_param_tlv structure buffer to accommodate whatever number
+ * of TLVs are needed. Both the buffer address and the number of TLVs that
+ * can be accommodated in the buffer should be sent as arguments to this
+ * function.
+ *
+ * When NO_DYNAMIC_MEM_ALLOC is not defined this function is a no-op.
+ *
+ * Return: None
+ */
+void
+wmitlv_set_static_param_tlv_buf(void *param_tlv_buf,
+				A_UINT32 max_tlvs_accomodated)
+{
+#ifdef NO_DYNAMIC_MEM_ALLOC
+	g_wmi_static_cmd_param_info_buf = param_tlv_buf;
+	g_wmi_static_max_cmd_param_tlvs = max_tlvs_accomodated;
+#endif
+}
+
+/**
+ * wmitlv_get_attributes() - look up TLV attributes of a command/event
+ * @is_cmd_id: 1 to search the command table, 0 to search the event table
+ * @cmd_event_id: command or event id to look up
+ * @curr_tlv_order: zero-based order of the TLV whose attributes are wanted,
+ *                  or WMITLV_GET_ATTRIB_NUM_TLVS to fetch only the TLV count
+ * @tlv_attr_ptr: output; filled with the attributes found
+ *
+ * WMI TLV Helper function to find the attributes of the Command/Event TLVs.
+ * The attribute lists are flat arrays where each record is one header word
+ * (id + number of TLVs, see WMITLV_SET_ATTRB0) followed by that many per-TLV
+ * attribute words (WMITLV_SET_ATTRB1); the loop below therefore advances the
+ * index past the attribute words of every record it rejects.
+ *
+ * Return: 0 if success. Return >= 1 if failure.
+ */
+A_UINT32 wmitlv_get_attributes(A_UINT32 is_cmd_id, A_UINT32 cmd_event_id,
+			       A_UINT32 curr_tlv_order,
+			       wmitlv_attributes_struc *tlv_attr_ptr)
+{
+	A_UINT32 i, base_index, num_tlvs, num_entries;
+	A_UINT32 *pAttrArrayList;
+
+	/* Select the command or event attribute table. */
+	if (is_cmd_id) {
+		pAttrArrayList = &cmd_attr_list[0];
+		num_entries = CDF_ARRAY_SIZE(cmd_attr_list);
+	} else {
+		pAttrArrayList = &evt_attr_list[0];
+		num_entries = CDF_ARRAY_SIZE(evt_attr_list);
+	}
+
+	for (i = 0; i < num_entries; i++) {
+		num_tlvs = WMITLV_GET_NUM_TLVS(pAttrArrayList[i]);
+		if (WMITLV_GET_CMDID(cmd_event_id) ==
+		    WMITLV_GET_CMDID(pAttrArrayList[i])) {
+			tlv_attr_ptr->cmd_num_tlv = num_tlvs;
+			/* Return success from here when only number of TLVS for
+			 * this command/event is required */
+			if (curr_tlv_order == WMITLV_GET_ATTRIB_NUM_TLVS) {
+				wmi_tlv_print_verbose
+					("%s: WMI TLV attribute definitions for %s:0x%x found; num_of_tlvs:%d\n",
+					__func__, (is_cmd_id ? "Cmd" : "Evt"),
+					cmd_event_id, num_tlvs);
+				return 0;
+			}
+
+			/* Return failure if tlv_order is more than the expected
+			 * number of TLVs */
+			if (curr_tlv_order >= num_tlvs) {
+				wmi_tlv_print_error
+					("%s: ERROR: TLV order %d greater than num_of_tlvs:%d for %s:0x%x\n",
+					__func__, curr_tlv_order, num_tlvs,
+					(is_cmd_id ? "Cmd" : "Evt"), cmd_event_id);
+				return 1;
+			}
+
+			base_index = i + 1;     /* index to first TLV attributes */
+			wmi_tlv_print_verbose
+				("%s: WMI TLV attributes for %s:0x%x tlv[%d]:0x%x\n",
+				__func__, (is_cmd_id ? "Cmd" : "Evt"),
+				cmd_event_id, curr_tlv_order,
+				pAttrArrayList[(base_index + curr_tlv_order)]);
+			tlv_attr_ptr->tag_order = curr_tlv_order;
+			tlv_attr_ptr->tag_id =
+				WMITLV_GET_TAGID(pAttrArrayList
+						 [(base_index + curr_tlv_order)]);
+			tlv_attr_ptr->tag_struct_size =
+				WMITLV_GET_TAG_STRUCT_SIZE(pAttrArrayList
+							   [(base_index +
+							     curr_tlv_order)]);
+			tlv_attr_ptr->tag_varied_size =
+				WMITLV_GET_TAG_VARIED(pAttrArrayList
+						      [(base_index +
+							curr_tlv_order)]);
+			tlv_attr_ptr->tag_array_size =
+				WMITLV_GET_TAG_ARRAY_SIZE(pAttrArrayList
+							  [(base_index +
+							    curr_tlv_order)]);
+			return 0;
+		}
+		/* Not a match: skip this record's per-TLV attribute words
+		 * (combined with the loop's i++ this lands on the next header). */
+		i += num_tlvs;
+	}
+
+	wmi_tlv_print_error
+		("%s: ERROR: Didn't found WMI TLV attribute definitions for %s:0x%x\n",
+		__func__, (is_cmd_id ? "Cmd" : "Evt"), cmd_event_id);
+	return 1;
+}
+
+/**
+ * wmitlv_check_tlv_params() - validate a prepared TLV stream
+ * @os_handle: os context handle (unused here; kept for API symmetry)
+ * @param_struc_ptr: pointer to the TLV stream to validate
+ * @param_buf_len: total length of the TLV stream in bytes
+ * @is_cmd_id: 1 to validate against the command table, 0 for the event table
+ * @wmi_cmd_event_id: command or event id
+ *
+ * Helper Function to validate the prepared TLVs for a WMI event/command to
+ * be sent: each TLV's tag must appear in the expected order and its length
+ * must match (fixed-size tags) or be a multiple of the element structure
+ * size (variable-size array tags). Array-of-structure TLVs additionally have
+ * every inner TLV's length checked.
+ *
+ * NOTE(review): the %zu conversions below assume the printed sums promote to
+ * size_t (i.e. WMI_TLV_HDR_SIZE expands to a sizeof expression) — confirm
+ * against wmi_tlv_defs.h.
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+static int
+wmitlv_check_tlv_params(void *os_handle, void *param_struc_ptr,
+			A_UINT32 param_buf_len, A_UINT32 is_cmd_id,
+			A_UINT32 wmi_cmd_event_id)
+{
+	wmitlv_attributes_struc attr_struct_ptr;
+	A_UINT32 buf_idx = 0;
+	A_UINT32 tlv_index = 0;
+	A_UINT8 *buf_ptr = (unsigned char *)param_struc_ptr;
+	A_UINT32 expected_num_tlvs, expected_tlv_len;
+	A_INT32 error = -1;
+
+	/* Get the number of TLVs for this command/event */
+	if (wmitlv_get_attributes
+		    (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS,
+		    &attr_struct_ptr) != 0) {
+		wmi_tlv_print_error
+			("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n",
+			__func__, wmi_cmd_event_id);
+		goto Error_wmitlv_check_tlv_params;
+	}
+
+	/* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */
+
+	expected_num_tlvs = attr_struct_ptr.cmd_num_tlv;
+
+	/* Walk the stream while a complete TLV header still fits in it. */
+	while ((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len) {
+		A_UINT32 curr_tlv_tag =
+			WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr));
+		A_UINT32 curr_tlv_len =
+			WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
+
+		if ((buf_idx + WMI_TLV_HDR_SIZE + curr_tlv_len) > param_buf_len) {
+			wmi_tlv_print_error
+				("%s: ERROR: Invalid TLV length for Cmd=%d Tag_order=%d buf_idx=%d Tag:%d Len:%d TotalLen:%d\n",
+				__func__, wmi_cmd_event_id, tlv_index, buf_idx,
+				curr_tlv_tag, curr_tlv_len, param_buf_len);
+			goto Error_wmitlv_check_tlv_params;
+		}
+
+		/* Get the attributes of the TLV with the given order in "tlv_index" */
+		wmi_tlv_OS_MEMZERO(&attr_struct_ptr,
+				   sizeof(wmitlv_attributes_struc));
+		if (wmitlv_get_attributes
+			    (is_cmd_id, wmi_cmd_event_id, tlv_index,
+			    &attr_struct_ptr) != 0) {
+			wmi_tlv_print_error
+				("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n",
+				__func__, wmi_cmd_event_id, tlv_index);
+			goto Error_wmitlv_check_tlv_params;
+		}
+
+		/* Found the TLV that we wanted */
+		wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n",
+				      __func__, tlv_index, curr_tlv_tag,
+				      curr_tlv_len);
+
+		/* Validating Tag ID order */
+		if (curr_tlv_tag != attr_struct_ptr.tag_id) {
+			wmi_tlv_print_error
+				("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n",
+				__func__, wmi_cmd_event_id, curr_tlv_tag,
+				attr_struct_ptr.tag_id);
+			goto Error_wmitlv_check_tlv_params;
+		}
+
+		/* Validate Tag length */
+		/* Array TLVs length checking needs special handling */
+		if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM)
+		    && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) {
+			if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
+				/* Array size can't be invalid for fixed size Array TLV */
+				if (WMITLV_ARR_SIZE_INVALID ==
+				    attr_struct_ptr.tag_array_size) {
+					wmi_tlv_print_error
+						("%s: ERROR: array_size can't be invalid for Array TLV Cmd=0x%x Tag=%d\n",
+						__func__, wmi_cmd_event_id,
+						curr_tlv_tag);
+					goto Error_wmitlv_check_tlv_params;
+				}
+
+				expected_tlv_len =
+					attr_struct_ptr.tag_array_size *
+					attr_struct_ptr.tag_struct_size;
+				/* Paddding is only required for Byte array Tlvs all other
+				 * array tlv's should be aligned to 4 bytes during their
+				 * definition */
+				if (WMITLV_TAG_ARRAY_BYTE ==
+				    attr_struct_ptr.tag_id) {
+					expected_tlv_len =
+						roundup(expected_tlv_len,
+							sizeof(A_UINT32));
+				}
+
+				if (curr_tlv_len != expected_tlv_len) {
+					wmi_tlv_print_error
+						("%s: ERROR: TLV has wrong length for Cmd=0x%x. Tag_order=%d  Tag=%d, Given_Len:%d Expected_Len=%d.\n",
+						__func__, wmi_cmd_event_id,
+						tlv_index, curr_tlv_tag,
+						curr_tlv_len, expected_tlv_len);
+					goto Error_wmitlv_check_tlv_params;
+				}
+			} else {
+				/* Array size should be invalid for variable size Array TLV */
+				if (WMITLV_ARR_SIZE_INVALID !=
+				    attr_struct_ptr.tag_array_size) {
+					wmi_tlv_print_error
+						("%s: ERROR: array_size should be invalid for Array TLV Cmd=0x%x Tag=%d\n",
+						__func__, wmi_cmd_event_id,
+						curr_tlv_tag);
+					goto Error_wmitlv_check_tlv_params;
+				}
+
+				/* Incase of variable length TLV's, there is no expectation
+				 * on the length field so do whatever checking you can
+				 * depending on the TLV tag if TLV length is non-zero */
+				if (curr_tlv_len != 0) {
+					/* Verify TLV length is aligned to the size of structure */
+					if ((curr_tlv_len %
+					     attr_struct_ptr.tag_struct_size) !=
+					    0) {
+						wmi_tlv_print_error
+							("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to size of structure(%d bytes)\n",
+							__func__, curr_tlv_len,
+							wmi_cmd_event_id,
+							attr_struct_ptr.
+							tag_struct_size);
+						goto Error_wmitlv_check_tlv_params;
+					}
+
+					if (curr_tlv_tag ==
+					    WMITLV_TAG_ARRAY_STRUC) {
+						A_UINT8 *tlv_buf_ptr = NULL;
+						A_UINT32 in_tlv_len;
+						A_UINT32 idx;
+						A_UINT32 num_of_elems;
+
+						/* Verify length of inner TLVs */
+
+						num_of_elems =
+							curr_tlv_len /
+							attr_struct_ptr.
+							tag_struct_size;
+						/* Set tlv_buf_ptr to the first inner TLV address */
+						tlv_buf_ptr =
+							buf_ptr + WMI_TLV_HDR_SIZE;
+						for (idx = 0;
+						     idx < num_of_elems;
+						     idx++) {
+							in_tlv_len =
+								WMITLV_GET_TLVLEN
+									(WMITLV_GET_HDR
+										(tlv_buf_ptr));
+							if ((in_tlv_len +
+							     WMI_TLV_HDR_SIZE)
+							    !=
+							    attr_struct_ptr.
+							    tag_struct_size) {
+								wmi_tlv_print_error
+									("%s: ERROR: TLV has wrong length for Cmd=0x%x. Tag_order=%d  Tag=%d, Given_Len:%zu Expected_Len=%d.\n",
+									__func__,
+									wmi_cmd_event_id,
+									tlv_index,
+									curr_tlv_tag,
+									(in_tlv_len
+									 +
+									 WMI_TLV_HDR_SIZE),
+									attr_struct_ptr.
+									tag_struct_size);
+								goto Error_wmitlv_check_tlv_params;
+							}
+							tlv_buf_ptr +=
+								in_tlv_len +
+								WMI_TLV_HDR_SIZE;
+						}
+					} else
+					if ((curr_tlv_tag ==
+					     WMITLV_TAG_ARRAY_UINT32)
+					    || (curr_tlv_tag ==
+						WMITLV_TAG_ARRAY_BYTE)
+					    || (curr_tlv_tag ==
+						WMITLV_TAG_ARRAY_FIXED_STRUC)) {
+						/* Nothing to verify here */
+					} else {
+						wmi_tlv_print_error
+							("%s ERROR Need to handle the Array tlv %d for variable length for Cmd=0x%x\n",
+							__func__,
+							attr_struct_ptr.tag_id,
+							wmi_cmd_event_id);
+						goto Error_wmitlv_check_tlv_params;
+					}
+				}
+			}
+		} else {
+			/* Non-array TLV. */
+
+			if ((curr_tlv_len + WMI_TLV_HDR_SIZE) !=
+			    attr_struct_ptr.tag_struct_size) {
+				wmi_tlv_print_error
+					("%s: ERROR: TLV has wrong length for Cmd=0x%x. Given=%zu, Expected=%d.\n",
+					__func__, wmi_cmd_event_id,
+					(curr_tlv_len + WMI_TLV_HDR_SIZE),
+					attr_struct_ptr.tag_struct_size);
+				goto Error_wmitlv_check_tlv_params;
+			}
+		}
+
+		/* Check TLV length is aligned to 4 bytes or not */
+		if ((curr_tlv_len % sizeof(A_UINT32)) != 0) {
+			wmi_tlv_print_error
+				("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to %zu bytes\n",
+				__func__, curr_tlv_len, wmi_cmd_event_id,
+				sizeof(A_UINT32));
+			goto Error_wmitlv_check_tlv_params;
+		}
+
+		tlv_index++;
+		buf_ptr += curr_tlv_len + WMI_TLV_HDR_SIZE;
+		buf_idx += curr_tlv_len + WMI_TLV_HDR_SIZE;
+	}
+
+	/* Fewer TLVs than defined is tolerated (informational only). */
+	if (tlv_index != expected_num_tlvs) {
+		wmi_tlv_print_verbose
+			("%s: INFO: Less number of TLVs filled for Cmd=0x%x Filled %d Expected=%d\n",
+			__func__, wmi_cmd_event_id, tlv_index, expected_num_tlvs);
+	}
+
+	return 0;
+Error_wmitlv_check_tlv_params:
+	return error;
+}
+
+/**
+ * wmitlv_check_event_tlv_params() - validate TLVs of a WMI event
+ * @os_handle: os context handle
+ * @param_struc_ptr: pointer to the event TLV stream
+ * @param_buf_len: length of the TLV stream in bytes
+ * @wmi_cmd_event_id: WMI event id
+ *
+ * Thin wrapper around wmitlv_check_tlv_params() that selects the event
+ * attribute table (is_cmd_id = 0).
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+int
+wmitlv_check_event_tlv_params(void *os_handle, void *param_struc_ptr,
+			      A_UINT32 param_buf_len, A_UINT32 wmi_cmd_event_id)
+{
+	/* 0 => look the attributes up in the event table */
+	return wmitlv_check_tlv_params(os_handle, param_struc_ptr,
+				       param_buf_len, 0, wmi_cmd_event_id);
+}
+
+/**
+ * wmitlv_check_command_tlv_params() - validate TLVs of a WMI command
+ * @os_handle: os context handle
+ * @param_struc_ptr: pointer to the command TLV stream
+ * @param_buf_len: length of the TLV stream in bytes
+ * @wmi_cmd_event_id: WMI command id
+ *
+ * Thin wrapper around wmitlv_check_tlv_params() that selects the command
+ * attribute table (is_cmd_id = 1).
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+int
+wmitlv_check_command_tlv_params(void *os_handle, void *param_struc_ptr,
+				A_UINT32 param_buf_len,
+				A_UINT32 wmi_cmd_event_id)
+{
+	/* 1 => look the attributes up in the command table */
+	return wmitlv_check_tlv_params(os_handle, param_struc_ptr,
+				       param_buf_len, 1, wmi_cmd_event_id);
+}
+
+/**
+ * wmitlv_check_and_pad_tlvs() - validate and pad/shrink an incoming TLV stream
+ * @os_handle: os context handle (passed to the allocation macro)
+ * @param_struc_ptr: pointer to the raw TLV stream to process
+ * @param_buf_len: total length of the TLV stream in bytes
+ * @is_cmd_id: 1 for command attribute lookup, 0 for event lookup
+ * @wmi_cmd_event_id: command or event id
+ * @wmi_cmd_struct_ptr: output; receives an array of wmitlv_cmd_param_info,
+ *                      one entry per expected TLV (heap-allocated, or the
+ *                      static buffer when NO_DYNAMIC_MEM_ALLOC is defined)
+ *
+ * Validates the TLVs coming for an event/command and, where the incoming
+ * structure size differs from the size this host was compiled against,
+ * pads (or trims) each element so the local structure layout is safe to
+ * dereference. With dynamic allocation, oversized/undersized TLVs are
+ * copied into freshly allocated, zero-filled buffers (buf_is_allocated = 1);
+ * without it the adjustment is done in place with memmoves, assuming the
+ * caller's buffer has room to grow — TODO confirm that contract with callers.
+ *
+ * On failure any buffers already allocated are freed via
+ * wmitlv_free_allocated_*_tlvs() and *wmi_cmd_struct_ptr is set to NULL.
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+static int
+wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
+			  A_UINT32 param_buf_len, A_UINT32 is_cmd_id,
+			  A_UINT32 wmi_cmd_event_id, void **wmi_cmd_struct_ptr)
+{
+	wmitlv_attributes_struc attr_struct_ptr;
+	A_UINT32 buf_idx = 0;
+	A_UINT32 tlv_index = 0;
+	A_UINT32 num_of_elems = 0;
+	int tlv_size_diff = 0;
+	A_UINT8 *buf_ptr = (unsigned char *)param_struc_ptr;
+	wmitlv_cmd_param_info *cmd_param_tlvs_ptr = NULL;
+	A_UINT32 remaining_expected_tlvs = 0xFFFFFFFF;
+	A_UINT32 len_wmi_cmd_struct_buf;
+	A_INT32 error = -1;
+
+	/* Get the number of TLVs for this command/event */
+	if (wmitlv_get_attributes
+		    (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS,
+		    &attr_struct_ptr) != 0) {
+		wmi_tlv_print_error
+			("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n",
+			__func__, wmi_cmd_event_id);
+		return error;
+	}
+	/* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */
+
+	/* Create base structure of format wmi_cmd_event_id##_param_tlvs */
+	len_wmi_cmd_struct_buf =
+		attr_struct_ptr.cmd_num_tlv * sizeof(wmitlv_cmd_param_info);
+#ifndef NO_DYNAMIC_MEM_ALLOC
+	/* Dynamic memory allocation supported */
+	wmi_tlv_os_mem_alloc(os_handle, *wmi_cmd_struct_ptr,
+			     len_wmi_cmd_struct_buf);
+#else
+	/* Dynamic memory allocation is not supported. Use the buffer
+	 * g_wmi_static_cmd_param_info_buf, which should be set using
+	 * wmi_tlv_set_static_param_tlv_buf(),
+	 * for base structure of format wmi_cmd_event_id##_param_tlvs */
+	*wmi_cmd_struct_ptr = g_wmi_static_cmd_param_info_buf;
+	if (attr_struct_ptr.cmd_num_tlv > g_wmi_static_max_cmd_param_tlvs) {
+		/* Error: Expecting more TLVs that accomodated for static structure  */
+		wmi_tlv_print_error
+			("%s: Error: Expecting more TLVs that accomodated for static structure. Expected:%d Accomodated:%d\n",
+			__func__, attr_struct_ptr.cmd_num_tlv,
+			g_wmi_static_max_cmd_param_tlvs);
+		return error;
+	}
+#endif
+	if (*wmi_cmd_struct_ptr == NULL) {
+		/* Error: unable to alloc memory */
+		wmi_tlv_print_error
+			("%s: Error: unable to alloc memory (size=%d) for TLV\n",
+			__func__, len_wmi_cmd_struct_buf);
+		return error;
+	}
+
+	cmd_param_tlvs_ptr = (wmitlv_cmd_param_info *) *wmi_cmd_struct_ptr;
+	wmi_tlv_OS_MEMZERO(cmd_param_tlvs_ptr, len_wmi_cmd_struct_buf);
+	remaining_expected_tlvs = attr_struct_ptr.cmd_num_tlv;
+
+	/* Walk the stream while a full TLV header fits and more TLVs are
+	 * expected; extra trailing TLVs beyond the expected count are ignored. */
+	while (((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len)
+	       && (remaining_expected_tlvs)) {
+		A_UINT32 curr_tlv_tag =
+			WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr));
+		A_UINT32 curr_tlv_len =
+			WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
+		int num_padding_bytes = 0;
+
+		/* Get the attributes of the TLV with the given order in "tlv_index" */
+		wmi_tlv_OS_MEMZERO(&attr_struct_ptr,
+				   sizeof(wmitlv_attributes_struc));
+		if (wmitlv_get_attributes
+			    (is_cmd_id, wmi_cmd_event_id, tlv_index,
+			    &attr_struct_ptr) != 0) {
+			wmi_tlv_print_error
+				("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n",
+				__func__, wmi_cmd_event_id, tlv_index);
+			goto Error_wmitlv_check_and_pad_tlvs;
+		}
+
+		/* Found the TLV that we wanted */
+		wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n",
+				      __func__, tlv_index, curr_tlv_tag,
+				      curr_tlv_len);
+
+		/* Validating Tag order */
+		if (curr_tlv_tag != attr_struct_ptr.tag_id) {
+			wmi_tlv_print_error
+				("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n",
+				__func__, wmi_cmd_event_id, curr_tlv_tag,
+				attr_struct_ptr.tag_id);
+			goto Error_wmitlv_check_and_pad_tlvs;
+		}
+
+		if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM)
+		    && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) {
+			/* Current Tag is an array of some kind. */
+			/* Skip the TLV header of this array */
+			buf_ptr += WMI_TLV_HDR_SIZE;
+			buf_idx += WMI_TLV_HDR_SIZE;
+		} else {
+			/* Non-array TLV. */
+			curr_tlv_len += WMI_TLV_HDR_SIZE;
+		}
+
+		if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
+			/* This TLV is fixed length */
+			if (WMITLV_ARR_SIZE_INVALID ==
+			    attr_struct_ptr.tag_array_size) {
+				tlv_size_diff =
+					curr_tlv_len -
+					attr_struct_ptr.tag_struct_size;
+				num_of_elems =
+					(curr_tlv_len > WMI_TLV_HDR_SIZE) ? 1 : 0;
+			} else {
+				tlv_size_diff =
+					curr_tlv_len -
+					(attr_struct_ptr.tag_struct_size *
+					 attr_struct_ptr.tag_array_size);
+				num_of_elems = attr_struct_ptr.tag_array_size;
+			}
+		} else {
+			/* This TLV has a variable number of elements */
+			if (WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) {
+				A_UINT32 in_tlv_len = 0;
+
+				if (curr_tlv_len != 0) {
+					in_tlv_len =
+						WMITLV_GET_TLVLEN(WMITLV_GET_HDR
+									  (buf_ptr));
+					in_tlv_len += WMI_TLV_HDR_SIZE;
+					tlv_size_diff =
+						in_tlv_len -
+						attr_struct_ptr.tag_struct_size;
+					num_of_elems =
+						curr_tlv_len / in_tlv_len;
+					wmi_tlv_print_verbose
+						("%s: WARN: TLV array of structures in_tlv_len=%d struct_size:%d diff:%d num_of_elems=%d \n",
+						__func__, in_tlv_len,
+						attr_struct_ptr.tag_struct_size,
+						tlv_size_diff, num_of_elems);
+				} else {
+					tlv_size_diff = 0;
+					num_of_elems = 0;
+				}
+			} else
+			if ((WMITLV_TAG_ARRAY_UINT32 ==
+			     attr_struct_ptr.tag_id)
+			    || (WMITLV_TAG_ARRAY_BYTE ==
+				attr_struct_ptr.tag_id)
+			    || (WMITLV_TAG_ARRAY_FIXED_STRUC ==
+				attr_struct_ptr.tag_id)) {
+				tlv_size_diff = 0;
+				num_of_elems =
+					curr_tlv_len /
+					attr_struct_ptr.tag_struct_size;
+			} else {
+				wmi_tlv_print_error
+					("%s ERROR Need to handle this tag ID for variable length %d\n",
+					__func__, attr_struct_ptr.tag_id);
+				goto Error_wmitlv_check_and_pad_tlvs;
+			}
+		}
+
+		if ((WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) &&
+		    (tlv_size_diff != 0)) {
+			void *new_tlv_buf = NULL;
+			A_UINT8 *tlv_buf_ptr = NULL;
+			A_UINT32 in_tlv_len;
+			A_UINT32 i;
+
+			if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
+				/* This is not allowed. The tag WMITLV_TAG_ARRAY_STRUC can
+				 * only be used with variable-length structure array
+				 * should not have a fixed number of elements (contradicting).
+				 * Use WMITLV_TAG_ARRAY_FIXED_STRUC tag for fixed size
+				 * structure array(where structure never change without
+				 * breaking compatibility) */
+				wmi_tlv_print_error
+					("%s: ERROR: TLV (tag=%d) should be variable-length and not fixed length\n",
+					__func__, curr_tlv_tag);
+				goto Error_wmitlv_check_and_pad_tlvs;
+			}
+
+			/* Warning: Needs to allocate a larger structure and pad with zeros */
+			wmi_tlv_print_error
+				("%s: WARN: TLV array of structures needs padding. tlv_size_diff=%d\n",
+				__func__, tlv_size_diff);
+
+			/* incoming structure length */
+			in_tlv_len =
+				WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)) +
+				WMI_TLV_HDR_SIZE;
+#ifndef NO_DYNAMIC_MEM_ALLOC
+			wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
+					     (num_of_elems *
+					      attr_struct_ptr.tag_struct_size));
+			if (new_tlv_buf == NULL) {
+				/* Error: unable to alloc memory */
+				wmi_tlv_print_error
+					("%s: Error: unable to alloc memory (size=%d) for padding the TLV array %d\n",
+					__func__,
+					(num_of_elems *
+					 attr_struct_ptr.tag_struct_size),
+					curr_tlv_tag);
+				goto Error_wmitlv_check_and_pad_tlvs;
+			}
+
+			wmi_tlv_OS_MEMZERO(new_tlv_buf,
+					   (num_of_elems *
+					    attr_struct_ptr.tag_struct_size));
+			tlv_buf_ptr = (A_UINT8 *) new_tlv_buf;
+			for (i = 0; i < num_of_elems; i++) {
+				if (tlv_size_diff > 0) {
+					/* Incoming structure size is greater than expected
+					 * structure size. so copy the number of bytes equal
+					 * to expected structure size */
+					wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
+							  (void *)(buf_ptr +
+								   i *
+								   in_tlv_len),
+							  attr_struct_ptr.
+							  tag_struct_size);
+				} else {
+					/* Incoming structure size is smaller than expected
+					 * structure size. so copy the number of bytes equal
+					 * to incoming structure size */
+					wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
+							  (void *)(buf_ptr +
+								   i *
+								   in_tlv_len),
+							  in_tlv_len);
+				}
+				tlv_buf_ptr += attr_struct_ptr.tag_struct_size;
+			}
+#else
+			{
+				A_UINT8 *src_addr;
+				A_UINT8 *dst_addr;
+				A_UINT32 buf_mov_len;
+
+				if (tlv_size_diff < 0) {
+					/* Incoming structure size is smaller than expected size
+					 * then this needs padding for each element in the array */
+
+					/* Find amount of bytes to be padded for one element */
+					num_padding_bytes = tlv_size_diff * -1;
+
+					/* Move subsequent TLVs by number of bytes to be padded
+					 * for all elements */
+					if (param_buf_len >
+					    (buf_idx + curr_tlv_len)) {
+						src_addr =
+							buf_ptr + curr_tlv_len;
+						dst_addr =
+							buf_ptr + curr_tlv_len +
+							(num_padding_bytes *
+							 num_of_elems);
+						buf_mov_len =
+							param_buf_len - (buf_idx +
+									 curr_tlv_len);
+
+						wmi_tlv_OS_MEMMOVE(dst_addr,
+								   src_addr,
+								   buf_mov_len);
+					}
+
+					/* Move subsequent elements of array down by number of
+					 * bytes to be padded for one element and alse set
+					 * padding bytes to zero */
+					tlv_buf_ptr = buf_ptr;
+					for (i = 0; i < num_of_elems; i++) {
+						src_addr =
+							tlv_buf_ptr + in_tlv_len;
+						if (i != (num_of_elems - 1)) {
+							/* Need not move anything for last element
+							 * in the array */
+							dst_addr =
+								tlv_buf_ptr +
+								in_tlv_len +
+								num_padding_bytes;
+							buf_mov_len =
+								curr_tlv_len -
+								((i +
+								  1) * in_tlv_len);
+
+							wmi_tlv_OS_MEMMOVE
+								(dst_addr, src_addr,
+								buf_mov_len);
+						}
+
+						/* Set the padding bytes to zeroes */
+						wmi_tlv_OS_MEMZERO(src_addr,
+								   num_padding_bytes);
+
+						tlv_buf_ptr +=
+							attr_struct_ptr.
+							tag_struct_size;
+					}
+
+					/* Update the number of padding bytes to total number
+					 * of bytes padded for all elements in the array */
+					num_padding_bytes =
+						num_padding_bytes * num_of_elems;
+
+					new_tlv_buf = buf_ptr;
+				} else {
+					/* Incoming structure size is greater than expected size
+					 * then this needs shrinking for each element in the array */
+
+					/* Find amount of bytes to be shrinked for one element
+					 * NOTE(review): tlv_size_diff > 0 here, so
+					 * num_padding_bytes becomes negative and the offsets
+					 * below move data towards lower addresses — presumably
+					 * intentional; confirm against upstream history. */
+					num_padding_bytes = tlv_size_diff * -1;
+
+					/* Move subsequent elements of array up by number of bytes
+					 * to be shrinked for one element */
+					tlv_buf_ptr = buf_ptr;
+					for (i = 0; i < (num_of_elems - 1); i++) {
+						src_addr =
+							tlv_buf_ptr + in_tlv_len;
+						dst_addr =
+							tlv_buf_ptr + in_tlv_len +
+							num_padding_bytes;
+						buf_mov_len =
+							curr_tlv_len -
+							((i + 1) * in_tlv_len);
+
+						wmi_tlv_OS_MEMMOVE(dst_addr,
+								   src_addr,
+								   buf_mov_len);
+
+						tlv_buf_ptr +=
+							attr_struct_ptr.
+							tag_struct_size;
+					}
+
+					/* Move subsequent TLVs by number of bytes to be shrinked
+					 * for all elements */
+					if (param_buf_len >
+					    (buf_idx + curr_tlv_len)) {
+						src_addr =
+							buf_ptr + curr_tlv_len;
+						dst_addr =
+							buf_ptr + curr_tlv_len +
+							(num_padding_bytes *
+							 num_of_elems);
+						buf_mov_len =
+							param_buf_len - (buf_idx +
+									 curr_tlv_len);
+
+						wmi_tlv_OS_MEMMOVE(dst_addr,
+								   src_addr,
+								   buf_mov_len);
+					}
+
+					/* Update the number of padding bytes to total number of
+					 * bytes shrinked for all elements in the array */
+					num_padding_bytes =
+						num_padding_bytes * num_of_elems;
+
+					new_tlv_buf = buf_ptr;
+				}
+			}
+#endif
+			cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
+			cmd_param_tlvs_ptr[tlv_index].num_elements =
+				num_of_elems;
+			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;     /* Indicates that buffer is allocated */
+
+		} else if (tlv_size_diff >= 0) {
+			/* Warning: some parameter truncation */
+			if (tlv_size_diff > 0) {
+				wmi_tlv_print_verbose
+					("%s: WARN: TLV truncated. tlv_size_diff=%d, curr_tlv_len=%d\n",
+					__func__, tlv_size_diff, curr_tlv_len);
+			}
+			/* TODO: this next line needs more comments and explanation */
+			cmd_param_tlvs_ptr[tlv_index].tlv_ptr =
+				(attr_struct_ptr.tag_varied_size
+				 && !curr_tlv_len) ? NULL : (void *)buf_ptr;
+			cmd_param_tlvs_ptr[tlv_index].num_elements =
+				num_of_elems;
+			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 0;     /* Indicates that buffer is not allocated */
+		} else {
+			void *new_tlv_buf = NULL;
+
+			/* Warning: Needs to allocate a larger structure and pad with zeros */
+			wmi_tlv_print_verbose
+				("%s: WARN: TLV needs padding. tlv_size_diff=%d\n",
+				__func__, tlv_size_diff);
+#ifndef NO_DYNAMIC_MEM_ALLOC
+			/* Dynamic memory allocation is supported */
+			wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
+					     (curr_tlv_len - tlv_size_diff));
+			if (new_tlv_buf == NULL) {
+				/* Error: unable to alloc memory */
+				wmi_tlv_print_error
+					("%s: Error: unable to alloc memory (size=%d) for padding the TLV %d\n",
+					__func__, (curr_tlv_len - tlv_size_diff),
+					curr_tlv_tag);
+				goto Error_wmitlv_check_and_pad_tlvs;
+			}
+
+			wmi_tlv_OS_MEMZERO(new_tlv_buf,
+					   (curr_tlv_len - tlv_size_diff));
+			wmi_tlv_OS_MEMCPY(new_tlv_buf, (void *)buf_ptr,
+					  curr_tlv_len);
+#else
+			/* Dynamic memory allocation is not supported. Padding has
+			 * to be done with in the existing buffer assuming we have
+			 * enough space to grow */
+			{
+				/* Note: tlv_size_diff is a value less than zero */
+				/* Move the Subsequent TLVs by amount of bytes needs to be padded */
+				A_UINT8 *src_addr;
+				A_UINT8 *dst_addr;
+				A_UINT32 src_len;
+
+				num_padding_bytes = (tlv_size_diff * -1);
+
+				src_addr = buf_ptr + curr_tlv_len;
+				dst_addr =
+					buf_ptr + curr_tlv_len + num_padding_bytes;
+				src_len =
+					param_buf_len - (buf_idx + curr_tlv_len);
+
+				wmi_tlv_OS_MEMMOVE(dst_addr, src_addr, src_len);
+
+				/* Set the padding bytes to zeroes */
+				wmi_tlv_OS_MEMZERO(src_addr, num_padding_bytes);
+
+				new_tlv_buf = buf_ptr;
+			}
+#endif
+			cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
+			cmd_param_tlvs_ptr[tlv_index].num_elements =
+				num_of_elems;
+			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;     /* Indicates that buffer is allocated */
+		}
+
+		tlv_index++;
+		remaining_expected_tlvs--;
+		buf_ptr += curr_tlv_len + num_padding_bytes;
+		buf_idx += curr_tlv_len + num_padding_bytes;
+	}
+
+	return 0;
+Error_wmitlv_check_and_pad_tlvs:
+	if (is_cmd_id) {
+		wmitlv_free_allocated_command_tlvs(wmi_cmd_event_id,
+						   wmi_cmd_struct_ptr);
+	} else {
+		wmitlv_free_allocated_event_tlvs(wmi_cmd_event_id,
+						 wmi_cmd_struct_ptr);
+	}
+	*wmi_cmd_struct_ptr = NULL;
+	return error;
+}
+
+/**
+ * wmitlv_check_and_pad_event_tlvs() - validate/pad TLVs of a WMI event
+ * @os_handle: os context handle
+ * @param_struc_ptr: pointer to the received event TLV stream
+ * @param_buf_len: length of the TLV stream in bytes
+ * @wmi_cmd_event_id: WMI event id
+ * @wmi_cmd_struct_ptr: output; receives the parsed param-TLV structure
+ *
+ * Thin wrapper around wmitlv_check_and_pad_tlvs() that selects the event
+ * attribute table (is_cmd_id = 0).
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+int
+wmitlv_check_and_pad_event_tlvs(void *os_handle, void *param_struc_ptr,
+				A_UINT32 param_buf_len,
+				A_UINT32 wmi_cmd_event_id,
+				void **wmi_cmd_struct_ptr)
+{
+	/* 0 => look the attributes up in the event table */
+	return wmitlv_check_and_pad_tlvs(os_handle, param_struc_ptr,
+					 param_buf_len, 0, wmi_cmd_event_id,
+					 wmi_cmd_struct_ptr);
+}
+
+/**
+ * wmitlv_check_and_pad_command_tlvs() - tlv helper function
+ * @os_handle: os context handle
+ * @param_struc_ptr: pointer to tlv structure
+ * @param_buf_len: length of tlv parameter
+ * @wmi_cmd_event_id: command event id
+ * @wmi_cmd_struct_ptr: wmi command structure
+ *
+ *
+ * validate and pad(if necessary) for incoming WMI Command TLVs
+ *
+ * Return: 0 if success. Return < 0 if failure.
+ */
+int
+wmitlv_check_and_pad_command_tlvs(void *os_handle, void *param_struc_ptr,
+				  A_UINT32 param_buf_len,
+				  A_UINT32 wmi_cmd_event_id,
+				  void **wmi_cmd_struct_ptr)
+{
+	A_UINT32 is_cmd_id = 1;
+	return wmitlv_check_and_pad_tlvs
+			(os_handle, param_struc_ptr, param_buf_len, is_cmd_id,
+			wmi_cmd_event_id, wmi_cmd_struct_ptr);
+}
+
+/**
+ * wmitlv_free_allocated_tlvs() - tlv helper function
+ * @is_cmd_id: boolean to check if cmd or event tlv
+ * @cmd_event_id: command or event id
+ * @wmi_cmd_struct_ptr: wmi command structure
+ *
+ *
+ * free any allocated buffers for WMI Event/Command TLV processing
+ *
+ * Return: none
+ */
+static void wmitlv_free_allocated_tlvs(A_UINT32 is_cmd_id,
+				       A_UINT32 cmd_event_id,
+				       void **wmi_cmd_struct_ptr)
+{
+	void *ptr = *wmi_cmd_struct_ptr;
+
+	if (!ptr) {
+		wmi_tlv_print_error("%s: Nothing to free for CMD/Event 0x%x\n",
+				    __func__, cmd_event_id);
+		return;
+	}
+#ifndef NO_DYNAMIC_MEM_ALLOC
+
+/* macro to free that previously allocated memory for this TLV. When (op==FREE_TLV_ELEM). */
+#define WMITLV_OP_FREE_TLV_ELEM_macro(param_ptr, param_len, wmi_cmd_event_id, elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size)  \
+	if ((((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->WMITLV_FIELD_BUF_IS_ALLOCATED(elem_name)) &&	\
+	    (((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name != NULL)) \
+	{ \
+		wmi_tlv_os_mem_free(((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name); \
+	}
+
+#define WMITLV_FREE_TLV_ELEMS(id)	     \
+case id: \
+{ \
+	WMITLV_TABLE(id, FREE_TLV_ELEM, NULL, 0)     \
+} \
+break;
+
+	if (is_cmd_id) {
+		switch (cmd_event_id) {
+			WMITLV_ALL_CMD_LIST(WMITLV_FREE_TLV_ELEMS);
+		default:
+			wmi_tlv_print_error
+				("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n",
+				__func__, cmd_event_id, cmd_event_id);
+		}
+	} else {
+		switch (cmd_event_id) {
+			WMITLV_ALL_EVT_LIST(WMITLV_FREE_TLV_ELEMS);
+		default:
+			wmi_tlv_print_error
+				("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n",
+				__func__, cmd_event_id, cmd_event_id);
+		}
+	}
+
+	wmi_tlv_os_mem_free(*wmi_cmd_struct_ptr);
+	*wmi_cmd_struct_ptr = NULL;
+#endif
+
+	return;
+}
+
+/**
+ * wmitlv_free_allocated_command_tlvs() - tlv helper function
+ * @cmd_event_id: command or event id
+ * @wmi_cmd_struct_ptr: wmi command structure
+ *
+ *
+ * free any allocated buffers for WMI Event/Command TLV processing
+ *
+ * Return: none
+ */
+void wmitlv_free_allocated_command_tlvs(A_UINT32 cmd_event_id,
+					void **wmi_cmd_struct_ptr)
+{
+	wmitlv_free_allocated_tlvs(1, cmd_event_id, wmi_cmd_struct_ptr);
+}
+
+/**
+ * wmitlv_free_allocated_event_tlvs() - tlv helper function
+ * @cmd_event_id: command or event id
+ * @wmi_cmd_struct_ptr: wmi command structure
+ *
+ *
+ * free any allocated buffers for WMI Event/Command TLV processing
+ *
+ * Return: none
+ */
+void wmitlv_free_allocated_event_tlvs(A_UINT32 cmd_event_id,
+				      void **wmi_cmd_struct_ptr)
+{
+	wmitlv_free_allocated_tlvs(0, cmd_event_id, wmi_cmd_struct_ptr);
+}
+
+/**
+ * wmi_versions_are_compatible() - tlv helper function
+ * @vers1: host wmi version
+ * @vers2: target wmi version
+ *
+ *
+ * check if two given wmi versions are compatible
+ *
+ * Return: 1 if the two versions are compatible; 0 otherwise
+ */
+int
+wmi_versions_are_compatible(wmi_abi_version *vers1, wmi_abi_version *vers2)
+{
+	if ((vers1->abi_version_ns_0 != vers2->abi_version_ns_0) ||
+	    (vers1->abi_version_ns_1 != vers2->abi_version_ns_1) ||
+	    (vers1->abi_version_ns_2 != vers2->abi_version_ns_2) ||
+	    (vers1->abi_version_ns_3 != vers2->abi_version_ns_3)) {
+		/* The namespaces are different. Incompatible. */
+		return 0;
+	}
+
+	if (vers1->abi_version_0 != vers2->abi_version_0) {
+		/* The major or minor versions are different. Incompatible */
+		return 0;
+	}
+	/* We ignore the build version */
+	return 1;
+}
+
+/**
+ * wmi_versions_can_downgrade() - tlv helper function
+ * @version_whitelist_table: version table with num_whitelist entries
+ * @my_vers: host version
+ * @opp_vers: target version
+ * @out_vers: downgraded version
+ *
+ *
+ * check if target wmi version can be downgraded
+ *
+ * Return: 1 if compatible (with or without downgrade); 0 otherwise.
+ */
+int
+wmi_versions_can_downgrade(int num_whitelist,
+			   wmi_whitelist_version_info *version_whitelist_table,
+			   wmi_abi_version *my_vers,
+			   wmi_abi_version *opp_vers,
+			   wmi_abi_version *out_vers)
+{
+	A_UINT8 can_try_to_downgrade;
+	A_UINT32 my_major_vers = WMI_VER_GET_MAJOR(my_vers->abi_version_0);
+	A_UINT32 my_minor_vers = WMI_VER_GET_MINOR(my_vers->abi_version_0);
+	A_UINT32 opp_major_vers = WMI_VER_GET_MAJOR(opp_vers->abi_version_0);
+	A_UINT32 opp_minor_vers = WMI_VER_GET_MINOR(opp_vers->abi_version_0);
+	A_UINT32 downgraded_minor_vers;
+
+	if ((my_vers->abi_version_ns_0 != opp_vers->abi_version_ns_0) ||
+	    (my_vers->abi_version_ns_1 != opp_vers->abi_version_ns_1) ||
+	    (my_vers->abi_version_ns_2 != opp_vers->abi_version_ns_2) ||
+	    (my_vers->abi_version_ns_3 != opp_vers->abi_version_ns_3)) {
+		/* The namespaces are different. Incompatible. */
+		can_try_to_downgrade = false;
+	} else if (my_major_vers != opp_major_vers) {
+		/* Major version is different. Incompatible and cannot downgrade. */
+		can_try_to_downgrade = false;
+	} else {
+		/* Same major version. */
+
+		if (my_minor_vers < opp_minor_vers) {
+			/* Opposite party is newer. Incompatible and cannot downgrade. */
+			can_try_to_downgrade = false;
+		} else if (my_minor_vers > opp_minor_vers) {
+			/* Opposite party is older. Check whitelist if we can downgrade */
+			can_try_to_downgrade = true;
+		} else {
+			/* Same version */
+			wmi_tlv_OS_MEMCPY(out_vers, my_vers,
+					  sizeof(wmi_abi_version));
+			return 1;
+		}
+	}
+
+	if (!can_try_to_downgrade) {
+		wmi_tlv_print_error("%s: Warning: incompatible WMI version.\n",
+				    __func__);
+		wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
+		return 0;
+	}
+	/* Try to see we can downgrade the supported version */
+	downgraded_minor_vers = my_minor_vers;
+	while (downgraded_minor_vers > opp_minor_vers) {
+		A_UINT8 downgraded = false;
+		int i;
+
+		for (i = 0; i < num_whitelist; i++) {
+			if (version_whitelist_table[i].major != my_major_vers) {
+				continue;       /* skip */
+			}
+			if ((version_whitelist_table[i].namespace_0 !=
+			     my_vers->abi_version_ns_0)
+			    || (version_whitelist_table[i].namespace_1 !=
+				my_vers->abi_version_ns_1)
+			    || (version_whitelist_table[i].namespace_2 !=
+				my_vers->abi_version_ns_2)
+			    || (version_whitelist_table[i].namespace_3 !=
+				my_vers->abi_version_ns_3)) {
+				continue;       /* skip */
+			}
+			if (version_whitelist_table[i].minor ==
+			    downgraded_minor_vers) {
+				/* Found the next version that I can downgrade */
+				wmi_tlv_print_error
+					("%s: Note: found a whitelist entry to downgrade. wh. list ver: %d,%d,0x%x 0x%x 0x%x 0x%x\n",
+					__func__, version_whitelist_table[i].major,
+					version_whitelist_table[i].minor,
+					version_whitelist_table[i].namespace_0,
+					version_whitelist_table[i].namespace_1,
+					version_whitelist_table[i].namespace_2,
+					version_whitelist_table[i].namespace_3);
+				downgraded_minor_vers--;
+				downgraded = true;
+				break;
+			}
+		}
+		if (!downgraded) {
+			break;  /* Done since we did not find any whitelist to downgrade version */
+		}
+	}
+	wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
+	out_vers->abi_version_0 =
+		WMI_VER_GET_VERSION_0(my_major_vers, downgraded_minor_vers);
+	if (downgraded_minor_vers != opp_minor_vers) {
+		wmi_tlv_print_error
+			("%s: Warning: incompatible WMI version and cannot downgrade.\n",
+			__func__);
+		return 0;       /* Incompatible */
+	} else {
+		return 1;       /* Compatible */
+	}
+}
+
+/**
+ * wmi_cmp_and_set_abi_version() - tlv helper function
+ * @version_whitelist_table: version table
+ * @my_vers: host version
+ * @opp_vers: target version
+ * @out_vers: downgraded version
+ *
+ * This routine will compare and set the WMI ABI version.
+ * First, compare my version with the opposite side's version.
+ * If incompatible, then check the whitelist to see if our side can downgrade.
+ * Finally, fill in the final ABI version into the output, out_vers.
+ * Return 0 if the output version is compatible
+ * Else return 1 if the output version is incompatible
+ *
+ * Return: 0 if the output version is compatible, 1 if incompatible.
+ */
+int
+wmi_cmp_and_set_abi_version(int num_whitelist,
+			    wmi_whitelist_version_info *
+			    version_whitelist_table,
+			    struct _wmi_abi_version *my_vers,
+			    struct _wmi_abi_version *opp_vers,
+			    struct _wmi_abi_version *out_vers)
+{
+	wmi_tlv_print_verbose
+		("%s: Our WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
+		__func__, WMI_VER_GET_MAJOR(my_vers->abi_version_0),
+		WMI_VER_GET_MINOR(my_vers->abi_version_0), my_vers->abi_version_1,
+		my_vers->abi_version_ns_0, my_vers->abi_version_ns_1,
+		my_vers->abi_version_ns_2, my_vers->abi_version_ns_3);
+
+	wmi_tlv_print_verbose
+		("%s: Opposite side WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
+		__func__, WMI_VER_GET_MAJOR(opp_vers->abi_version_0),
+		WMI_VER_GET_MINOR(opp_vers->abi_version_0),
+		opp_vers->abi_version_1, opp_vers->abi_version_ns_0,
+		opp_vers->abi_version_ns_1, opp_vers->abi_version_ns_2,
+		opp_vers->abi_version_ns_3);
+
+	/* By default, the output version is our version. */
+	wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
+	if (!wmi_versions_are_compatible(my_vers, opp_vers)) {
+		/* Our host version and the given firmware version are incompatible. */
+		if (wmi_versions_can_downgrade
+			    (num_whitelist, version_whitelist_table, my_vers, opp_vers,
+			    out_vers)) {
+			/* We can downgrade our host versions to match firmware. */
+			wmi_tlv_print_error
+				("%s: Host downgraded WMI Versions to match fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
+				__func__,
+				WMI_VER_GET_MAJOR(out_vers->abi_version_0),
+				WMI_VER_GET_MINOR(out_vers->abi_version_0),
+				out_vers->abi_version_1,
+				out_vers->abi_version_ns_0,
+				out_vers->abi_version_ns_1,
+				out_vers->abi_version_ns_2,
+				out_vers->abi_version_ns_3);
+			return 0;       /* Compatible */
+		} else {
+			/* Warn: We cannot downgrade our host versions to match firmware. */
+			wmi_tlv_print_error
+				("%s: WARN: Host WMI Versions mismatch with fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
+				__func__,
+				WMI_VER_GET_MAJOR(out_vers->abi_version_0),
+				WMI_VER_GET_MINOR(out_vers->abi_version_0),
+				out_vers->abi_version_1,
+				out_vers->abi_version_ns_0,
+				out_vers->abi_version_ns_1,
+				out_vers->abi_version_ns_2,
+				out_vers->abi_version_ns_3);
+
+			return 1;       /* Incompatible */
+		}
+	} else {
+		/* We are compatible. Our host version is the output version */
+		wmi_tlv_print_verbose
+			("%s: Host and FW Compatible WMI Versions. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
+			__func__, WMI_VER_GET_MAJOR(out_vers->abi_version_0),
+			WMI_VER_GET_MINOR(out_vers->abi_version_0),
+			out_vers->abi_version_1, out_vers->abi_version_ns_0,
+			out_vers->abi_version_ns_1, out_vers->abi_version_ns_2,
+			out_vers->abi_version_ns_3);
+		return 0;       /* Compatible */
+	}
+}

+ 54 - 0
wmi_tlv_platform.c

@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * LMAC offload interface functions for WMI TLV Interface
+ */
+
+#include "ol_if_athvar.h"
+#include <cdf_memory.h>         /* cdf_mem_malloc,free, etc. */
+#include <osdep.h>
+#include "htc_api.h"
+#include "wmi.h"
+#include "wma.h"
+
+
+/* Following macro definitions use OS or platform specific functions */
+#define dummy_print(fmt, ...) {}
+#define wmi_tlv_print_verbose dummy_print
+#define wmi_tlv_print_error   cdf_print
+#define wmi_tlv_OS_MEMCPY     OS_MEMCPY
+#define wmi_tlv_OS_MEMZERO    OS_MEMZERO
+#define wmi_tlv_OS_MEMMOVE    OS_MEMMOVE
+
+#ifndef NO_DYNAMIC_MEM_ALLOC
+#define wmi_tlv_os_mem_alloc(scn, ptr, numBytes) \
+	{ \
+		(ptr) = os_malloc(NULL, (numBytes), GFP_ATOMIC); \
+	}
+#define wmi_tlv_os_mem_free   cdf_mem_free
+#endif

+ 1503 - 0
wmi_unified.c

@@ -0,0 +1,1503 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Host WMI unified implementation
+ */
+#include "athdefs.h"
+#include "osapi_linux.h"
+#include "a_types.h"
+#include "a_debug.h"
+#include "ol_if_athvar.h"
+#include "ol_defines.h"
+#include "ol_fw.h"
+#include "htc_api.h"
+#include "htc_api.h"
+#include "dbglog_host.h"
+#include "wmi.h"
+#include "wmi_unified_priv.h"
+#include "wma_api.h"
+#include "wma.h"
+#include "mac_trace.h"
+
+#define WMI_MIN_HEAD_ROOM 64
+
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+/* WMI commands */
+uint32_t g_wmi_command_buf_idx = 0;
+struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
+
+/* WMI commands TX completed */
+uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
+struct wmi_command_debug
+	wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
+
+/* WMI events when processed */
+uint32_t g_wmi_event_buf_idx = 0;
+struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
+
+/* WMI events when queued */
+uint32_t g_wmi_rx_event_buf_idx = 0;
+struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
+
+#define WMI_COMMAND_RECORD(a, b) {					\
+	if (WMI_EVENT_DEBUG_MAX_ENTRY <= g_wmi_command_buf_idx)		\
+		g_wmi_command_buf_idx = 0;				\
+	wmi_command_log_buffer[g_wmi_command_buf_idx].command = a;	\
+	cdf_mem_copy(wmi_command_log_buffer[g_wmi_command_buf_idx].data, b, 16); \
+	wmi_command_log_buffer[g_wmi_command_buf_idx].time =		\
+		cdf_get_log_timestamp();				\
+	g_wmi_command_buf_idx++;					\
+}
+
+#define WMI_COMMAND_TX_CMP_RECORD(a, b) {				\
+	if (WMI_EVENT_DEBUG_MAX_ENTRY <= g_wmi_command_tx_cmp_buf_idx)	\
+		g_wmi_command_tx_cmp_buf_idx = 0;			\
+	wmi_command_tx_cmp_log_buffer[g_wmi_command_tx_cmp_buf_idx].command = a; \
+	cdf_mem_copy(wmi_command_tx_cmp_log_buffer			\
+	     [g_wmi_command_tx_cmp_buf_idx].data, b, 16);	     \
+	wmi_command_tx_cmp_log_buffer[g_wmi_command_tx_cmp_buf_idx].time = \
+		cdf_get_log_timestamp();				\
+	g_wmi_command_tx_cmp_buf_idx++;					\
+}
+
+#define WMI_EVENT_RECORD(a, b) {					\
+	if (WMI_EVENT_DEBUG_MAX_ENTRY <= g_wmi_event_buf_idx)		\
+		g_wmi_event_buf_idx = 0;				\
+	wmi_event_log_buffer[g_wmi_event_buf_idx].event = a;		\
+	cdf_mem_copy(wmi_event_log_buffer[g_wmi_event_buf_idx].data, b, 16); \
+	wmi_event_log_buffer[g_wmi_event_buf_idx].time =		\
+		cdf_get_log_timestamp();				\
+	g_wmi_event_buf_idx++;						\
+}
+
+#define WMI_RX_EVENT_RECORD(a, b) {					\
+	if (WMI_EVENT_DEBUG_MAX_ENTRY <= g_wmi_rx_event_buf_idx)	\
+		g_wmi_rx_event_buf_idx = 0;				\
+	wmi_rx_event_log_buffer[g_wmi_rx_event_buf_idx].event = a;	\
+	cdf_mem_copy(wmi_rx_event_log_buffer[g_wmi_rx_event_buf_idx].data, b, 16); \
+	wmi_rx_event_log_buffer[g_wmi_rx_event_buf_idx].time =		\
+		cdf_get_log_timestamp();				\
+	g_wmi_rx_event_buf_idx++;					\
+}
+/* wmi_mgmt commands */
+#define WMI_MGMT_EVENT_DEBUG_MAX_ENTRY (256)
+
+uint32_t g_wmi_mgmt_command_buf_idx = 0;
+struct
+wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY];
+
+/* wmi_mgmt commands TX completed */
+uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
+struct wmi_command_debug
+wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY];
+
+/* wmi_mgmt events when processed */
+uint32_t g_wmi_mgmt_event_buf_idx = 0;
+struct wmi_event_debug
+wmi_mgmt_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY];
+
+#define WMI_MGMT_COMMAND_RECORD(a, b) {					     \
+	if (WMI_MGMT_EVENT_DEBUG_MAX_ENTRY <=				     \
+		g_wmi_mgmt_command_buf_idx)				     \
+		g_wmi_mgmt_command_buf_idx = 0;				     \
+	wmi_mgmt_command_log_buffer[g_wmi_mgmt_command_buf_idx].command = a; \
+	cdf_mem_copy(							     \
+		wmi_mgmt_command_log_buffer[g_wmi_mgmt_command_buf_idx].data,\
+		b, 16);							     \
+	wmi_mgmt_command_log_buffer[g_wmi_mgmt_command_buf_idx].time =	     \
+		cdf_get_log_timestamp();				     \
+	g_wmi_mgmt_command_buf_idx++;					     \
+}
+
+#define WMI_MGMT_COMMAND_TX_CMP_RECORD(a, b) {				     \
+	if (WMI_MGMT_EVENT_DEBUG_MAX_ENTRY <=				     \
+	    g_wmi_mgmt_command_tx_cmp_buf_idx)				     \
+		g_wmi_mgmt_command_tx_cmp_buf_idx = 0;			     \
+	wmi_mgmt_command_tx_cmp_log_buffer[g_wmi_mgmt_command_tx_cmp_buf_idx].\
+								command = a; \
+	cdf_mem_copy(wmi_mgmt_command_tx_cmp_log_buffer			     \
+		     [g_wmi_mgmt_command_tx_cmp_buf_idx].data, b, 16);	     \
+	wmi_mgmt_command_tx_cmp_log_buffer[g_wmi_mgmt_command_tx_cmp_buf_idx].\
+									time =\
+		cdf_get_log_timestamp();				      \
+	g_wmi_mgmt_command_tx_cmp_buf_idx++;				      \
+}
+
+#define WMI_MGMT_EVENT_RECORD(a, b) {					      \
+	if (WMI_MGMT_EVENT_DEBUG_MAX_ENTRY <= g_wmi_mgmt_event_buf_idx)       \
+		g_wmi_mgmt_event_buf_idx = 0;				      \
+	wmi_mgmt_event_log_buffer[g_wmi_mgmt_event_buf_idx].event = a;	      \
+	cdf_mem_copy(wmi_mgmt_event_log_buffer[g_wmi_mgmt_event_buf_idx].data,\
+		     b, 16);						      \
+	wmi_mgmt_event_log_buffer[g_wmi_mgmt_event_buf_idx].time =	      \
+		cdf_get_log_timestamp();				      \
+	g_wmi_mgmt_event_buf_idx++;					      \
+}
+
+#endif /*WMI_INTERFACE_EVENT_LOGGING */
+
+static void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf);
+int wmi_get_host_credits(wmi_unified_t wmi_handle);
+/* WMI buffer APIs */
+
+#ifdef MEMORY_DEBUG
+wmi_buf_t
+wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len, uint8_t *file_name,
+			 uint32_t line_num)
+{
+	wmi_buf_t wmi_buf;
+
+	if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) {
+		CDF_ASSERT(0);
+		return NULL;
+	}
+
+	wmi_buf = cdf_nbuf_alloc_debug(NULL,
+				       roundup(len + WMI_MIN_HEAD_ROOM, 4),
+				       WMI_MIN_HEAD_ROOM, 4, false, file_name,
+				       line_num);
+
+	if (!wmi_buf)
+		return NULL;
+
+	/* Clear the wmi buffer */
+	OS_MEMZERO(cdf_nbuf_data(wmi_buf), len);
+
+	/*
+	 * Set the length of the buffer to match the allocation size.
+	 */
+	cdf_nbuf_set_pktlen(wmi_buf, len);
+
+	return wmi_buf;
+}
+
+void wmi_buf_free(wmi_buf_t net_buf)
+{
+	cdf_nbuf_free(net_buf);
+}
+#else
+wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint16_t len)
+{
+	wmi_buf_t wmi_buf;
+
+	if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) {
+		CDF_ASSERT(0);
+		return NULL;
+	}
+
+	wmi_buf = cdf_nbuf_alloc(NULL, roundup(len + WMI_MIN_HEAD_ROOM, 4),
+				WMI_MIN_HEAD_ROOM, 4, false);
+	if (!wmi_buf)
+		return NULL;
+
+	/* Clear the wmi buffer */
+	OS_MEMZERO(cdf_nbuf_data(wmi_buf), len);
+
+	/*
+	 * Set the length of the buffer to match the allocation size.
+	 */
+	cdf_nbuf_set_pktlen(wmi_buf, len);
+	return wmi_buf;
+}
+
+void wmi_buf_free(wmi_buf_t net_buf)
+{
+	cdf_nbuf_free(net_buf);
+}
+#endif
+
+/**
+ * wmi_get_max_msg_len() - get maximum WMI message length
+ * @wmi_handle: WMI handle.
+ *
+ * This function returns the maximum WMI message length
+ *
+ * Return: maximum WMI message length
+ */
+uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
+{
+	return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
+}
+
+static uint8_t *get_wmi_cmd_string(WMI_CMD_ID wmi_command)
+{
+	switch (wmi_command) {
+		/* initialize the wlan sub system */
+		CASE_RETURN_STRING(WMI_INIT_CMDID);
+
+		/* Scan specific commands */
+
+		/* start scan request to FW  */
+		CASE_RETURN_STRING(WMI_START_SCAN_CMDID);
+		/* stop scan request to FW  */
+		CASE_RETURN_STRING(WMI_STOP_SCAN_CMDID);
+		/* full list of channels as defined by the regulatory
+		 * that will be used by scanner   */
+		CASE_RETURN_STRING(WMI_SCAN_CHAN_LIST_CMDID);
+		/* overwrite default priority table in scan scheduler   */
+		CASE_RETURN_STRING(WMI_SCAN_SCH_PRIO_TBL_CMDID);
+		/* This command to adjust the priority and min.max_rest_time
+		 * of an on ongoing scan request.
+		 */
+		CASE_RETURN_STRING(WMI_SCAN_UPDATE_REQUEST_CMDID);
+
+		/* PDEV(physical device) specific commands */
+		/* set regulatory ctl id used by FW to determine the exact
+		 * ctl power limits */
+		CASE_RETURN_STRING(WMI_PDEV_SET_REGDOMAIN_CMDID);
+		/* set channel. mainly used for supporting monitor mode */
+		CASE_RETURN_STRING(WMI_PDEV_SET_CHANNEL_CMDID);
+		/* set pdev specific parameters */
+		CASE_RETURN_STRING(WMI_PDEV_SET_PARAM_CMDID);
+		/* enable packet log */
+		CASE_RETURN_STRING(WMI_PDEV_PKTLOG_ENABLE_CMDID);
+		/* disable packet log*/
+		CASE_RETURN_STRING(WMI_PDEV_PKTLOG_DISABLE_CMDID);
+		/* set wmm parameters */
+		CASE_RETURN_STRING(WMI_PDEV_SET_WMM_PARAMS_CMDID);
+		/* set HT cap ie that needs to be carried probe requests
+		 * HT/VHT channels */
+		CASE_RETURN_STRING(WMI_PDEV_SET_HT_CAP_IE_CMDID);
+		/* set VHT cap ie that needs to be carried on probe
+		 * requests on VHT channels */
+		CASE_RETURN_STRING(WMI_PDEV_SET_VHT_CAP_IE_CMDID);
+
+		/* Command to send the DSCP-to-TID map to the target */
+		CASE_RETURN_STRING(WMI_PDEV_SET_DSCP_TID_MAP_CMDID);
+		/* set quiet ie parameters. primarily used in AP mode */
+		CASE_RETURN_STRING(WMI_PDEV_SET_QUIET_MODE_CMDID);
+		/* Enable/Disable Green AP Power Save  */
+		CASE_RETURN_STRING(WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID);
+		/* get TPC config for the current operating channel */
+		CASE_RETURN_STRING(WMI_PDEV_GET_TPC_CONFIG_CMDID);
+
+		/* set the base MAC address for the physical device before
+		 * a VDEV is created. For firmware that does not support
+		 * this feature and this command, the pdev MAC address will
+		 * not be changed. */
+		CASE_RETURN_STRING(WMI_PDEV_SET_BASE_MACADDR_CMDID);
+
+		/* eeprom content dump , the same to bdboard data */
+		CASE_RETURN_STRING(WMI_PDEV_DUMP_CMDID);
+
+		/* VDEV(virtual device) specific commands */
+		/* vdev create */
+		CASE_RETURN_STRING(WMI_VDEV_CREATE_CMDID);
+		/* vdev delete */
+		CASE_RETURN_STRING(WMI_VDEV_DELETE_CMDID);
+		/* vdev start request */
+		CASE_RETURN_STRING(WMI_VDEV_START_REQUEST_CMDID);
+		/* vdev restart request (RX only, NO TX, used for CAC period)*/
+		CASE_RETURN_STRING(WMI_VDEV_RESTART_REQUEST_CMDID);
+		/* vdev up request */
+		CASE_RETURN_STRING(WMI_VDEV_UP_CMDID);
+		/* vdev stop request */
+		CASE_RETURN_STRING(WMI_VDEV_STOP_CMDID);
+		/* vdev down request */
+		CASE_RETURN_STRING(WMI_VDEV_DOWN_CMDID);
+		/* set a vdev param */
+		CASE_RETURN_STRING(WMI_VDEV_SET_PARAM_CMDID);
+		/* set a key (used for setting per peer unicast
+		 * and per vdev multicast) */
+		CASE_RETURN_STRING(WMI_VDEV_INSTALL_KEY_CMDID);
+
+		/* wnm sleep mode command */
+		CASE_RETURN_STRING(WMI_VDEV_WNM_SLEEPMODE_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_WMM_ADDTS_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_WMM_DELTS_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_SET_WMM_PARAMS_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_SET_GTX_PARAMS_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID);
+
+		CASE_RETURN_STRING(WMI_VDEV_PLMREQ_START_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_PLMREQ_STOP_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_TSF_TSTAMP_ACTION_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_SET_IE_CMDID);
+
+		/* peer specific commands */
+
+		/** create a peer */
+		CASE_RETURN_STRING(WMI_PEER_CREATE_CMDID);
+		/** delete a peer */
+		CASE_RETURN_STRING(WMI_PEER_DELETE_CMDID);
+		/** flush specific  tid queues of a peer */
+		CASE_RETURN_STRING(WMI_PEER_FLUSH_TIDS_CMDID);
+		/** set a parameter of a peer */
+		CASE_RETURN_STRING(WMI_PEER_SET_PARAM_CMDID);
+		/* set peer to associated state. will carry all parameters
+		 * determined during association time */
+		CASE_RETURN_STRING(WMI_PEER_ASSOC_CMDID);
+		/* add a wds  (4 address ) entry. used only for testing
+		 * WDS feature on AP products */
+		CASE_RETURN_STRING(WMI_PEER_ADD_WDS_ENTRY_CMDID);
+		/* remove wds  (4 address ) entry. used only for testing WDS
+		 * feature on AP products */
+		CASE_RETURN_STRING(WMI_PEER_REMOVE_WDS_ENTRY_CMDID);
+		/* set up mcast info for multicast to unicast conversion */
+		CASE_RETURN_STRING(WMI_PEER_MCAST_GROUP_CMDID);
+		/* request peer info from FW to get PEER_INFO_EVENTID */
+		CASE_RETURN_STRING(WMI_PEER_INFO_REQ_CMDID);
+
+		/* beacon/management specific commands */
+
+		/* transmit beacon by reference. used for transmitting beacon
+		 * on low latency interface like pcie */
+		CASE_RETURN_STRING(WMI_BCN_TX_CMDID);
+		/* transmit beacon by value */
+		CASE_RETURN_STRING(WMI_PDEV_SEND_BCN_CMDID);
+		/* set the beacon template. used in beacon offload mode to setup
+		 * the common beacon template with the FW to be used by FW to
+		 * generate beacons */
+		CASE_RETURN_STRING(WMI_BCN_TMPL_CMDID);
+		/* set beacon filter with FW */
+		CASE_RETURN_STRING(WMI_BCN_FILTER_RX_CMDID);
+		/* enable/disable filtering of probe requests in the firmware */
+		CASE_RETURN_STRING(WMI_PRB_REQ_FILTER_RX_CMDID);
+		/* transmit management frame by value. will be deprecated */
+		CASE_RETURN_STRING(WMI_MGMT_TX_CMDID);
+		/* set the probe response template. used in beacon offload mode
+		 * to setup the common probe response template with the FW to
+		 * be used by FW to generate probe responses */
+		CASE_RETURN_STRING(WMI_PRB_TMPL_CMDID);
+
+		/* commands to directly control ba negotiation directly from
+		 * host. only used in test mode */
+
+		/* turn off FW Auto addba mode and let host control addba */
+		CASE_RETURN_STRING(WMI_ADDBA_CLEAR_RESP_CMDID);
+		/* send add ba request */
+		CASE_RETURN_STRING(WMI_ADDBA_SEND_CMDID);
+		CASE_RETURN_STRING(WMI_ADDBA_STATUS_CMDID);
+		/* send del ba */
+		CASE_RETURN_STRING(WMI_DELBA_SEND_CMDID);
+		/* set add ba response will be used by FW to generate
+		 * addba response*/
+		CASE_RETURN_STRING(WMI_ADDBA_SET_RESP_CMDID);
+		/* send single VHT MPDU with AMSDU */
+		CASE_RETURN_STRING(WMI_SEND_SINGLEAMSDU_CMDID);
+
+		/* Station power save specific config */
+		/* enable/disable station powersave */
+		CASE_RETURN_STRING(WMI_STA_POWERSAVE_MODE_CMDID);
+		/* set station power save specific parameter */
+		CASE_RETURN_STRING(WMI_STA_POWERSAVE_PARAM_CMDID);
+		/* set station mimo powersave mode */
+		CASE_RETURN_STRING(WMI_STA_MIMO_PS_MODE_CMDID);
+
+		/* DFS-specific commands */
+		/* enable DFS (radar detection)*/
+		CASE_RETURN_STRING(WMI_PDEV_DFS_ENABLE_CMDID);
+		/* disable DFS (radar detection)*/
+		CASE_RETURN_STRING(WMI_PDEV_DFS_DISABLE_CMDID);
+		/* enable DFS phyerr/parse filter offload */
+		CASE_RETURN_STRING(WMI_DFS_PHYERR_FILTER_ENA_CMDID);
+		/* enable DFS phyerr/parse filter offload */
+		CASE_RETURN_STRING(WMI_DFS_PHYERR_FILTER_DIS_CMDID);
+
+		/* Roaming specific  commands */
+		/* set roam scan mode */
+		CASE_RETURN_STRING(WMI_ROAM_SCAN_MODE);
+		/* set roam scan rssi threshold below which roam
+		 * scan is enabled  */
+		CASE_RETURN_STRING(WMI_ROAM_SCAN_RSSI_THRESHOLD);
+		/* set roam scan period for periodic roam scan mode  */
+		CASE_RETURN_STRING(WMI_ROAM_SCAN_PERIOD);
+		/* set roam scan trigger rssi change threshold   */
+		CASE_RETURN_STRING(WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD);
+		/* set roam AP profile   */
+		CASE_RETURN_STRING(WMI_ROAM_AP_PROFILE);
+		/* set channel list for roam scans */
+		CASE_RETURN_STRING(WMI_ROAM_CHAN_LIST);
+		/* offload scan specific commands */
+		/* set offload scan AP profile   */
+		CASE_RETURN_STRING(WMI_OFL_SCAN_ADD_AP_PROFILE);
+		/* remove offload scan AP profile   */
+		CASE_RETURN_STRING(WMI_OFL_SCAN_REMOVE_AP_PROFILE);
+		/* set offload scan period   */
+		CASE_RETURN_STRING(WMI_OFL_SCAN_PERIOD);
+
+		/* P2P specific commands */
+		/* set P2P device info. Will be used by FW to create P2P IE
+		 * to be carried in probe response generated during p2p listen
+		 * and for p2p discoverability  */
+		CASE_RETURN_STRING(WMI_P2P_DEV_SET_DEVICE_INFO);
+		/* enable/disable p2p discoverability on STA/AP VDEVs  */
+		CASE_RETURN_STRING(WMI_P2P_DEV_SET_DISCOVERABILITY);
+		/* set p2p ie to be carried in beacons generated by FW for GO */
+		CASE_RETURN_STRING(WMI_P2P_GO_SET_BEACON_IE);
+		/* set p2p ie to be carried in probe response frames generated
+		 * by FW for GO  */
+		CASE_RETURN_STRING(WMI_P2P_GO_SET_PROBE_RESP_IE);
+		/* set the vendor specific p2p ie data.
+		 * FW will use this to parse the P2P NoA
+		 * attribute in the beacons/probe responses received.
+		 */
+		CASE_RETURN_STRING(WMI_P2P_SET_VENDOR_IE_DATA_CMDID);
+		/* set the configure of p2p find offload */
+		CASE_RETURN_STRING(WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID);
+		/* set the vendor specific p2p ie data for p2p find offload */
+		CASE_RETURN_STRING(WMI_P2P_DISC_OFFLOAD_APPIE_CMDID);
+		/* set the BSSID/device name pattern of p2p find offload */
+		CASE_RETURN_STRING(WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID);
+		/* set OppPS related parameters **/
+		CASE_RETURN_STRING(WMI_P2P_SET_OPPPS_PARAM_CMDID);
+
+		/* AP power save specific config
+		 * set AP power save specific param */
+		CASE_RETURN_STRING(WMI_AP_PS_PEER_PARAM_CMDID);
+		/* set AP UAPSD coex specific param */
+		CASE_RETURN_STRING(WMI_AP_PS_PEER_UAPSD_COEX_CMDID);
+
+		/* Rate-control specific commands */
+		CASE_RETURN_STRING(WMI_PEER_RATE_RETRY_SCHED_CMDID);
+
+		/* WLAN Profiling commands. */
+		CASE_RETURN_STRING(WMI_WLAN_PROFILE_TRIGGER_CMDID);
+		CASE_RETURN_STRING(WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID);
+		CASE_RETURN_STRING(WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID);
+		CASE_RETURN_STRING(WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID);
+		CASE_RETURN_STRING(WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID);
+
+		/* Suspend resume command Ids */
+		CASE_RETURN_STRING(WMI_PDEV_SUSPEND_CMDID);
+		CASE_RETURN_STRING(WMI_PDEV_RESUME_CMDID);
+
+		/* Beacon filter commands */
+		/* add a beacon filter */
+		CASE_RETURN_STRING(WMI_ADD_BCN_FILTER_CMDID);
+		/* remove a  beacon filter */
+		CASE_RETURN_STRING(WMI_RMV_BCN_FILTER_CMDID);
+
+		/* WOW Specific WMI commands */
+		/* add pattern for awake */
+		CASE_RETURN_STRING(WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+		/* delete a wake pattern */
+		CASE_RETURN_STRING(WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+		/* enable/disable wake event  */
+		CASE_RETURN_STRING(WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+		/* enable WOW  */
+		CASE_RETURN_STRING(WMI_WOW_ENABLE_CMDID);
+		/* host woke up from sleep event to FW. Generated in response
+		 * to WOW Hardware event */
+		CASE_RETURN_STRING(WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+
+		/* RTT measurement related cmd */
+		/* request to make an RTT measurement */
+		CASE_RETURN_STRING(WMI_RTT_MEASREQ_CMDID);
+		/* request to report a tsf measurement */
+		CASE_RETURN_STRING(WMI_RTT_TSF_CMDID);
+
+		/* spectral scan command */
+		/* configure spectral scan */
+		CASE_RETURN_STRING(WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+		/* enable/disable spectral scan and trigger */
+		CASE_RETURN_STRING(WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+
+		/* F/W stats */
+		/* one time request for stats */
+		CASE_RETURN_STRING(WMI_REQUEST_STATS_CMDID);
+		/* Push MCC Adaptive Scheduler Stats to Firmware */
+		CASE_RETURN_STRING(WMI_MCC_SCHED_TRAFFIC_STATS_CMDID);
+
+		/* ARP OFFLOAD REQUEST*/
+		CASE_RETURN_STRING(WMI_SET_ARP_NS_OFFLOAD_CMDID);
+
+		/* Proactive ARP Response Add Pattern Command*/
+		CASE_RETURN_STRING(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID);
+
+		/* Proactive ARP Response Del Pattern Command*/
+		CASE_RETURN_STRING(WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID);
+
+		/* NS offload config */
+		CASE_RETURN_STRING(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+
+		/* GTK offload Specific WMI commands */
+		CASE_RETURN_STRING(WMI_GTK_OFFLOAD_CMDID);
+
+		/* CSA offload Specific WMI commands */
+		/* csa offload enable */
+		CASE_RETURN_STRING(WMI_CSA_OFFLOAD_ENABLE_CMDID);
+		/* chan switch command */
+		CASE_RETURN_STRING(WMI_CSA_OFFLOAD_CHANSWITCH_CMDID);
+
+		/* Chatter commands */
+		/* Change chatter mode of operation */
+		CASE_RETURN_STRING(WMI_CHATTER_SET_MODE_CMDID);
+		/* chatter add coalescing filter command */
+		CASE_RETURN_STRING(WMI_CHATTER_ADD_COALESCING_FILTER_CMDID);
+		/* chatter delete coalescing filter command */
+		CASE_RETURN_STRING(WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID);
+		/* chatter coalecing query command */
+		CASE_RETURN_STRING(WMI_CHATTER_COALESCING_QUERY_CMDID);
+
+		/* addba specific commands */
+		/* start the aggregation on this TID */
+		CASE_RETURN_STRING(WMI_PEER_TID_ADDBA_CMDID);
+		/* stop the aggregation on this TID */
+		CASE_RETURN_STRING(WMI_PEER_TID_DELBA_CMDID);
+
+		/* set station mimo powersave method */
+		CASE_RETURN_STRING(WMI_STA_DTIM_PS_METHOD_CMDID);
+		/* Configure the Station UAPSD AC Auto Trigger Parameters */
+		CASE_RETURN_STRING(WMI_STA_UAPSD_AUTO_TRIG_CMDID);
+		/* Configure the Keep Alive Parameters */
+		CASE_RETURN_STRING(WMI_STA_KEEPALIVE_CMDID);
+
+		/* Request ssn from target for a sta/tid pair */
+		CASE_RETURN_STRING(WMI_BA_REQ_SSN_CMDID);
+		/* misc command group */
+		/* echo command mainly used for testing */
+		CASE_RETURN_STRING(WMI_ECHO_CMDID);
+
+		/* !!IMPORTANT!!
+		 * If you need to add a new WMI command to the CASE_RETURN_STRING(WMI_GRP_MISC sub-group,
+		 * please make sure you add it BEHIND CASE_RETURN_STRING(WMI_PDEV_UTF_CMDID);
+		 * as we MUST have a fixed value here to maintain compatibility between
+		 * UTF and the ART2 driver
+		 */
+		/** UTF WMI commands */
+		CASE_RETURN_STRING(WMI_PDEV_UTF_CMDID);
+
+		/** set debug log config */
+		CASE_RETURN_STRING(WMI_DBGLOG_CFG_CMDID);
+		/* QVIT specific command id */
+		CASE_RETURN_STRING(WMI_PDEV_QVIT_CMDID);
+		/* Factory Testing Mode request command
+		 * used for integrated chipsets */
+		CASE_RETURN_STRING(WMI_PDEV_FTM_INTG_CMDID);
+		/* set and get keepalive parameters command */
+		CASE_RETURN_STRING(WMI_VDEV_SET_KEEPALIVE_CMDID);
+		CASE_RETURN_STRING(WMI_VDEV_GET_KEEPALIVE_CMDID);
+		/* For fw recovery test command */
+		CASE_RETURN_STRING(WMI_FORCE_FW_HANG_CMDID);
+		/* Set Mcast/Bdcast filter */
+		CASE_RETURN_STRING(WMI_SET_MCASTBCAST_FILTER_CMDID);
+		/* set thermal management params */
+		CASE_RETURN_STRING(WMI_THERMAL_MGMT_CMDID);
+		CASE_RETURN_STRING(WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_LRO_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_TRANSFER_DATA_TO_FLASH_CMDID);
+		CASE_RETURN_STRING(WMI_MAWC_SENSOR_REPORT_IND_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_CONFIGURE_MAWC_CMDID);
+		CASE_RETURN_STRING(WMI_NLO_CONFIGURE_MAWC_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_CONFIGURE_MAWC_CMDID);
+		/* GPIO Configuration */
+		CASE_RETURN_STRING(WMI_GPIO_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_GPIO_OUTPUT_CMDID);
+
+		/* Txbf configuration command */
+		CASE_RETURN_STRING(WMI_TXBF_CMDID);
+
+		/* FWTEST Commands */
+		CASE_RETURN_STRING(WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID);
+		/* set NoA descs */
+		CASE_RETURN_STRING(WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID);
+
+		/* TDLS Configuration */
+		/* enable/disable TDLS */
+		CASE_RETURN_STRING(WMI_TDLS_SET_STATE_CMDID);
+		/* set tdls peer state */
+		CASE_RETURN_STRING(WMI_TDLS_PEER_UPDATE_CMDID);
+
+		/* Resmgr Configuration */
+		/* Adaptive OCS is enabled by default in the FW.
+		 * This command is used to disable FW based adaptive OCS.
+		 */
+		CASE_RETURN_STRING
+			(WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID);
+		/* set the requested channel time quota for the home channels */
+		CASE_RETURN_STRING(WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID);
+		/* set the requested latency for the home channels */
+		CASE_RETURN_STRING(WMI_RESMGR_SET_CHAN_LATENCY_CMDID);
+
+		/* STA SMPS Configuration */
+		/* force SMPS mode */
+		CASE_RETURN_STRING(WMI_STA_SMPS_FORCE_MODE_CMDID);
+		/* set SMPS parameters */
+		CASE_RETURN_STRING(WMI_STA_SMPS_PARAM_CMDID);
+
+		/* Wlan HB commands */
+		/* enable/disable wlan HB */
+		CASE_RETURN_STRING(WMI_HB_SET_ENABLE_CMDID);
+		/* set tcp parameters for wlan HB */
+		CASE_RETURN_STRING(WMI_HB_SET_TCP_PARAMS_CMDID);
+		/* set tcp pkt filter for wlan HB */
+		CASE_RETURN_STRING(WMI_HB_SET_TCP_PKT_FILTER_CMDID);
+		/* set udp parameters for wlan HB */
+		CASE_RETURN_STRING(WMI_HB_SET_UDP_PARAMS_CMDID);
+		/* set udp pkt filter for wlan HB */
+		CASE_RETURN_STRING(WMI_HB_SET_UDP_PKT_FILTER_CMDID);
+
+		/* Wlan RMC commands*/
+		/* enable/disable RMC */
+		CASE_RETURN_STRING(WMI_RMC_SET_MODE_CMDID);
+		/* configure action frame period */
+		CASE_RETURN_STRING(WMI_RMC_SET_ACTION_PERIOD_CMDID);
+		/* For debug/future enhancement purposes only,
+		 * configures/finetunes RMC algorithms */
+		CASE_RETURN_STRING(WMI_RMC_CONFIG_CMDID);
+
+		/* WLAN MHF offload commands */
+		/** enable/disable MHF offload */
+		CASE_RETURN_STRING(WMI_MHF_OFFLOAD_SET_MODE_CMDID);
+		/* Plumb routing table for MHF offload */
+		CASE_RETURN_STRING(WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID);
+
+		/* location scan commands */
+		/* start batch scan */
+		CASE_RETURN_STRING(WMI_BATCH_SCAN_ENABLE_CMDID);
+		/* stop batch scan */
+		CASE_RETURN_STRING(WMI_BATCH_SCAN_DISABLE_CMDID);
+		/* get batch scan result */
+		CASE_RETURN_STRING(WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID);
+		/* OEM related cmd */
+		CASE_RETURN_STRING(WMI_OEM_REQ_CMDID);
+		CASE_RETURN_STRING(WMI_OEM_REQUEST_CMDID);
+		/* NAN request cmd */
+		CASE_RETURN_STRING(WMI_NAN_CMDID);
+		/* Modem power state cmd */
+		CASE_RETURN_STRING(WMI_MODEM_POWER_STATE_CMDID);
+		CASE_RETURN_STRING(WMI_REQUEST_STATS_EXT_CMDID);
+		CASE_RETURN_STRING(WMI_OBSS_SCAN_ENABLE_CMDID);
+		CASE_RETURN_STRING(WMI_OBSS_SCAN_DISABLE_CMDID);
+		CASE_RETURN_STRING(WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_SCAN_CMD);
+		CASE_RETURN_STRING(WMI_PDEV_SET_LED_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID);
+		CASE_RETURN_STRING(WMI_CHAN_AVOID_UPDATE_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID);
+		CASE_RETURN_STRING(WMI_REQUEST_LINK_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_START_LINK_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_CLEAR_LINK_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_GET_FW_MEM_DUMP_CMDID);
+		CASE_RETURN_STRING(WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_LPI_START_SCAN_CMDID);
+		CASE_RETURN_STRING(WMI_LPI_STOP_SCAN_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_START_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_STOP_CMDID);
+		CASE_RETURN_STRING
+			(WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_SET_CAPABILITIES_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_GET_CAPABILITIES_CMDID);
+		CASE_RETURN_STRING(WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_SYNCH_COMPLETE);
+		CASE_RETURN_STRING(WMI_D0_WOW_ENABLE_DISABLE_CMDID);
+		CASE_RETURN_STRING(WMI_EXTWOW_ENABLE_CMDID);
+		CASE_RETURN_STRING(WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID);
+		CASE_RETURN_STRING(WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID);
+		CASE_RETURN_STRING(WMI_UNIT_TEST_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_SET_RIC_REQUEST_CMDID);
+		CASE_RETURN_STRING(WMI_PDEV_GET_TEMPERATURE_CMDID);
+		CASE_RETURN_STRING(WMI_SET_DHCP_SERVER_OFFLOAD_CMDID);
+		CASE_RETURN_STRING(WMI_TPC_CHAINMASK_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID);
+		CASE_RETURN_STRING(WMI_SCAN_PROB_REQ_OUI_CMDID);
+		CASE_RETURN_STRING(WMI_TDLS_SET_OFFCHAN_MODE_CMDID);
+		CASE_RETURN_STRING(WMI_PDEV_SET_LED_FLASHING_CMDID);
+		CASE_RETURN_STRING(WMI_MDNS_OFFLOAD_ENABLE_CMDID);
+		CASE_RETURN_STRING(WMI_MDNS_SET_FQDN_CMDID);
+		CASE_RETURN_STRING(WMI_MDNS_SET_RESPONSE_CMDID);
+		CASE_RETURN_STRING(WMI_MDNS_GET_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_INVOKE_CMDID);
+		CASE_RETURN_STRING(WMI_SET_ANTENNA_DIVERSITY_CMDID);
+		CASE_RETURN_STRING(WMI_SAP_OFL_ENABLE_CMDID);
+		CASE_RETURN_STRING(WMI_APFIND_CMDID);
+		CASE_RETURN_STRING(WMI_PASSPOINT_LIST_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_SET_SCHED_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_SET_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_SET_UTC_TIME_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_START_TIMING_ADVERT_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_STOP_TIMING_ADVERT_CMDID);
+		CASE_RETURN_STRING(WMI_OCB_GET_TSF_TIMER_CMDID);
+		CASE_RETURN_STRING(WMI_DCC_GET_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_DCC_CLEAR_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_DCC_UPDATE_NDL_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_FILTER_CMDID);
+		CASE_RETURN_STRING(WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_DEBUG_MESG_FLUSH_CMDID);
+		CASE_RETURN_STRING(WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID);
+		CASE_RETURN_STRING(WMI_SOC_SET_PCL_CMDID);
+		CASE_RETURN_STRING(WMI_SOC_SET_HW_MODE_CMDID);
+		CASE_RETURN_STRING(WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID);
+		CASE_RETURN_STRING(WMI_DIAG_EVENT_LOG_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_PACKET_FILTER_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_PACKET_FILTER_ENABLE_CMDID);
+		CASE_RETURN_STRING(WMI_SAP_SET_BLACKLIST_PARAM_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_UDP_SVC_OFLD_CMDID);
+		CASE_RETURN_STRING(WMI_MGMT_TX_SEND_CMDID);
+		CASE_RETURN_STRING(WMI_SOC_SET_ANTENNA_MODE_CMDID);
+		CASE_RETURN_STRING(WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID);
+		CASE_RETURN_STRING(WMI_AP_PS_EGAP_PARAM_CMDID);
+		CASE_RETURN_STRING(WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID);
+		CASE_RETURN_STRING(WMI_BPF_GET_CAPABILITY_CMDID);
+		CASE_RETURN_STRING(WMI_BPF_GET_VDEV_STATS_CMDID);
+		CASE_RETURN_STRING(WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID);
+		CASE_RETURN_STRING(WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID);
+
+	}
+
+	return "Invalid WMI cmd";
+}
+
+#ifdef QCA_WIFI_3_0_EMU
+/**
+ * wma_log_cmd_id() - log the WMI command about to be sent
+ * @cmd_id: WMI command id
+ *
+ * Emulation builds log at error level so the command trace is always
+ * visible; production builds (the #else variant) log at debug level.
+ *
+ * Return: none
+ */
+static inline void wma_log_cmd_id(WMI_CMD_ID cmd_id)
+{
+	WMA_LOGE("Send WMI command:%s command_id:%d",
+		 get_wmi_cmd_string(cmd_id), cmd_id);
+}
+#else
+static inline void wma_log_cmd_id(WMI_CMD_ID cmd_id)
+{
+	WMA_LOGD("Send WMI command:%s command_id:%d",
+		 get_wmi_cmd_string(cmd_id), cmd_id);
+}
+#endif
+
+/**
+ * wmi_is_runtime_pm_cmd() - check if a cmd is part of the suspend resume sequence
+ * @cmd_id: command to check
+ *
+ * These commands implement WOW-based suspend/resume and therefore must
+ * be allowed through (with the auto-PM HTC tag) while runtime PM is in
+ * progress.
+ *
+ * Return: true if the command is part of the suspend resume sequence.
+ */
+bool wmi_is_runtime_pm_cmd(WMI_CMD_ID cmd_id)
+{
+	return (cmd_id == WMI_WOW_ENABLE_CMDID) ||
+	       (cmd_id == WMI_PDEV_SUSPEND_CMDID) ||
+	       (cmd_id == WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID) ||
+	       (cmd_id == WMI_WOW_ADD_WAKE_PATTERN_CMDID) ||
+	       (cmd_id == WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID) ||
+	       (cmd_id == WMI_PDEV_RESUME_CMDID) ||
+	       (cmd_id == WMI_WOW_DEL_WAKE_PATTERN_CMDID) ||
+	       (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID);
+}
+
+/**
+ * wmi_unified_cmd_send() - WMI command API
+ * @wmi_handle: handle to wmi
+ * @buf: wmi buf
+ * @len: wmi buffer length (excluding the WMI_CMD_HDR)
+ * @cmd_id: wmi command id
+ *
+ * Validates the TLV payload, prepends the WMI_CMD_HDR and hands the
+ * packet to HTC. On failure the caller retains ownership of @buf.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; a negative errno or
+ *	   CDF_STATUS_E_FAILURE on failure
+ */
+int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
+			 WMI_CMD_ID cmd_id)
+{
+	HTC_PACKET *pkt;
+	A_STATUS status;
+	/* Fix: htc_tag was used without a visible definition. Default to
+	 * 0 (untagged) and tag runtime-PM commands so HTC can let them
+	 * through while runtime suspend is in progress.
+	 */
+	uint16_t htc_tag = 0;
+
+	if (wmi_get_runtime_pm_inprogress(wmi_handle)) {
+		if (wmi_is_runtime_pm_cmd(cmd_id))
+			htc_tag = HTC_TX_PACKET_TAG_AUTO_PM;
+	} else if (cdf_atomic_read(&wmi_handle->is_target_suspended) &&
+	    ((WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID != cmd_id) &&
+	     (WMI_PDEV_RESUME_CMDID != cmd_id))) {
+		/* Only the wakeup/resume commands may reach a suspended
+		 * target; anything else indicates a sequencing bug. */
+		cdf_print("%s: Target is suspended  could not send WMI command \n",
+		       __func__);
+		CDF_ASSERT(0);
+		return -EBUSY;
+	}
+
+	/* Do sanity check on the TLV parameter structure */
+	{
+		void *buf_ptr = (void *)cdf_nbuf_data(buf);
+
+		if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id)
+		    != 0) {
+			cdf_print
+			("\nERROR: %s: Invalid WMI Param Buffer for Cmd:%d\n",
+				__func__, cmd_id);
+			return -EINVAL;
+		}
+	}
+
+	if (cdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
+		pr_err("%s, Failed to send cmd %x, no memory\n",
+		       __func__, cmd_id);
+		return -ENOMEM;
+	}
+
+	WMI_SET_FIELD(cdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
+
+	/* Bound the number of in-flight commands before allocating. */
+	cdf_atomic_inc(&wmi_handle->pending_cmds);
+	if (cdf_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
+		pr_err("\n%s: hostcredits = %d\n", __func__,
+		       wmi_get_host_credits(wmi_handle));
+		htc_dump_counter_info(wmi_handle->htc_handle);
+		cdf_atomic_dec(&wmi_handle->pending_cmds);
+		pr_err("%s: MAX 1024 WMI Pending cmds reached.\n", __func__);
+		CDF_BUG(0);
+		return -EBUSY;
+	}
+
+	pkt = cdf_mem_malloc(sizeof(*pkt));
+	if (!pkt) {
+		cdf_atomic_dec(&wmi_handle->pending_cmds);
+		pr_err("%s, Failed to alloc htc packet %x, no memory\n",
+		       __func__, cmd_id);
+		return -ENOMEM;
+	}
+
+	SET_HTC_PACKET_INFO_TX(pkt,
+			       NULL,
+			       cdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
+			       wmi_handle->wmi_endpoint_id, htc_tag);
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
+
+	wma_log_cmd_id(cmd_id);
+
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	/*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers */
+	if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
+		WMI_MGMT_COMMAND_RECORD(cmd_id,
+					((uint32_t *)cdf_nbuf_data(buf) + 2));
+	} else {
+		WMI_COMMAND_RECORD(cmd_id, ((uint32_t *) cdf_nbuf_data(buf) +
+					    2));
+	}
+
+	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+#endif
+
+	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
+
+	/* Fix: the original checked status twice (A_OK and then truthy);
+	 * the two branches are merged into a single failure path. */
+	if (A_OK != status) {
+		cdf_atomic_dec(&wmi_handle->pending_cmds);
+		pr_err("%s %d, htc_send_pkt failed\n", __func__, __LINE__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * wmi_unified_get_event_handler_ix() - gives event handler's index
+ * @wmi_handle: handle to wmi
+ * @event_id: wmi event id
+ *
+ * Scans the registered-handler table for @event_id.
+ *
+ * Return: event handler's index, or -1 when no handler is registered
+ */
+int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
+				     WMI_EVT_ID event_id)
+{
+	uint32_t i;
+	uint32_t bound = wmi_handle->max_event_idx;
+
+	/* Never walk past the fixed table size. */
+	if (bound > WMI_UNIFIED_MAX_EVENT)
+		bound = WMI_UNIFIED_MAX_EVENT;
+
+	for (i = 0; i < bound; i++) {
+		if (wmi_handle->event_id[i] == event_id &&
+		    wmi_handle->event_handler[i] != NULL)
+			return i;
+	}
+
+	return -1;
+}
+
+/**
+ * wmi_unified_register_event_handler() - register wmi event handler
+ * @wmi_handle: handle to wmi
+ * @event_id: wmi event id
+ * @handler_func: wmi event handler function
+ *
+ * Appends the handler to the next free slot of the handler table.
+ *
+ * Return: 0 on success, CDF_STATUS_E_FAILURE on duplicate or full table
+ */
+int wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
+				       WMI_EVT_ID event_id,
+				       wmi_unified_event_handler handler_func)
+{
+	uint32_t slot;
+
+	/* Reject duplicate registrations for the same event id. */
+	if (wmi_unified_get_event_handler_ix(wmi_handle, event_id) != -1) {
+		cdf_print("%s : event handler already registered 0x%x \n",
+		       __func__, event_id);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Reject when the handler table is full. */
+	if (wmi_handle->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
+		cdf_print("%s : no more event handlers 0x%x \n",
+		       __func__, event_id);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	slot = wmi_handle->max_event_idx++;
+	wmi_handle->event_handler[slot] = handler_func;
+	wmi_handle->event_id[slot] = event_id;
+
+	return 0;
+}
+
+/**
+ * wmi_unified_unregister_event_handler() - unregister wmi event handler
+ * @wmi_handle: handle to wmi
+ * @event_id: wmi event id
+ *
+ * Removes the handler for @event_id and compacts the table by moving
+ * the last registered handler into the freed slot.
+ *
+ * Return: 0 on success, CDF_STATUS_E_FAILURE when not registered
+ */
+int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
+					 WMI_EVT_ID event_id)
+{
+	/* Fix: idx was uint32_t compared against -1; use a signed int to
+	 * match the -1 "not found" return of the lookup helper. */
+	int idx;
+
+	idx = wmi_unified_get_event_handler_ix(wmi_handle, event_id);
+	if (idx == -1) {
+		cdf_print("%s : event handler is not registered: event id 0x%x \n",
+		       __func__, event_id);
+		return CDF_STATUS_E_FAILURE;
+	}
+	wmi_handle->event_handler[idx] = NULL;
+	wmi_handle->event_id[idx] = 0;
+	/* Keep the table dense: move the last entry into the hole. */
+	--wmi_handle->max_event_idx;
+	wmi_handle->event_handler[idx] =
+		wmi_handle->event_handler[wmi_handle->max_event_idx];
+	wmi_handle->event_id[idx] =
+		wmi_handle->event_id[wmi_handle->max_event_idx];
+
+	return 0;
+}
+
+/**
+ * wmi_process_fw_event_tasklet_ctx() - process in tasklet context
+ * @wmi_handle: handle to wmi
+ * @htc_packet: pointer to htc packet
+ *
+ * Dispatches the event synchronously in tasklet context; reserve this
+ * path for time-sensitive events only.
+ *
+ * Return: none
+ */
+static void wmi_process_fw_event_tasklet_ctx(struct wmi_unified *wmi_handle,
+					     HTC_PACKET *htc_packet)
+{
+	wmi_buf_t evt_buf = (wmi_buf_t) htc_packet->pPktContext;
+
+	__wmi_control_rx(wmi_handle, evt_buf);
+}
+
+/**
+ * wmi_process_fw_event_mc_thread_ctx() - process in mc thread context
+ * @wmi_handle: handle to wmi
+ * @htc_packet: pointer to htc packet
+ *
+ * Default dispatch path: hands the event to the registered WMA callback
+ * so protocol-stack events are serialized through the MC thread,
+ * reducing context switching and race conditions.
+ *
+ * Return: none
+ */
+static void wmi_process_fw_event_mc_thread_ctx(struct wmi_unified *wmi_handle,
+					       HTC_PACKET *htc_packet)
+{
+	wmi_buf_t evt_buf = (wmi_buf_t) htc_packet->pPktContext;
+
+	wmi_handle->wma_process_fw_event_handler_cbk(wmi_handle, evt_buf);
+}
+
+/**
+ * wmi_process_fw_event_worker_thread_ctx() - process in worker thread context
+ * @wmi_handle: handle to wmi
+ * @htc_packet: pointer to htc packet
+ *
+ * Event process by below function will be in worker thread context.
+ * Use this method for events which are not critical and not
+ * handled in protocol stack.
+ *
+ * Return: none
+ */
+static void wmi_process_fw_event_worker_thread_ctx
+		(struct wmi_unified *wmi_handle, HTC_PACKET *htc_packet)
+{
+	wmi_buf_t evt_buf;
+	uint32_t id;
+	uint8_t *data;
+
+	evt_buf = (wmi_buf_t) htc_packet->pPktContext;
+	id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+	data = cdf_nbuf_data(evt_buf);
+
+	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	/* Exclude 4 bytes of TLV header */
+	WMI_RX_EVENT_RECORD(id, ((uint8_t *) data + 4));
+	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	cdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
+	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	schedule_work(&wmi_handle->rx_event_work);
+	return;
+}
+
+/**
+ * wmi_control_rx() - route an incoming WMI event to an execution context
+ * @ctx: handle to wmi (opaque HTC endpoint context)
+ * @htc_packet: pointer to htc packet
+ *
+ * Temporarily added to support older WMI events. We should move all
+ * events to unified when the target is ready to support it.
+ *
+ * Return: none
+ */
+void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
+{
+	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
+	wmi_buf_t evt_buf = (wmi_buf_t) htc_packet->pPktContext;
+	uint32_t id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR,
+				    COMMANDID);
+
+	switch (id) {
+	case WMI_TX_PAUSE_EVENTID:
+	case WMI_WOW_WAKEUP_HOST_EVENTID:
+	case WMI_PDEV_RESUME_EVENTID:
+	case WMI_D0_WOW_DISABLE_ACK_EVENTID:
+		/* time-sensitive: handle directly in tasklet context */
+		wmi_process_fw_event_tasklet_ctx(wmi_handle, htc_packet);
+		break;
+	case WMI_DEBUG_MESG_EVENTID:
+	case WMI_DFS_RADAR_EVENTID:
+	case WMI_PHYERR_EVENTID:
+	case WMI_PEER_STATE_EVENTID:
+	case WMI_MGMT_RX_EVENTID:
+	case WMI_ROAM_EVENTID:
+		/* non-critical: defer to the rx worker thread */
+		wmi_process_fw_event_worker_thread_ctx(wmi_handle, htc_packet);
+		break;
+	default:
+		/* everything else is serialized through the MC thread */
+		wmi_process_fw_event_mc_thread_ctx(wmi_handle, htc_packet);
+		break;
+	}
+}
+
+/**
+ * wmi_process_fw_event() - process any fw event
+ * @wmi_handle: wmi handle
+ * @evt_buf: fw event buffer
+ *
+ * This function process any fw event to serialize it through mc thread.
+ * Thin public wrapper around __wmi_control_rx(); @evt_buf is consumed
+ * (freed) by the callee.
+ *
+ * Return: none
+ */
+void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
+{
+	__wmi_control_rx(wmi_handle, evt_buf);
+}
+
+/**
+ * __wmi_control_rx() - process serialize wmi event callback
+ * @wmi_handle: wmi handle
+ * @evt_buf: fw event buffer
+ *
+ * Strips the WMI_CMD_HDR, validates/pads the event TLVs, then either
+ * dispatches to a registered event handler or handles the service
+ * ready/ready events inline. Always frees @evt_buf and any TLV
+ * bookkeeping allocated by wmitlv_check_and_pad_event_tlvs().
+ *
+ * Return: none
+ */
+void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
+{
+	uint32_t id;
+	uint8_t *data;
+	uint32_t len;
+	void *wmi_cmd_struct_ptr = NULL;
+	int tlv_ok_status = 0;
+
+	/* Read the event id before the header is stripped below. */
+	id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
+
+	if (cdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
+		goto end;
+
+	data = cdf_nbuf_data(evt_buf);
+	len = cdf_nbuf_len(evt_buf);
+
+	/* Validate and pad(if necessary) the TLVs */
+	tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
+							data, len, id,
+							&wmi_cmd_struct_ptr);
+	if (tlv_ok_status != 0) {
+		pr_err("%s: Error: id=0x%d, wmitlv_check_and_pad_tlvs ret=%d\n",
+		       __func__, id, tlv_ok_status);
+		goto end;
+	}
+
+	if ((id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) &&
+		/* WMI_SERVICE_READY_EXT_EVENTID is supposed to be part of the
+		 * WMI_GRP_START group. Since the group is out of space, FW
+		 * has accommodated this in WMI_GRP_VDEV.
+		 * WMI_SERVICE_READY_EXT_EVENTID does not have any specific
+		 * event handler registered. So, we do not want to go through
+		 * the WMI registered event handler path for this event.
+		 */
+		(id != WMI_SERVICE_READY_EXT_EVENTID)) {
+		/* NOTE(review): idx is unsigned but compared against -1;
+		 * this works via the usual unsigned conversion of the int
+		 * return value, but a signed int would be clearer. */
+		uint32_t idx = 0;
+
+		idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
+		if (idx == -1) {
+			cdf_print
+				("%s : event handler is not registered: event id 0x%x\n",
+				__func__, id);
+			goto end;
+		}
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+		cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+		/* Exclude 4 bytes of TLV header */
+		if (id == WMI_MGMT_TX_COMPLETION_EVENTID) {
+			WMI_MGMT_EVENT_RECORD(id, ((uint8_t *) data + 4));
+		} else {
+			WMI_EVENT_RECORD(id, ((uint8_t *) data + 4));
+		}
+		cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+#endif
+		/* Call the WMI registered event handler */
+		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
+						wmi_cmd_struct_ptr, len);
+		goto end;
+	}
+
+	/* Events below WMI_GRP_START are handled inline here. */
+	switch (id) {
+	default:
+		cdf_print("%s: Unhandled WMI event %d\n", __func__, id);
+		break;
+	case WMI_SERVICE_READY_EVENTID:
+		cdf_print("%s: WMI UNIFIED SERVICE READY event\n", __func__);
+		wma_rx_service_ready_event(wmi_handle->scn_handle,
+					   wmi_cmd_struct_ptr);
+		break;
+	case WMI_SERVICE_READY_EXT_EVENTID:
+		WMA_LOGA("%s: WMI UNIFIED SERVICE READY Extended event",
+			__func__);
+		wma_rx_service_ready_ext_event(wmi_handle->scn_handle,
+						wmi_cmd_struct_ptr);
+		break;
+	case WMI_READY_EVENTID:
+		cdf_print("%s:  WMI UNIFIED READY event\n", __func__);
+		wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
+		break;
+	}
+end:
+	/* Common exit: release TLV scratch memory and the event buffer. */
+	wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
+	cdf_nbuf_free(evt_buf);
+}
+
+/**
+ * wmi_rx_event_work() - process rx event in rx work queue context
+ * @work: rx work queue struct
+ *
+ * Drains the wmi event queue, dispatching each buffered event through
+ * __wmi_control_rx(). The queue lock is dropped while each event is
+ * processed so producers are never blocked during dispatch.
+ *
+ * Return: none
+ */
+void wmi_rx_event_work(struct work_struct *work)
+{
+	struct wmi_unified *wmi = container_of(work, struct wmi_unified,
+					       rx_event_work);
+	wmi_buf_t buf;
+
+	for (;;) {
+		cdf_spin_lock_bh(&wmi->eventq_lock);
+		buf = cdf_nbuf_queue_remove(&wmi->event_queue);
+		cdf_spin_unlock_bh(&wmi->eventq_lock);
+		if (!buf)
+			break;
+		__wmi_control_rx(wmi, buf);
+	}
+}
+
+/* WMI Initialization functions */
+
+#ifdef FEATURE_RUNTIME_PM
+/**
+ * wmi_runtime_pm_init() - initialize runtime pm wmi variables
+ * @wmi_handle: wmi context
+ *
+ * Return: none
+ */
+void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
+{
+	cdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
+}
+#else
+/* Stub: runtime PM state is not tracked when FEATURE_RUNTIME_PM is off. */
+void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
+{
+}
+#endif
+
+/**
+ * wmi_unified_attach() -  attach for unified WMI
+ *
+ * @param scn_handle  : handle to SCN.
+ * @wma_process_fw_event_handler_cbk: rx callbacks
+ *
+ * Allocates and zero-initializes the wmi context, its locks, queues
+ * and the rx worker.
+ *
+ * @Return: wmi handle, or NULL on allocation failure.
+ */
+void *wmi_unified_attach(ol_scn_t scn_handle,
+			 wma_process_fw_event_handler_cbk func)
+{
+	struct wmi_unified *handle;
+
+	handle = (struct wmi_unified *)os_malloc(NULL,
+				sizeof(struct wmi_unified),
+				GFP_ATOMIC);
+	if (handle == NULL) {
+		cdf_print("allocation of wmi handle failed %zu \n",
+		       sizeof(struct wmi_unified));
+		return NULL;
+	}
+
+	OS_MEMZERO(handle, sizeof(struct wmi_unified));
+	handle->scn_handle = scn_handle;
+	handle->wma_process_fw_event_handler_cbk = func;
+
+	/* counters and locks */
+	cdf_atomic_init(&handle->pending_cmds);
+	cdf_atomic_init(&handle->is_target_suspended);
+	wmi_runtime_pm_init(handle);
+	cdf_spinlock_init(&handle->eventq_lock);
+	cdf_nbuf_queue_init(&handle->event_queue);
+#ifdef CONFIG_CNSS
+	cnss_init_work(&handle->rx_event_work, wmi_rx_event_work);
+#else
+	INIT_WORK(&handle->rx_event_work, wmi_rx_event_work);
+#endif
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+	cdf_spinlock_init(&handle->wmi_record_lock);
+#endif
+
+	return handle;
+}
+
+/**
+ * wmi_unified_detach() -  detach for unified WMI
+ *
+ * @wmi_handle  : handle to wmi.
+ *
+ * Flushes the rx worker, frees every queued event buffer and releases
+ * the wmi context itself.
+ *
+ * @Return: none.
+ */
+void wmi_unified_detach(struct wmi_unified *wmi_handle)
+{
+	wmi_buf_t buf;
+
+	/* Fix: the original NULL check came AFTER the handle had already
+	 * been dereferenced; check up front instead. The trailing
+	 * "wmi_handle = NULL" was also dropped - assigning a by-value
+	 * parameter has no effect on the caller. */
+	if (wmi_handle == NULL)
+		return;
+
+	cds_flush_work(&wmi_handle->rx_event_work);
+	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+	while (buf) {
+		cdf_nbuf_free(buf);
+		buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+	}
+	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	OS_FREE(wmi_handle);
+}
+
+/**
+ * wmi_unified_remove_work() - detach for WMI work
+ * @wmi_handle: handle to WMI
+ *
+ * A function that does not fully detach WMI, but just remove work
+ * queue items associated with it. This is used to make sure that
+ * before any other processing code that may destroy related contexts
+ * (HTC, etc), work queue processing on WMI has already been stopped.
+ *
+ * Return: None
+ */
+void
+wmi_unified_remove_work(struct wmi_unified *wmi_handle)
+{
+	wmi_buf_t buf;
+
+	CDF_TRACE(CDF_MODULE_ID_WMI, CDF_TRACE_LEVEL_INFO,
+		"Enter: %s", __func__);
+	cds_flush_work(&wmi_handle->rx_event_work);
+	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+	while (buf) {
+		cdf_nbuf_free(buf);
+		buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
+	}
+	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	/* Fix: exit trace used CDF_MODULE_ID_WMA while the entry trace
+	 * used CDF_MODULE_ID_WMI; use WMI for both so the pair lands in
+	 * the same module's log stream. */
+	CDF_TRACE(CDF_MODULE_ID_WMI, CDF_TRACE_LEVEL_INFO,
+		"Done: %s", __func__);
+}
+
+/**
+ * wmi_htc_tx_complete() - HTC tx-completion callback for WMI commands
+ * @ctx: handle to wmi (opaque HTC endpoint context)
+ * @htc_pkt: completed HTC packet
+ *
+ * Optionally records the completion for debugging, then frees the
+ * command nbuf and the HTC packet and decrements the pending-command
+ * counter incremented by wmi_unified_cmd_send().
+ */
+void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
+{
+	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
+	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+	uint32_t cmd_id;
+#endif
+
+	ASSERT(wmi_cmd_buf);
+#ifdef WMI_INTERFACE_EVENT_LOGGING
+	cmd_id = WMI_GET_FIELD(cdf_nbuf_data(wmi_cmd_buf),
+			       WMI_CMD_HDR, COMMANDID);
+
+#ifdef QCA_WIFI_3_0_EMU
+	/* Emulation builds print every completion for visibility. */
+	cdf_print
+		("\nSent WMI command:%s command_id:0x%x over dma and recieved tx complete interupt\n",
+		 get_wmi_cmd_string(cmd_id), cmd_id);
+#endif
+
+	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	/* Record 16 bytes of WMI cmd tx complete data
+	   - exclude TLV and WMI headers */
+	if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
+		WMI_MGMT_COMMAND_TX_CMP_RECORD(cmd_id,
+				((uint32_t *) cdf_nbuf_data(wmi_cmd_buf) + 2));
+	} else {
+		WMI_COMMAND_TX_CMP_RECORD(cmd_id,
+				((uint32_t *) cdf_nbuf_data(wmi_cmd_buf) + 2));
+	}
+
+	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+#endif
+	cdf_nbuf_free(wmi_cmd_buf);
+	cdf_mem_free(htc_pkt);
+	cdf_atomic_dec(&wmi_handle->pending_cmds);
+}
+
+/**
+ * wmi_unified_connect_htc_service() - connect WMI to the HTC control service
+ *
+ * @wmi_handle: handle to WMI.
+ * @htc_handle: handle to HTC.
+ *
+ * Registers the WMI rx/tx-complete callbacks with HTC and records the
+ * endpoint id and maximum message length returned by the service.
+ * (The original header here was a copy-paste of wmi_get_host_credits.)
+ *
+ * @Return: EOK on success, HTC error status otherwise.
+ */
+int
+wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
+				void *htc_handle)
+{
+
+	int status;
+	HTC_SERVICE_CONNECT_RESP response;
+	HTC_SERVICE_CONNECT_REQ connect;
+
+	OS_MEMZERO(&connect, sizeof(connect));
+	OS_MEMZERO(&response, sizeof(response));
+
+	/* meta data is unused for now */
+	connect.pMetaData = NULL;
+	connect.MetaDataLength = 0;
+	/* these fields are the same for all service endpoints */
+	connect.EpCallbacks.pContext = wmi_handle;
+	connect.EpCallbacks.EpTxCompleteMultiple =
+		NULL /* Control path completion ar6000_tx_complete */;
+	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
+	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
+	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
+	connect.EpCallbacks.EpTxComplete =
+		wmi_htc_tx_complete /* ar6000_tx_queue_full */;
+
+	/* connect to control service */
+	connect.service_id = WMI_CONTROL_SVC;
+	status = htc_connect_service(htc_handle, &connect,
+				&response);
+
+	if (status != EOK) {
+		cdf_print
+			("Failed to connect to WMI CONTROL service status:%d \n",
+			status);
+		return status;
+	}
+	wmi_handle->wmi_endpoint_id = response.Endpoint;
+	wmi_handle->htc_handle = htc_handle;
+	wmi_handle->max_msg_len = response.MaxMsgLength;
+
+	return EOK;
+}
+
+/**
+ * wmi_get_host_credits() -  WMI API to get updated host_credits
+ *
+ * @wmi_handle: handle to WMI.
+ *
+ * @Return: updated host_credits.
+ */
+int wmi_get_host_credits(wmi_unified_t wmi_handle)
+{
+	int credits = 0;
+
+	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
+						 &credits);
+
+	return credits;
+}
+
+/**
+ * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC queue
+ *
+ * @wmi_handle: handle to WMI.
+ *
+ * Counter is maintained by wmi_unified_cmd_send()/wmi_htc_tx_complete().
+ *
+ * @Return: Pending Commands in the HTC queue.
+ */
+int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
+{
+	return cdf_atomic_read(&wmi_handle->pending_cmds);
+}
+
+/**
+ * wmi_set_target_suspend() -  WMI API to set target suspend state
+ *
+ * @wmi_handle: handle to WMI.
+ * @val: suspend state boolean.
+ *
+ * While set, wmi_unified_cmd_send() rejects all commands except the
+ * wakeup/resume ones.
+ *
+ * @Return: none.
+ */
+void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
+{
+	cdf_atomic_set(&wmi_handle->is_target_suspended, val);
+}
+
+#ifdef FEATURE_RUNTIME_PM
+/**
+ * wmi_set_runtime_pm_inprogress() - set the runtime-PM-in-progress flag
+ * @wmi_handle: handle to WMI
+ * @val: flag value
+ *
+ * Return: none
+ */
+void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
+{
+	cdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
+}
+
+/**
+ * wmi_get_runtime_pm_inprogress() - query the runtime-PM-in-progress flag
+ * @wmi_handle: handle to WMI
+ *
+ * Return: true if a runtime suspend/resume sequence is in progress
+ */
+inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
+{
+	return cdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
+}
+#endif

+ 38 - 0
wmi_version_whitelist.c

@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Every Product Line or chipset or team can have its own Whitelist table.
+ * The following is a list of versions that the present software can support
+ * even though its versions are incompatible. Any entry here means that the
+ * indicated version does not break WMI compatibility even though it has
+ * a minor version change.
+ */
+/* Single placeholder entry; 0x5F414351/0x00004C4D presumably encode the
+ * ASCII namespace string "QCA_ML" in little-endian words - TODO confirm
+ * against the wmi_whitelist_version_info field layout. */
+wmi_whitelist_version_info version_whitelist[] = {
+	{0, 0, 0x5F414351, 0x00004C4D, 0, 0}
+	/* Placeholder: Major=0, Minor=0, Namespace="QCA_ML" (Dummy entry) */
+};