Browse code

qcacld-3.0: Initial snapshot of ihelium wlan driver

qcacld-3.0: Initial snapshot of ihelium wlan driver
to match code-scanned SU Release 5.0.0.139. This is
open-source version of wlan for next Android release.

Change-Id: Icf598ca97da74f84bea607e4e902d1889806f507
Prakash Dhavali 9 years ago
parent
commit
7090c5fd8d
100 changed files with 55,381 additions and 0 deletions
  1. 114 0
      Android.mk
  2. 1331 0
      Kbuild
  3. 110 0
      Kconfig
  4. 20 0
      Makefile
  5. 591 0
      config/WCNSS_qcom_cfg.ini
  6. 60 0
      core/bmi/inc/bmi.h
  7. 64 0
      core/bmi/inc/ol_fw.h
  8. 87 0
      core/bmi/inc/ol_if_athvar.h
  9. 470 0
      core/bmi/src/bmi.c
  10. 321 0
      core/bmi/src/bmi_1.c
  11. 452 0
      core/bmi/src/bmi_2.c
  12. 607 0
      core/bmi/src/i_ar6320v2_regtable.h
  13. 149 0
      core/bmi/src/i_bmi.h
  14. 1637 0
      core/bmi/src/ol_fw.c
  15. 140 0
      core/cdf/inc/cdf_atomic.h
  16. 138 0
      core/cdf/inc/cdf_defer.h
  17. 154 0
      core/cdf/inc/cdf_event.h
  18. 110 0
      core/cdf/inc/cdf_list.h
  19. 296 0
      core/cdf/inc/cdf_lock.h
  20. 253 0
      core/cdf/inc/cdf_mc_timer.h
  21. 225 0
      core/cdf/inc/cdf_memory.h
  22. 1053 0
      core/cdf/inc/cdf_nbuf.h
  23. 117 0
      core/cdf/inc/cdf_net_types.h
  24. 118 0
      core/cdf/inc/cdf_softirq_timer.h
  25. 111 0
      core/cdf/inc/cdf_status.h
  26. 83 0
      core/cdf/inc/cdf_threads.h
  27. 184 0
      core/cdf/inc/cdf_time.h
  28. 283 0
      core/cdf/inc/cdf_trace.h
  29. 492 0
      core/cdf/inc/cdf_types.h
  30. 325 0
      core/cdf/inc/cdf_util.h
  31. 300 0
      core/cdf/inc/osdep.h
  32. 50 0
      core/cdf/src/cdf_defer.c
  33. 270 0
      core/cdf/src/cdf_event.c
  34. 225 0
      core/cdf/src/cdf_list.c
  35. 491 0
      core/cdf/src/cdf_lock.c
  36. 800 0
      core/cdf/src/cdf_mc_timer.c
  37. 631 0
      core/cdf/src/cdf_memory.c
  38. 1017 0
      core/cdf/src/cdf_nbuf.c
  39. 107 0
      core/cdf/src/cdf_threads.c
  40. 1018 0
      core/cdf/src/cdf_trace.c
  41. 78 0
      core/cdf/src/i_cdf_atomic.h
  42. 106 0
      core/cdf/src/i_cdf_defer.h
  43. 62 0
      core/cdf/src/i_cdf_event.h
  44. 255 0
      core/cdf/src/i_cdf_lock.h
  45. 61 0
      core/cdf/src/i_cdf_mc_timer.h
  46. 1092 0
      core/cdf/src/i_cdf_nbuf.h
  47. 152 0
      core/cdf/src/i_cdf_softirq_timer.h
  48. 217 0
      core/cdf/src/i_cdf_time.h
  49. 145 0
      core/cdf/src/i_cdf_trace.h
  50. 234 0
      core/cdf/src/i_cdf_types.h
  51. 107 0
      core/cdf/src/i_cdf_util.h
  52. 124 0
      core/cds/inc/cds_api.h
  53. 732 0
      core/cds/inc/cds_concurrency.h
  54. 183 0
      core/cds/inc/cds_crypto.h
  55. 75 0
      core/cds/inc/cds_get_bin.h
  56. 2105 0
      core/cds/inc/cds_ieee80211_common.h
  57. 1374 0
      core/cds/inc/cds_ieee80211_defines.h
  58. 238 0
      core/cds/inc/cds_if_upperproto.h
  59. 165 0
      core/cds/inc/cds_mq.h
  60. 111 0
      core/cds/inc/cds_pack_align.h
  61. 185 0
      core/cds/inc/cds_packet.h
  62. 33 0
      core/cds/inc/cds_queue.h
  63. 310 0
      core/cds/inc/cds_reg_service.h
  64. 1098 0
      core/cds/inc/cds_regdomain.h
  65. 2218 0
      core/cds/inc/cds_regdomain_common.h
  66. 451 0
      core/cds/inc/cds_sched.h
  67. 189 0
      core/cds/inc/cds_utils.h
  68. 2085 0
      core/cds/src/cds_api.c
  69. 6823 0
      core/cds/src/cds_concurrency.c
  70. 166 0
      core/cds/src/cds_get_bin.c
  71. 545 0
      core/cds/src/cds_ieee80211_common_i.h
  72. 215 0
      core/cds/src/cds_mq.c
  73. 348 0
      core/cds/src/cds_packet.c
  74. 1439 0
      core/cds/src/cds_reg_service.c
  75. 699 0
      core/cds/src/cds_regdomain.c
  76. 1270 0
      core/cds/src/cds_sched.c
  77. 1135 0
      core/cds/src/cds_utils.c
  78. 76 0
      core/cds/src/i_cds_packet.h
  79. 571 0
      core/cds/src/queue.h
  80. 562 0
      core/dp/htt/htt.c
  81. 1155 0
      core/dp/htt/htt_fw_stats.c
  82. 904 0
      core/dp/htt/htt_h2t.c
  83. 500 0
      core/dp/htt/htt_internal.h
  84. 2444 0
      core/dp/htt/htt_rx.c
  85. 935 0
      core/dp/htt/htt_t2h.c
  86. 864 0
      core/dp/htt/htt_tx.c
  87. 373 0
      core/dp/htt/htt_types.h
  88. 533 0
      core/dp/htt/rx_desc.h
  89. 543 0
      core/dp/ol/inc/ol_cfg.h
  90. 43 0
      core/dp/ol/inc/ol_ctrl_addba_api.h
  91. 44 0
      core/dp/ol/inc/ol_ctrl_api.h
  92. 47 0
      core/dp/ol/inc/ol_defines.h
  93. 353 0
      core/dp/ol/inc/ol_htt_api.h
  94. 863 0
      core/dp/ol/inc/ol_htt_rx_api.h
  95. 969 0
      core/dp/ol/inc/ol_htt_tx_api.h
  96. 42 0
      core/dp/ol/inc/ol_osif_api.h
  97. 103 0
      core/dp/ol/inc/ol_params.h
  98. 113 0
      core/dp/ol/inc/ol_txrx_api.h
  99. 1312 0
      core/dp/ol/inc/ol_txrx_ctrl_api.h
  100. 203 0
      core/dp/ol/inc/ol_txrx_dbg.h

+ 114 - 0
Android.mk

@@ -0,0 +1,114 @@
+# Android makefile for the WLAN Module
+
+# Assume no targets will be supported
+WLAN_CHIPSET :=
+
+ifeq ($(BOARD_HAS_QCOM_WLAN), true)
+
+# Build/Package options for 8084/8092/8960/8992/8994 target
+ifeq ($(call is-board-platform-in-list, apq8084 mpq8092 msm8960 msm8992 msm8994 msm8996 msm8998),true)
+	WLAN_CHIPSET := qca_cld
+	WLAN_SELECT  := CONFIG_QCA_CLD_WLAN=m
+endif # platform
+
+# Build/Package only in case of supported target
+ifneq ($(WLAN_CHIPSET),)
+
+LOCAL_PATH := $(call my-dir)
+
+# This makefile is only for DLKM
+ifneq ($(findstring vendor,$(LOCAL_PATH)),)
+
+ifneq ($(findstring opensource,$(LOCAL_PATH)),)
+    WLAN_PROPRIETARY := 0
+    WLAN_BLD_DIR := vendor/qcom/opensource/wlan
+else
+    WLAN_PROPRIETARY := 1
+    WLAN_BLD_DIR := vendor/qcom/proprietary/wlan-noship
+endif # opensource
+
+# DLKM_DIR was moved for JELLY_BEAN (PLATFORM_SDK 16)
+ifeq ($(call is-platform-sdk-version-at-least,16),true)
+       DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+else
+       DLKM_DIR := build/dlkm
+endif # platform-sdk-version
+
+# Copy WCNSS_cfg.dat and WCNSS_qcom_cfg.ini file from firmware_bin/ folder to target out directory.
+ifeq ($(call is-board-platform-in-list, msm8960),true)
+$(shell rm -f $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/WCNSS_cfg.dat)
+$(shell rm -f $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/WCNSS_qcom_cfg.ini)
+$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_cfg.dat $(TARGET_OUT_ETC)/firmware/wlan/qca_cld)
+$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_qcom_cfg.ini $(TARGET_OUT_ETC)/firmware/wlan/qca_cld)
+endif
+
+# Build wlan.ko as $(WLAN_CHIPSET)_wlan.ko
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+ifeq ($(WLAN_PROPRIETARY),1)
+	KBUILD_OPTIONS := WLAN_ROOT=../$(WLAN_BLD_DIR)/qcacld-new
+else
+	KBUILD_OPTIONS := WLAN_ROOT=../$(WLAN_BLD_DIR)/qcacld-3.0
+endif # WLAN_PROPRIETARY
+# We are actually building wlan.ko here, as per the
+# requirement we are specifying <chipset>_wlan.ko as LOCAL_MODULE.
+# This means we need to rename the module to <chipset>_wlan.ko
+# after wlan.ko is built.
+KBUILD_OPTIONS += MODNAME=wlan
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+KBUILD_OPTIONS += $(WLAN_SELECT)
+
+include $(CLEAR_VARS)
+ifeq ($(WLAN_PROPRIETARY),1)
+	LOCAL_MODULE              := proprietary_$(WLAN_CHIPSET)_wlan.ko
+else
+	LOCAL_MODULE              := $(WLAN_CHIPSET)_wlan.ko
+endif # WLAN_PROPRIETARY
+LOCAL_MODULE_KBUILD_NAME  := wlan.ko
+LOCAL_MODULE_TAGS         := debug
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(TARGET_OUT)/lib/modules/$(WLAN_CHIPSET)
+include $(DLKM_DIR)/AndroidKernelModule.mk
+###########################################################
+
+# Create Symbolic link
+ifeq ($(WLAN_PROPRIETARY),1)
+$(shell mkdir -p $(TARGET_OUT)/lib/modules; \
+        ln -sf /system/lib/modules/$(WLAN_CHIPSET)/$(LOCAL_MODULE) \
+               $(TARGET_OUT)/lib/modules/wlan.ko)
+endif
+$(shell ln -sf /persist/wlan_mac.bin $(TARGET_OUT_ETC)/firmware/wlan/qca_cld/wlan_mac.bin)
+
+ifeq ($(call is-board-platform-in-list, msm8960),true)
+$(shell ln -sf /firmware/image/bdwlan20.bin $(TARGET_OUT_ETC)/firmware/fakeboar.bin)
+$(shell ln -sf /firmware/image/otp20.bin $(TARGET_OUT_ETC)/firmware/otp.bin)
+$(shell ln -sf /firmware/image/utf20.bin $(TARGET_OUT_ETC)/firmware/utf.bin)
+$(shell ln -sf /firmware/image/qwlan20.bin $(TARGET_OUT_ETC)/firmware/athwlan.bin)
+
+$(shell ln -sf /firmware/image/bdwlan20.bin $(TARGET_OUT_ETC)/firmware/bdwlan20.bin)
+$(shell ln -sf /firmware/image/otp20.bin $(TARGET_OUT_ETC)/firmware/otp20.bin)
+$(shell ln -sf /firmware/image/utf20.bin $(TARGET_OUT_ETC)/firmware/utf20.bin)
+$(shell ln -sf /firmware/image/qwlan20.bin $(TARGET_OUT_ETC)/firmware/qwlan20.bin)
+
+$(shell ln -sf /firmware/image/bdwlan30.bin $(TARGET_OUT_ETC)/firmware/bdwlan30.bin)
+$(shell ln -sf /firmware/image/otp30.bin $(TARGET_OUT_ETC)/firmware/otp30.bin)
+$(shell ln -sf /firmware/image/utf30.bin $(TARGET_OUT_ETC)/firmware/utf30.bin)
+$(shell ln -sf /firmware/image/qwlan30.bin $(TARGET_OUT_ETC)/firmware/qwlan30.bin)
+endif
+
+# Copy config ini files to target
+#ifeq ($(call is-board-platform-in-list, msm8992 msm8994),false)
+ifeq ($(WLAN_PROPRIETARY),1)
+$(shell mkdir -p $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET))
+$(shell mkdir -p $(TARGET_OUT)/etc/wifi)
+$(shell rm -f $(TARGET_OUT)/etc/wifi/WCNSS_qcom_cfg.ini)
+$(shell rm -f $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET)/WCNSS_cfg.dat)
+$(shell cp $(LOCAL_PATH)/config/WCNSS_qcom_cfg.ini $(TARGET_OUT)/etc/wifi)
+$(shell cp $(LOCAL_PATH)/firmware_bin/WCNSS_cfg.dat $(TARGET_OUT)/etc/firmware/wlan/$(WLAN_CHIPSET))
+endif
+#endif
+
+endif # DLKM check
+
+endif # supported target check
+endif # WLAN enabled check

+ 1331 - 0
Kbuild

@@ -0,0 +1,1331 @@
+# We can build either as part of a standalone Kernel build or as
+# an external module.  Determine which mechanism is being used
+ifeq ($(MODNAME),)
+	KERNEL_BUILD := 1
+else
+	KERNEL_BUILD := 0
+endif
+
+ifeq ($(CONFIG_CNSS), y)
+ifndef CONFIG_ROME_IF
+	#use pci as default interface
+	CONFIG_ROME_IF = pci
+endif
+endif
+
+ifeq ($(KERNEL_BUILD),1)
+	# These are provided in external module based builds
+	# Need to explicitly define for Kernel-based builds
+	MODNAME := wlan
+	WLAN_ROOT := drivers/staging/qcacld-3.0
+endif
+
+# Make WLAN as open-source driver by default
+WLAN_OPEN_SOURCE := 1
+
+ifeq ($(KERNEL_BUILD), 0)
+	# These are configurable via Kconfig for kernel-based builds
+	# Need to explicitly configure for Android-based builds
+
+	ifeq ($(CONFIG_ARCH_MDM9630), y)
+	CONFIG_MOBILE_ROUTER := y
+	endif
+
+	ifeq ($(CONFIG_ARCH_MDM9640), y)
+	CONFIG_MOBILE_ROUTER := y
+	endif
+
+	#Flag to enable Legacy Fast Roaming3(LFR3)
+	CONFIG_QCACLD_WLAN_LFR3 := y
+
+	#JB kernel has PMKSA patches, hence enabling this flag
+	CONFIG_PRIMA_WLAN_OKC := y
+
+	# JB kernel has CPU enablement patches, so enable
+	ifeq ($(CONFIG_ROME_IF),pci)
+		CONFIG_PRIMA_WLAN_11AC_HIGH_TP := y
+	endif
+	ifeq ($(CONFIG_ROME_IF),usb)
+		CONFIG_PRIMA_WLAN_11AC_HIGH_TP := n
+	endif
+	ifeq ($(CONFIG_ROME_IF),sdio)
+		CONFIG_PRIMA_WLAN_11AC_HIGH_TP := n
+	endif
+
+	ifneq ($(CONFIG_MOBILE_ROUTER), y)
+	#Flag to enable TDLS feature
+	CONFIG_QCOM_TDLS := y
+	endif
+
+	ifeq ($(CONFIG_MOBILE_ROUTER), y)
+	CONFIG_QCACLD_FEATURE_GREEN_AP := y
+	endif
+
+	#Flag to enable Fast Transition (11r) feature
+	CONFIG_QCOM_VOWIFI_11R := y
+
+	ifneq ($(CONFIG_QCA_CLD_WLAN),)
+	        ifeq ($(CONFIG_CNSS),y)
+		#Flag to enable Protected Management Frames (11w) feature
+		CONFIG_WLAN_FEATURE_11W := y
+		#Flag to enable LTE CoEx feature
+		CONFIG_QCOM_LTE_COEX := y
+			ifneq ($(CONFIG_MOBILE_ROUTER), y)
+			#Flag to enable LPSS feature
+			CONFIG_WLAN_FEATURE_LPSS := y
+			endif
+		endif
+	endif
+
+
+        #Flag to enable Protected Management Frames (11w) feature
+        ifeq ($(CONFIG_ROME_IF),usb)
+                CONFIG_WLAN_FEATURE_11W := y
+        endif
+        ifeq ($(CONFIG_ROME_IF),sdio)
+                CONFIG_WLAN_FEATURE_11W := y
+        endif
+
+	ifneq ($(CONFIG_MOBILE_ROUTER), y)
+		#Flag to enable NAN
+		CONFIG_QCACLD_FEATURE_NAN := y
+	endif
+
+        #Flag to enable Linux QCMBR feature as default feature
+        ifeq ($(CONFIG_ROME_IF),usb)
+                CONFIG_LINUX_QCMBR :=y
+        endif
+
+        ifeq ($(CONFIG_CNSS_EOS),y)
+        CONFIG_FEATURE_BMI_2 :=y
+        endif
+
+	CONFIG_MPC_UT_FRAMEWORK := y
+
+	#Flag to enable offload packets feature
+	CONFIG_WLAN_OFFLOAD_PACKETS := y
+
+	#Flag to enable memdump feature
+	CONFIG_WLAN_FEATURE_MEMDUMP := y
+
+	#Flag to enable Fast Path feature
+	CONFIG_WLAN_FASTPATH := y
+
+	# Flag to enable NAPI
+	CONFIG_WLAN_NAPI := y
+	CONFIG_WLAN_NAPI_DEBUG := n
+
+	# Flag to enable FW based TX Flow control
+	CONFIG_WLAN_TX_FLOW_CONTROL_V2 := n
+
+	# Flag to enable LRO (Large Receive Offload)
+	ifeq ($(CONFIG_CNSS_EOS), y)
+		ifeq ($(CONFIG_INET_LRO), y)
+			CONFIG_WLAN_LRO := y
+		else
+			CONFIG_WLAN_LRO := n
+		endif
+	endif
+endif
+
+ifneq ($(CONFIG_MOBILE_ROUTER), y)
+# To enable ESE upload, dependent config
+# CONFIG_QCOM_ESE must be enabled.
+CONFIG_QCOM_ESE := y
+CONFIG_QCOM_ESE_UPLOAD := y
+endif
+
+# Feature flags which are not (currently) configurable via Kconfig
+
+#Whether to build debug version
+BUILD_DEBUG_VERSION := 1
+
+#Enable this flag to build driver in diag version
+BUILD_DIAG_VERSION := 1
+
+#Do we panic on bug?  default is to warn
+PANIC_ON_BUG := 1
+
+#Enable OL debug and wmi unified functions
+CONFIG_ATH_PERF_PWR_OFFLOAD := 1
+
+#Disable packet log
+CONFIG_REMOVE_PKT_LOG := 0
+
+#Enable 11AC TX
+ifeq ($(CONFIG_ROME_IF),pci)
+	CONFIG_ATH_11AC_TXCOMPACT := 1
+endif
+ifeq ($(CONFIG_ROME_IF),usb)
+	CONFIG_ATH_11AC_TXCOMPACT := 0
+endif
+
+#Enable per vdev Tx desc pool
+ifeq ($(CONFIG_ROME_IF),pci)
+	CONFIG_PER_VDEV_TX_DESC_POOL := 0
+endif
+ifeq ($(CONFIG_ROME_IF),usb)
+	CONFIG_PER_VDEV_TX_DESC_POOL := 1
+endif
+
+
+#Enable OS specific IRQ abstraction
+CONFIG_ATH_SUPPORT_SHARED_IRQ := 1
+
+#Enable message based HIF instead of RAW access in BMI
+ifeq ($(CONFIG_QCA_WIFI_SDIO), 1)
+CONFIG_HIF_MESSAGE_BASED := 0
+else
+CONFIG_HIF_MESSAGE_BASED := 1
+endif
+
+#Enable PCI specific APIS (dma, etc)
+ifeq ($(CONFIG_ROME_IF),pci)
+	CONFIG_HIF_PCI := 1
+endif
+
+#Enable pci read/write config functions
+ifeq ($(CONFIG_ROME_IF),pci)
+	CONFIG_ATH_PCI := 1
+endif
+ifeq ($(CONFIG_ROME_IF),usb)
+#CONFIG_ATH_PCI := 1
+endif
+
+ifneq ($(CONFIG_MOBILE_ROUTER), y)
+#Enable IBSS support on CLD
+CONFIG_QCA_IBSS_SUPPORT := 1
+endif
+
+#Enable power management suspend/resume functionality to PCI
+CONFIG_ATH_BUS_PM := 1
+
+#Enable FLOWMAC module support
+CONFIG_ATH_SUPPORT_FLOWMAC_MODULE := 0
+
+#Enable spectral support
+CONFIG_ATH_SUPPORT_SPECTRAL := 0
+
+#Enable HOST statistics support
+CONFIG_SUPPORT_HOST_STATISTICS := 0
+
+#Enable WDI Event support
+CONFIG_WDI_EVENT_ENABLE := 1
+
+#Endianess selection
+CONFIG_LITTLE_ENDIAN := 1
+
+#Enable TX reclaim support
+CONFIG_TX_CREDIT_RECLAIM_SUPPORT := 0
+
+#Enable FTM support
+CONFIG_QCA_WIFI_FTM := 1
+
+#Enable Checksum Offload
+CONFIG_CHECKSUM_OFFLOAD := 1
+
+#Enable GTK offload
+CONFIG_GTK_OFFLOAD := 1
+
+#Enable EXT WOW
+ifeq ($(CONFIG_ROME_IF),pci)
+	CONFIG_EXT_WOW := 1
+endif
+
+#Set this to 1 to catch erroneous Target accesses during debug.
+CONFIG_ATH_PCIE_ACCESS_DEBUG := 0
+
+#Enable IPA offload
+ifeq ($(CONFIG_IPA), y)
+CONFIG_IPA_OFFLOAD := 1
+endif
+
+#Enable Signed firmware support for split binary format
+CONFIG_QCA_SIGNED_SPLIT_BINARY_SUPPORT := 0
+
+#Enable single firmware binary format
+CONFIG_QCA_SINGLE_BINARY_SUPPORT := 0
+
+#Enable collecting target RAM dump after kernel panic
+CONFIG_TARGET_RAMDUMP_AFTER_KERNEL_PANIC := 1
+
+#Flag to enable/disable secure firmware feature
+CONFIG_FEATURE_SECURE_FIRMWARE := 0
+
+#Flag to enable Stats Ext implementation
+CONFIG_FEATURE_STATS_EXT := 1
+
+#Flag to force the inclusion of the 802.11p channels because support
+#for these channels has not yet been added to the kernel.
+CONFIG_STATICALLY_ADD_11P_CHANNELS := n
+
+ifeq ($(CONFIG_CFG80211),y)
+HAVE_CFG80211 := 1
+else
+ifeq ($(CONFIG_CFG80211),m)
+HAVE_CFG80211 := 1
+else
+HAVE_CFG80211 := 0
+endif
+endif
+
+############ UAPI ############
+UAPI_DIR :=	uapi
+UAPI_INC :=	-I$(WLAN_ROOT)/$(UAPI_DIR)/linux
+
+############ COMMON ############
+COMMON_DIR :=	core/common
+COMMON_INC :=	-I$(WLAN_ROOT)/$(COMMON_DIR)
+
+############ HDD ############
+HDD_DIR :=	core/hdd
+HDD_INC_DIR :=	$(HDD_DIR)/inc
+HDD_SRC_DIR :=	$(HDD_DIR)/src
+
+HDD_INC := 	-I$(WLAN_ROOT)/$(HDD_INC_DIR) \
+		-I$(WLAN_ROOT)/$(HDD_SRC_DIR)
+
+HDD_OBJS := 	$(HDD_SRC_DIR)/wlan_hdd_assoc.o \
+		$(HDD_SRC_DIR)/wlan_hdd_cfg.o \
+		$(HDD_SRC_DIR)/wlan_hdd_debugfs.o \
+		$(HDD_SRC_DIR)/wlan_hdd_driver_ops.o \
+		$(HDD_SRC_DIR)/wlan_hdd_ftm.o \
+		$(HDD_SRC_DIR)/wlan_hdd_hostapd.o \
+		$(HDD_SRC_DIR)/wlan_hdd_ioctl.o \
+		$(HDD_SRC_DIR)/wlan_hdd_main.o \
+		$(HDD_SRC_DIR)/wlan_hdd_ocb.o \
+		$(HDD_SRC_DIR)/wlan_hdd_oemdata.o \
+		$(HDD_SRC_DIR)/wlan_hdd_power.o \
+		$(HDD_SRC_DIR)/wlan_hdd_scan.o \
+		$(HDD_SRC_DIR)/wlan_hdd_softap_tx_rx.o \
+		$(HDD_SRC_DIR)/wlan_hdd_tx_rx.o \
+		$(HDD_SRC_DIR)/wlan_hdd_trace.o \
+		$(HDD_SRC_DIR)/wlan_hdd_wext.o \
+		$(HDD_SRC_DIR)/wlan_hdd_wmm.o \
+		$(HDD_SRC_DIR)/wlan_hdd_wowl.o
+
+ifeq ($(CONFIG_WLAN_LRO), y)
+HDD_OBJS +=     $(HDD_SRC_DIR)/wlan_hdd_lro.o
+endif
+
+ifeq ($(CONFIG_WLAN_NAPI), y)
+HDD_OBJS +=     $(HDD_SRC_DIR)/wlan_hdd_napi.o
+endif
+
+ifeq ($(CONFIG_IPA_OFFLOAD), 1)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_ipa.o
+endif
+
+ifeq ($(HAVE_CFG80211),1)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_cfg80211.o \
+		$(HDD_SRC_DIR)/wlan_hdd_ext_scan.o \
+		$(HDD_SRC_DIR)/wlan_hdd_stats.o \
+		$(HDD_SRC_DIR)/wlan_hdd_p2p.o
+endif
+
+ifeq ($(CONFIG_QCACLD_FEATURE_GREEN_AP),y)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_green_ap.o
+endif
+
+ifeq ($(CONFIG_QCACLD_FEATURE_NAN),y)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_nan.o
+endif
+
+ifeq ($(CONFIG_QCOM_TDLS),y)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_tdls.o
+endif
+
+ifeq ($(CONFIG_MPC_UT_FRAMEWORK),y)
+HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_conc_ut.o
+endif
+
+ifeq ($(CONFIG_WLAN_FEATURE_MEMDUMP),y)
+HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_memdump.o
+endif
+
+########### HOST DIAG LOG ###########
+HOST_DIAG_LOG_DIR :=	core/utils/host_diag_log
+
+HOST_DIAG_LOG_INC_DIR :=	$(HOST_DIAG_LOG_DIR)/inc
+HOST_DIAG_LOG_SRC_DIR :=	$(HOST_DIAG_LOG_DIR)/src
+
+HOST_DIAG_LOG_INC :=	-I$(WLAN_ROOT)/$(HOST_DIAG_LOG_INC_DIR) \
+			-I$(WLAN_ROOT)/$(HOST_DIAG_LOG_SRC_DIR)
+
+HOST_DIAG_LOG_OBJS +=	$(HOST_DIAG_LOG_SRC_DIR)/host_diag_log.o
+
+############ EPPING ############
+EPPING_DIR :=	core/utils/epping
+EPPING_INC_DIR :=	$(EPPING_DIR)/inc
+EPPING_SRC_DIR :=	$(EPPING_DIR)/src
+
+EPPING_INC := 	-I$(WLAN_ROOT)/$(EPPING_INC_DIR)
+
+EPPING_OBJS := $(EPPING_SRC_DIR)/epping_main.o \
+		$(EPPING_SRC_DIR)/epping_txrx.o \
+		$(EPPING_SRC_DIR)/epping_tx.o \
+		$(EPPING_SRC_DIR)/epping_rx.o \
+		$(EPPING_SRC_DIR)/epping_helper.o \
+
+
+############ MAC ############
+MAC_DIR :=	core/mac
+MAC_INC_DIR :=	$(MAC_DIR)/inc
+MAC_SRC_DIR :=	$(MAC_DIR)/src
+
+MAC_INC := 	-I$(WLAN_ROOT)/$(MAC_INC_DIR) \
+		-I$(WLAN_ROOT)/$(MAC_SRC_DIR)/dph \
+		-I$(WLAN_ROOT)/$(MAC_SRC_DIR)/include \
+		-I$(WLAN_ROOT)/$(MAC_SRC_DIR)/pe/include \
+		-I$(WLAN_ROOT)/$(MAC_SRC_DIR)/pe/lim
+
+MAC_CFG_OBJS := $(MAC_SRC_DIR)/cfg/cfg_api.o \
+		$(MAC_SRC_DIR)/cfg/cfg_debug.o \
+		$(MAC_SRC_DIR)/cfg/cfg_param_name.o \
+		$(MAC_SRC_DIR)/cfg/cfg_proc_msg.o \
+		$(MAC_SRC_DIR)/cfg/cfg_send_msg.o
+
+MAC_DPH_OBJS :=	$(MAC_SRC_DIR)/dph/dph_hash_table.o
+
+MAC_LIM_OBJS := $(MAC_SRC_DIR)/pe/lim/lim_aid_mgmt.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_admit_control.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_api.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_assoc_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_debug.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_ft.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_ibss_peer_mgmt.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_link_monitoring_algo.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_p2p.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_action_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_assoc_req_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_assoc_rsp_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_auth_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_beacon_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_cfg_updates.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_deauth_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_disassoc_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_message_queue.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_mlm_req_messages.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_mlm_rsp_messages.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_probe_req_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_probe_rsp_frame.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_process_sme_req_messages.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_prop_exts_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_scan_result_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_security_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_send_management_frames.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_send_messages.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_send_sme_rsp_messages.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_ser_des_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_session.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_session_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_sme_req_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_sta_hash_api.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_timer_utils.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_trace.o \
+		$(MAC_SRC_DIR)/pe/lim/lim_utils.o
+
+ifeq ($(CONFIG_QCOM_ESE),y)
+ifneq ($(CONFIG_QCOM_ESE_UPLOAD),y)
+MAC_LIM_OBJS += $(MAC_SRC_DIR)/pe/lim/limProcessEseFrame.o
+endif
+endif
+
+ifeq ($(CONFIG_QCOM_TDLS),y)
+MAC_LIM_OBJS += $(MAC_SRC_DIR)/pe/lim/lim_process_tdls.o
+endif
+
+MAC_SCH_OBJS := $(MAC_SRC_DIR)/pe/sch/sch_api.o \
+		$(MAC_SRC_DIR)/pe/sch/sch_beacon_gen.o \
+		$(MAC_SRC_DIR)/pe/sch/sch_beacon_process.o \
+		$(MAC_SRC_DIR)/pe/sch/sch_debug.o \
+		$(MAC_SRC_DIR)/pe/sch/sch_message.o
+
+MAC_RRM_OBJS :=	$(MAC_SRC_DIR)/pe/rrm/rrm_api.o
+
+MAC_OBJS := 	$(MAC_CFG_OBJS) \
+		$(MAC_DPH_OBJS) \
+		$(MAC_LIM_OBJS) \
+		$(MAC_SCH_OBJS) \
+		$(MAC_RRM_OBJS)
+
+############ SAP ############
+SAP_DIR :=	core/sap
+SAP_INC_DIR :=	$(SAP_DIR)/inc
+SAP_SRC_DIR :=	$(SAP_DIR)/src
+
+SAP_INC := 	-I$(WLAN_ROOT)/$(SAP_INC_DIR) \
+		-I$(WLAN_ROOT)/$(SAP_SRC_DIR)
+
+SAP_OBJS :=	$(SAP_SRC_DIR)/sap_api_link_cntl.o \
+		$(SAP_SRC_DIR)/sap_ch_select.o \
+		$(SAP_SRC_DIR)/sap_fsm.o \
+		$(SAP_SRC_DIR)/sap_module.o
+
+############ DFS ############
+DFS_DIR :=	$(SAP_DIR)/dfs
+DFS_INC_DIR :=	$(DFS_DIR)/inc
+DFS_SRC_DIR :=	$(DFS_DIR)/src
+
+DFS_INC :=	-I$(WLAN_ROOT)/$(DFS_INC_DIR) \
+		-I$(WLAN_ROOT)/$(DFS_SRC_DIR)
+
+DFS_OBJS :=	$(DFS_SRC_DIR)/dfs_bindetects.o \
+		$(DFS_SRC_DIR)/dfs.o \
+		$(DFS_SRC_DIR)/dfs_debug.o\
+		$(DFS_SRC_DIR)/dfs_fcc_bin5.o\
+		$(DFS_SRC_DIR)/dfs_init.o\
+		$(DFS_SRC_DIR)/dfs_misc.o\
+		$(DFS_SRC_DIR)/dfs_nol.o\
+		$(DFS_SRC_DIR)/dfs_phyerr_tlv.o\
+		$(DFS_SRC_DIR)/dfs_process_phyerr.o\
+		$(DFS_SRC_DIR)/dfs_process_radarevent.o\
+		$(DFS_SRC_DIR)/dfs_staggered.o
+
+############ SME ############
+SME_DIR :=	core/sme
+SME_INC_DIR :=	$(SME_DIR)/inc
+SME_SRC_DIR :=	$(SME_DIR)/src
+
+SME_INC := 	-I$(WLAN_ROOT)/$(SME_INC_DIR) \
+		-I$(WLAN_ROOT)/$(SME_SRC_DIR)/csr
+
+SME_CSR_OBJS := $(SME_SRC_DIR)/csr/csr_api_roam.o \
+		$(SME_SRC_DIR)/csr/csr_api_scan.o \
+		$(SME_SRC_DIR)/csr/csr_cmd_process.o \
+		$(SME_SRC_DIR)/csr/csr_link_list.o \
+		$(SME_SRC_DIR)/csr/csr_neighbor_roam.o \
+		$(SME_SRC_DIR)/csr/csr_util.o
+
+ifeq ($(CONFIG_QCOM_ESE),y)
+ifneq ($(CONFIG_QCOM_ESE_UPLOAD),y)
+SME_CSR_OBJS += $(SME_SRC_DIR)/csr/csrEse.o
+endif
+endif
+
+ifeq ($(CONFIG_QCOM_TDLS),y)
+SME_CSR_OBJS += $(SME_SRC_DIR)/csr/csr_tdls_process.o
+endif
+
+SME_QOS_OBJS := $(SME_SRC_DIR)/qos/sme_qos.o
+
+SME_CMN_OBJS := $(SME_SRC_DIR)/common/sme_api.o \
+		$(SME_SRC_DIR)/common/sme_ft_api.o \
+		$(SME_SRC_DIR)/common/sme_power_save.o \
+		$(SME_SRC_DIR)/common/sme_trace.o
+
+SME_OEM_DATA_OBJS := $(SME_SRC_DIR)/oem_data/oem_data_api.o
+
+SME_P2P_OBJS = $(SME_SRC_DIR)/p2p/p2p_api.o
+
+SME_RRM_OBJS := $(SME_SRC_DIR)/rrm/sme_rrm.o
+
+ifeq ($(CONFIG_QCACLD_FEATURE_NAN),y)
+SME_NAN_OBJS = $(SME_SRC_DIR)/nan/nan_api.o
+endif
+
+SME_OBJS :=	$(SME_CMN_OBJS) \
+		$(SME_CSR_OBJS) \
+		$(SME_OEM_DATA_OBJS) \
+		$(SME_P2P_OBJS) \
+		$(SME_QOS_OBJS) \
+		$(SME_RRM_OBJS) \
+		$(SME_NAN_OBJS)
+
+############ NLINK ############
+NLINK_DIR     :=	core/utils/nlink
+NLINK_INC_DIR :=	$(NLINK_DIR)/inc
+NLINK_SRC_DIR :=	$(NLINK_DIR)/src
+
+NLINK_INC     := 	-I$(WLAN_ROOT)/$(NLINK_INC_DIR)
+NLINK_OBJS    :=	$(NLINK_SRC_DIR)/wlan_nlink_srv.o
+
+############ PTT ############
+PTT_DIR     :=	core/utils/ptt
+PTT_INC_DIR :=	$(PTT_DIR)/inc
+PTT_SRC_DIR :=	$(PTT_DIR)/src
+
+PTT_INC     := 	-I$(WLAN_ROOT)/$(PTT_INC_DIR)
+PTT_OBJS    :=	$(PTT_SRC_DIR)/wlan_ptt_sock_svc.o
+
+############ WLAN_LOGGING ############
+WLAN_LOGGING_DIR     :=	core/utils/logging
+WLAN_LOGGING_INC_DIR :=	$(WLAN_LOGGING_DIR)/inc
+WLAN_LOGGING_SRC_DIR :=	$(WLAN_LOGGING_DIR)/src
+
+WLAN_LOGGING_INC     := -I$(WLAN_ROOT)/$(WLAN_LOGGING_INC_DIR)
+WLAN_LOGGING_OBJS    := $(WLAN_LOGGING_SRC_DIR)/wlan_logging_sock_svc.o
+
+############ SYS ############
+SYS_DIR :=	core/mac/src/sys
+
+SYS_INC := 	-I$(WLAN_ROOT)/$(SYS_DIR)/common/inc \
+		-I$(WLAN_ROOT)/$(SYS_DIR)/legacy/src/platform/inc \
+		-I$(WLAN_ROOT)/$(SYS_DIR)/legacy/src/system/inc \
+		-I$(WLAN_ROOT)/$(SYS_DIR)/legacy/src/utils/inc
+
+SYS_COMMON_SRC_DIR := $(SYS_DIR)/common/src
+SYS_LEGACY_SRC_DIR := $(SYS_DIR)/legacy/src
+SYS_OBJS :=	$(SYS_COMMON_SRC_DIR)/wlan_qct_sys.o \
+		$(SYS_LEGACY_SRC_DIR)/platform/src/sys_wrapper.o \
+		$(SYS_LEGACY_SRC_DIR)/system/src/mac_init_api.o \
+		$(SYS_LEGACY_SRC_DIR)/system/src/sys_entry_func.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/dot11f.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/log_api.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/mac_trace.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/parser_api.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/utils_api.o \
+		$(SYS_LEGACY_SRC_DIR)/utils/src/utils_parser.o
+
+############ CDF (Connectivity driver framework) ############
+CDF_DIR :=	core/cdf
+CDF_INC_DIR :=	$(CDF_DIR)/inc
+CDF_SRC_DIR :=	$(CDF_DIR)/src
+
+CDF_INC := 	-I$(WLAN_ROOT)/$(CDF_INC_DIR) \
+		-I$(WLAN_ROOT)/$(CDF_SRC_DIR)
+
+CDF_OBJS :=	$(CDF_SRC_DIR)/cdf_event.o \
+		$(CDF_SRC_DIR)/cdf_list.o \
+		$(CDF_SRC_DIR)/cdf_lock.o \
+		$(CDF_SRC_DIR)/cdf_memory.o \
+		$(CDF_SRC_DIR)/cdf_threads.o \
+		$(CDF_SRC_DIR)/cdf_mc_timer.o \
+		$(CDF_SRC_DIR)/cdf_trace.o \
+		$(CDF_SRC_DIR)/cdf_nbuf.o \
+		$(CDF_SRC_DIR)/cdf_defer.o
+
+############ CDS (Connectivity driver services) ############
+CDS_DIR :=	core/cds
+CDS_INC_DIR :=	$(CDS_DIR)/inc
+CDS_SRC_DIR :=	$(CDS_DIR)/src
+
+CDS_INC := 	-I$(WLAN_ROOT)/$(CDS_INC_DIR) \
+		-I$(WLAN_ROOT)/$(CDS_SRC_DIR)
+
+CDS_OBJS :=	$(CDS_SRC_DIR)/cds_api.o \
+		$(CDS_SRC_DIR)/cds_get_bin.o \
+		$(CDS_SRC_DIR)/cds_reg_service.o \
+		$(CDS_SRC_DIR)/cds_mq.o \
+		$(CDS_SRC_DIR)/cds_packet.o \
+		$(CDS_SRC_DIR)/cds_regdomain.o \
+		$(CDS_SRC_DIR)/cds_sched.o \
+		$(CDS_SRC_DIR)/cds_concurrency.o \
+		$(CDS_SRC_DIR)/cds_utils.o
+
+
+########### BMI ###########
+BMI_DIR := core/bmi
+
+BMI_INC := -I$(WLAN_ROOT)/$(BMI_DIR)/inc
+
+ifneq ($(CONFIG_ICNSS), y)
+BMI_OBJS := $(BMI_DIR)/src/bmi.o \
+            $(BMI_DIR)/src/ol_fw.o
+ifeq ($(CONFIG_FEATURE_BMI_2), y)
+BMI_OBJS += $(BMI_DIR)/src/bmi_2.o
+else
+BMI_OBJS += $(BMI_DIR)/src/bmi_1.o
+endif
+endif
+########### WMI ###########
+WMI_DIR := core/wmi
+
+WMI_INC := -I$(WLAN_ROOT)/$(WMI_DIR)
+
+WMI_OBJS := $(WMI_DIR)/wmi_unified.o \
+	    $(WMI_DIR)/wmi_tlv_helper.o
+
+########### FWLOG ###########
+FWLOG_DIR := core/utils/fwlog
+
+FWLOG_INC := -I$(WLAN_ROOT)/$(FWLOG_DIR)
+
+FWLOG_OBJS := $(FWLOG_DIR)/dbglog_host.o
+
+############ TXRX ############
+TXRX_DIR :=     core/dp/txrx
+TXRX_INC :=     -I$(WLAN_ROOT)/$(TXRX_DIR)
+
+TXRX_OBJS := $(TXRX_DIR)/ol_txrx.o \
+                $(TXRX_DIR)/ol_cfg.o \
+                $(TXRX_DIR)/ol_rx.o \
+                $(TXRX_DIR)/ol_rx_fwd.o \
+                $(TXRX_DIR)/ol_txrx.o \
+                $(TXRX_DIR)/ol_rx_defrag.o \
+                $(TXRX_DIR)/ol_tx_desc.o \
+                $(TXRX_DIR)/ol_tx.o \
+                $(TXRX_DIR)/ol_rx_reorder_timeout.o \
+                $(TXRX_DIR)/ol_rx_reorder.o \
+                $(TXRX_DIR)/ol_rx_pn.o \
+                $(TXRX_DIR)/ol_tx_queue.o \
+                $(TXRX_DIR)/ol_txrx_peer_find.o \
+                $(TXRX_DIR)/ol_txrx_event.o \
+                $(TXRX_DIR)/ol_txrx_encap.o \
+                $(TXRX_DIR)/ol_tx_send.o
+
+ifeq ($(CONFIG_WLAN_TX_FLOW_CONTROL_V2), y)
+TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_flow_control.o
+endif
+
+############ OL ############
+OL_DIR :=     core/dp/ol
+OL_INC :=     -I$(WLAN_ROOT)/$(OL_DIR)/inc
+
+############ PKTLOG ############
+PKTLOG_DIR :=      core/utils/pktlog
+PKTLOG_INC :=      -I$(WLAN_ROOT)/$(PKTLOG_DIR)/include
+
+PKTLOG_OBJS :=	$(PKTLOG_DIR)/pktlog_ac.o \
+		$(PKTLOG_DIR)/pktlog_internal.o \
+		$(PKTLOG_DIR)/linux_ac.o
+
+############ HTT ############
+HTT_DIR :=      core/dp/htt
+HTT_INC :=      -I$(WLAN_ROOT)/$(HTT_DIR)
+
+HTT_OBJS := $(HTT_DIR)/htt_tx.o \
+            $(HTT_DIR)/htt.o \
+            $(HTT_DIR)/htt_t2h.o \
+            $(HTT_DIR)/htt_h2t.o \
+            $(HTT_DIR)/htt_fw_stats.o \
+            $(HTT_DIR)/htt_rx.o
+
+############## HTC ##########
+HTC_DIR := core/htc
+HTC_INC := -I$(WLAN_ROOT)/$(HTC_DIR)
+
+HTC_OBJS := $(HTC_DIR)/htc.o \
+            $(HTC_DIR)/htc_send.o \
+            $(HTC_DIR)/htc_recv.o \
+            $(HTC_DIR)/htc_services.o
+
+########### HIF ###########
+HIF_DIR := core/hif
+HIF_CE_DIR := $(HIF_DIR)/src/ce
+HIF_CNSS_STUB_DIR := $(HIF_DIR)/src/icnss_stub
+
+ifeq ($(CONFIG_HIF_PCI), 1)
+HIF_PCIE_DIR := $(HIF_DIR)/src/pcie
+else
+HIF_SNOC_DIR := $(HIF_DIR)/src/snoc
+endif
+
+HIF_INC := -I$(WLAN_ROOT)/$(HIF_DIR)/inc \
+				-I$(WLAN_ROOT)/$(HIF_DIR)/src \
+				-I$(WLAN_ROOT)/$(HIF_CE_DIR) \
+				-I$(WLAN_ROOT)/$(HIF_CNSS_STUB_DIR)
+
+ifeq ($(CONFIG_HIF_PCI), 1)
+HIF_INC += -I$(WLAN_ROOT)/$(HIF_PCIE_DIR)
+else
+HIF_INC += -I$(WLAN_ROOT)/$(HIF_SNOC_DIR)
+endif
+
+HIF_OBJS := $(HIF_DIR)/src/ath_procfs.o \
+		$(HIF_CE_DIR)/ce_diag.o \
+		$(HIF_CE_DIR)/ce_main.o \
+		$(HIF_CE_DIR)/ce_service.o \
+		$(HIF_CE_DIR)/ce_tasklet.o \
+		$(HIF_DIR)/src/hif_main.o \
+		$(HIF_DIR)/src/mp_dev.o \
+		$(HIF_DIR)/src/regtable.o
+
+ifeq ($(CONFIG_CNSS), y)
+HIF_OBJS += $(HIF_CNSS_STUB_DIR)/icnss_stub.o \
+		$(HIF_CE_DIR)/ce_bmi.o
+endif
+
+ifeq ($(CONFIG_WLAN_NAPI), y)
+HIF_OBJS += $(HIF_DIR)/src/hif_napi.o
+endif
+
+ifeq ($(CONFIG_HIF_PCI), 1)
+HIF_PCIE_OBJS := $(HIF_PCIE_DIR)/if_pci.o
+
+HIF_OBJS += $(HIF_PCIE_OBJS)
+else
+HIF_SNOC_OBJS := $(HIF_SNOC_DIR)/if_snoc.o
+
+HIF_OBJS += $(HIF_SNOC_OBJS)
+endif
+
+############ WMA ############
+WMA_DIR :=	core/wma
+
+WMA_INC_DIR :=  $(WMA_DIR)/inc
+WMA_SRC_DIR :=  $(WMA_DIR)/src
+
+WMA_INC :=	-I$(WLAN_ROOT)/$(WMA_INC_DIR) \
+		-I$(WLAN_ROOT)/$(WMA_SRC_DIR)
+
+WMA_OBJS :=	$(WMA_SRC_DIR)/wma_main.o \
+		$(WMA_SRC_DIR)/wma_scan_roam.o \
+		$(WMA_SRC_DIR)/wma_dev_if.o \
+		$(WMA_SRC_DIR)/wma_mgmt.o \
+		$(WMA_SRC_DIR)/wma_power.o \
+		$(WMA_SRC_DIR)/wma_data.o \
+		$(WMA_SRC_DIR)/wma_utils.o \
+		$(WMA_SRC_DIR)/wma_features.o \
+		$(WMA_SRC_DIR)/wma_dfs_interface.o \
+		$(WMA_SRC_DIR)/wma_ocb.o \
+		$(WMA_SRC_DIR)/wlan_qct_wma_legacy.o
+
+ifeq ($(CONFIG_MPC_UT_FRAMEWORK),y)
+WMA_OBJS +=	$(WMA_SRC_DIR)/wma_utils_ut.o
+endif
+
+TARGET_INC :=	-I$(WLAN_ROOT)/target/inc
+
+LINUX_INC :=	-Iinclude/linux
+
+INCS :=		$(HDD_INC) \
+		$(EPPING_INC) \
+		$(LINUX_INC) \
+		$(MAC_INC) \
+		$(SAP_INC) \
+		$(SME_INC) \
+		$(SYS_INC) \
+		$(CDF_INC) \
+		$(CDS_INC) \
+		$(DFS_INC)
+
+INCS +=		$(WMA_INC) \
+		$(UAPI_INC) \
+		$(COMMON_INC) \
+		$(WMI_INC) \
+		$(FWLOG_INC) \
+		$(TXRX_INC) \
+		$(OL_INC) \
+		$(PKTLOG_INC) \
+		$(HTT_INC) \
+		$(HTC_INC) \
+		$(DFS_INC)
+
+INCS +=		$(HIF_INC) \
+		$(BMI_INC)
+
+INCS +=		$(TARGET_INC)
+
+INCS +=		$(NLINK_INC) \
+		$(PTT_INC) \
+		$(WLAN_LOGGING_INC)
+
+ifeq ($(CONFIG_REMOVE_PKT_LOG), 0)
+INCS +=		$(PKTLOG_INC)
+endif
+
+ifeq ($(BUILD_DIAG_VERSION), 1)
+INCS +=		$(HOST_DIAG_LOG_INC)
+endif
+
+OBJS :=		$(HDD_OBJS) \
+		$(EPPING_OBJS) \
+		$(MAC_OBJS) \
+		$(SAP_OBJS) \
+		$(SME_OBJS) \
+		$(SYS_OBJS) \
+		$(CDF_OBJS) \
+		$(CDS_OBJS) \
+		$(DFS_OBJS)
+
+OBJS +=		$(WMA_OBJS) \
+		$(TXRX_OBJS) \
+		$(WMI_OBJS) \
+		$(FWLOG_OBJS) \
+		$(HTC_OBJS) \
+		$(DFS_OBJS)
+
+OBJS +=		$(HIF_OBJS) \
+		$(BMI_OBJS) \
+		$(HTT_OBJS)
+
+OBJS +=		$(WLAN_LOGGING_OBJS)
+OBJS +=		$(NLINK_OBJS)
+OBJS +=		$(PTT_OBJS)
+
+ifeq ($(CONFIG_REMOVE_PKT_LOG), 0)
+OBJS +=		$(PKTLOG_OBJS)
+endif
+
+ifeq ($(BUILD_DIAG_VERSION), 1)
+OBJS +=		$(HOST_DIAG_LOG_OBJS)
+endif
+
+
+EXTRA_CFLAGS += $(INCS)
+
+CDEFINES :=	-DANI_LITTLE_BYTE_ENDIAN \
+		-DANI_LITTLE_BIT_ENDIAN \
+		-DQC_WLAN_CHIPSET_QCA_CLD \
+		-DDOT11F_LITTLE_ENDIAN_HOST \
+		-DANI_COMPILER_TYPE_GCC \
+		-DANI_OS_TYPE_ANDROID=6 \
+		-DWLAN_PERF \
+		-DPTT_SOCK_SVC_ENABLE \
+		-Wall\
+		-Werror\
+		-D__linux__ \
+		-DHAL_SELF_STA_PER_BSS=1 \
+		-DWLAN_FEATURE_VOWIFI_11R \
+		-DWLAN_FEATURE_NEIGHBOR_ROAMING \
+		-DWLAN_FEATURE_NEIGHBOR_ROAMING_DEBUG \
+		-DWLAN_FEATURE_VOWIFI_11R_DEBUG \
+		-DFEATURE_WLAN_WAPI \
+		-DFEATURE_OEM_DATA_SUPPORT\
+		-DSOFTAP_CHANNEL_RANGE \
+		-DWLAN_AP_STA_CONCURRENCY \
+		-DFEATURE_WLAN_SCAN_PNO \
+		-DWLAN_FEATURE_PACKET_FILTERING \
+		-DWLAN_FEATURE_VOWIFI \
+		-DWLAN_FEATURE_11AC \
+		-DWLAN_FEATURE_P2P_DEBUG \
+		-DWLAN_ENABLE_AGEIE_ON_SCAN_RESULTS \
+		-DWLANTL_DEBUG\
+		-DWLAN_NS_OFFLOAD \
+		-DWLAN_SOFTAP_VSTA_FEATURE \
+		-DWLAN_FEATURE_GTK_OFFLOAD \
+		-DWLAN_WAKEUP_EVENTS \
+		-DFEATURE_WLAN_RA_FILTERING\
+	        -DWLAN_KD_READY_NOTIFIER \
+		-DWLAN_NL80211_TESTMODE \
+		-DFEATURE_WLAN_LPHB \
+		-DFEATURE_WLAN_PAL_TIMER_DISABLE \
+		-DFEATURE_WLAN_PAL_MEM_DISABLE \
+		-DQCA_SUPPORT_TX_THROTTLE \
+		-DWMI_INTERFACE_EVENT_LOGGING \
+		-DATH_SUPPORT_WAPI \
+		-DWLAN_FEATURE_LINK_LAYER_STATS \
+		-DWLAN_LOGGING_SOCK_SVC_ENABLE \
+		-DFEATURE_WLAN_EXTSCAN \
+		-DFEATURE_WLAN_LFR \
+		-DWLAN_FEATURE_MBSSID \
+		-DCONFIG_160MHZ_SUPPORT
+
+ifeq (y,$(filter y,$(CONFIG_CNSS_EOS) $(CONFIG_ICNSS)))
+CDEFINES += -DQCA_WIFI_3_0
+CDEFINES += -DQCA_WIFI_3_0_EMU
+endif
+
+ifeq (y,$(filter y,$(CONFIG_CNSS_ADRASTEA) $(CONFIG_ICNSS)))
+CDEFINES += -DQCA_WIFI_3_0_ADRASTEA
+CDEFINES += -DADRASTEA_SHADOW_REGISTERS
+endif
+
+ifeq ($(CONFIG_WLAN_FASTPATH), y)
+CDEFINES +=	-DWLAN_FEATURE_FASTPATH
+endif
+
+ifeq ($(CONFIG_WLAN_NAPI), y)
+CDEFINES += -DFEATURE_NAPI
+ifeq ($(CONFIG_WLAN_NAPI_DEBUG), y)
+CDEFINES += -DFEATURE_NAPI_DEBUG
+endif
+endif
+
+ifeq ($(CONFIG_FEATURE_BMI_2), y)
+CDEFINES += -DFEATURE_BMI_2
+endif
+
+ifeq ($(CONFIG_ARCH_MSM), y)
+CDEFINES += -DMSM_PLATFORM
+endif
+
+CDEFINES +=	-DQCA_SUPPORT_TXRX_LOCAL_PEER_ID
+
+ifeq ($(CONFIG_WLAN_TX_FLOW_CONTROL_V2), y)
+CDEFINES +=	-DQCA_LL_TX_FLOW_CONTROL_V2
+CDEFINES +=	-DQCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
+else
+ifeq ($(CONFIG_ROME_IF),pci)
+CDEFINES +=	-DQCA_LL_LEGACY_TX_FLOW_CONTROL
+endif
+endif
+
+ifeq ($(CONFIG_DEBUG_LL),y)
+CDEFINES +=    	-DQCA_PKT_PROTO_TRACE
+endif
+
+ifneq ($(CONFIG_QCA_CLD_WLAN),)
+CDEFINES += -DWCN_PRONTO
+CDEFINES += -DWCN_PRONTO_V1
+endif
+
+ifeq ($(BUILD_DEBUG_VERSION),1)
+CDEFINES +=	-DWLAN_DEBUG \
+		-DTRACE_RECORD \
+		-DLIM_TRACE_RECORD \
+		-DSME_TRACE_RECORD \
+		-DHDD_TRACE_RECORD \
+		-DPE_DEBUG_LOGW \
+		-DPE_DEBUG_LOGE \
+		-DDEBUG
+endif
+
+ifeq ($(CONFIG_SLUB_DEBUG_ON),y)
+CDEFINES += -DTIMER_MANAGER
+CDEFINES += -DMEMORY_DEBUG
+endif
+
+ifeq ($(HAVE_CFG80211),1)
+CDEFINES += -DWLAN_FEATURE_P2P
+CDEFINES += -DWLAN_FEATURE_WFD
+ifeq ($(CONFIG_QCOM_VOWIFI_11R),y)
+CDEFINES += -DKERNEL_SUPPORT_11R_CFG80211
+CDEFINES += -DUSE_80211_WMMTSPEC_FOR_RIC
+endif
+endif
+
+ifeq ($(CONFIG_QCOM_ESE),y)
+CDEFINES += -DFEATURE_WLAN_ESE
+CDEFINES += -DQCA_COMPUTE_TX_DELAY
+CDEFINES += -DQCA_COMPUTE_TX_DELAY_PER_TID
+ifeq ($(CONFIG_QCOM_ESE_UPLOAD),y)
+CDEFINES += -DFEATURE_WLAN_ESE_UPLOAD
+endif
+endif
+
+#normally, TDLS negative behavior is not needed
+ifeq ($(CONFIG_QCOM_TDLS),y)
+CDEFINES += -DFEATURE_WLAN_TDLS
+endif
+
+ifeq ($(CONFIG_QCACLD_WLAN_LFR3),y)
+CDEFINES += -DWLAN_FEATURE_ROAM_OFFLOAD
+endif
+
+ifeq ($(CONFIG_PRIMA_WLAN_OKC),y)
+CDEFINES += -DFEATURE_WLAN_OKC
+endif
+
+ifeq ($(CONFIG_PRIMA_WLAN_11AC_HIGH_TP),y)
+CDEFINES += -DWLAN_FEATURE_11AC_HIGH_TP
+endif
+
+ifeq ($(BUILD_DIAG_VERSION),1)
+CDEFINES += -DFEATURE_WLAN_DIAG_SUPPORT
+CDEFINES += -DFEATURE_WLAN_DIAG_SUPPORT_CSR
+CDEFINES += -DFEATURE_WLAN_DIAG_SUPPORT_LIM
+ifeq ($(CONFIG_HIF_PCI), 1)
+CDEFINES += -DCONFIG_ATH_PROCFS_DIAG_SUPPORT
+endif
+endif
+
+ifeq ($(CONFIG_HIF_USB), 1)
+CDEFINES += -DCONFIG_ATH_PROCFS_DIAG_SUPPORT
+CDEFINES += -DQCA_SUPPORT_OL_RX_REORDER_TIMEOUT
+CDEFINES += -DCONFIG_ATH_PCIE_MAX_PERF=0 -DCONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD=0 -DCONFIG_DISABLE_CDC_MAX_PERF_WAR=0
+CDEFINES += -DQCA_TX_HTT2_SUPPORT
+endif
+
+# enable the MAC Address auto-generation feature
+CDEFINES += -DWLAN_AUTOGEN_MACADDR_FEATURE
+
+ifeq ($(CONFIG_WLAN_FEATURE_11W),y)
+CDEFINES += -DWLAN_FEATURE_11W
+endif
+
+ifeq ($(CONFIG_QCOM_LTE_COEX),y)
+CDEFINES += -DFEATURE_WLAN_CH_AVOID
+endif
+
+ifeq ($(CONFIG_WLAN_FEATURE_LPSS),y)
+CDEFINES += -DWLAN_FEATURE_LPSS
+endif
+
+ifeq ($(PANIC_ON_BUG),1)
+CDEFINES += -DPANIC_ON_BUG
+endif
+
+ifeq ($(WLAN_OPEN_SOURCE), 1)
+CDEFINES += -DWLAN_OPEN_SOURCE
+endif
+
+ifeq ($(CONFIG_FEATURE_STATS_EXT), 1)
+CDEFINES += -DWLAN_FEATURE_STATS_EXT
+endif
+
+ifeq ($(CONFIG_QCACLD_FEATURE_NAN),y)
+CDEFINES += -DWLAN_FEATURE_NAN
+endif
+
+ifeq ($(CONFIG_QCA_IBSS_SUPPORT), 1)
+CDEFINES += -DQCA_IBSS_SUPPORT
+endif
+
+#Enable OL debug and wmi unified functions
+ifeq ($(CONFIG_ATH_PERF_PWR_OFFLOAD), 1)
+CDEFINES += -DATH_PERF_PWR_OFFLOAD
+endif
+
+#Disable packet log
+ifeq ($(CONFIG_REMOVE_PKT_LOG), 1)
+CDEFINES += -DREMOVE_PKT_LOG
+endif
+
+#Enable 11AC TX
+ifeq ($(CONFIG_ATH_11AC_TXCOMPACT), 1)
+CDEFINES += -DATH_11AC_TXCOMPACT
+endif
+
+#Enable per vdev Tx desc pool
+ifeq ($(CONFIG_PER_VDEV_TX_DESC_POOL), 1)
+CDEFINES += -DCONFIG_PER_VDEV_TX_DESC_POOL
+endif
+
+#Enable OS specific IRQ abstraction
+ifeq ($(CONFIG_ATH_SUPPORT_SHARED_IRQ), 1)
+CDEFINES += -DATH_SUPPORT_SHARED_IRQ
+endif
+
+#Enable message based HIF instead of RAW access in BMI
+ifeq ($(CONFIG_HIF_MESSAGE_BASED), 1)
+CDEFINES += -DHIF_MESSAGE_BASED
+endif
+
+#Enable PCI specific APIS (dma, etc)
+ifeq ($(CONFIG_HIF_PCI), 1)
+CDEFINES += -DHIF_PCI
+endif
+
+#Enable USB specific APIS
+ifeq ($(CONFIG_HIF_USB), 1)
+CDEFINES += -DHIF_USB
+CDEFINES += -DCONFIG_HL_SUPPORT
+endif
+
+#Enable FW logs through ini
+CDEFINES += -DCONFIG_FW_LOGS_BASED_ON_INI
+
+#Enable pci read/write config functions
+ifeq ($(CONFIG_ATH_PCI), 1)
+CDEFINES += -DATH_PCI
+endif
+
+#Enable power management suspend/resume functionality
+ifeq ($(CONFIG_ATH_BUS_PM), 1)
+CDEFINES += -DATH_BUS_PM
+endif
+
+#Enable FLOWMAC module support
+ifeq ($(CONFIG_ATH_SUPPORT_FLOWMAC_MODULE), 1)
+CDEFINES += -DATH_SUPPORT_FLOWMAC_MODULE
+endif
+
+#Enable spectral support
+ifeq ($(CONFIG_ATH_SUPPORT_SPECTRAL), 1)
+CDEFINES += -DATH_SUPPORT_SPECTRAL
+endif
+
+#Enable WDI Event support
+ifeq ($(CONFIG_WDI_EVENT_ENABLE), 1)
+CDEFINES += -DWDI_EVENT_ENABLE
+endif
+
+#Endianess selection
+ifeq ($(CONFIG_LITTLE_ENDIAN), 1)
+AH_LITTLE_ENDIAN=1234
+CDEFINES += -DAH_BYTE_ORDER=$(AH_LITTLE_ENDIAN)
+else
+AH_BIG_ENDIAN=4321
+CDEFINES += -DAH_BYTE_ORDER=$(AH_BIG_ENDIAN)
+CDEFINES += -DBIG_ENDIAN_HOST
+endif
+
+#Enable TX reclaim support
+ifeq ($(CONFIG_TX_CREDIT_RECLAIM_SUPPORT), 1)
+CDEFINES += -DTX_CREDIT_RECLAIM_SUPPORT
+endif
+
+#Enable FTM support
+ifeq ($(CONFIG_QCA_WIFI_FTM), 1)
+CDEFINES += -DQCA_WIFI_FTM
+endif
+
+#Enable Checksum Offload support
+ifeq ($(CONFIG_CHECKSUM_OFFLOAD), 1)
+CDEFINES += -DCHECKSUM_OFFLOAD
+endif
+
+#Enable IPA Offload support
+ifeq ($(CONFIG_IPA_OFFLOAD), 1)
+CDEFINES += -DIPA_OFFLOAD
+endif
+
+ifneq ($(CONFIG_ARCH_MDM9630), y)
+ifeq ($(CONFIG_ARCH_MDM9640), y)
+CDEFINES += -DQCA_CONFIG_SMP
+endif
+endif
+
+#Enable GTK Offload
+ifeq ($(CONFIG_GTK_OFFLOAD), 1)
+CDEFINES += -DWLAN_FEATURE_GTK_OFFLOAD
+CDEFINES += -DIGTK_OFFLOAD
+endif
+
+#Enable Extended WoW support
+ifeq ($(CONFIG_EXT_WOW), 1)
+CDEFINES += -DWLAN_FEATURE_EXTWOW_SUPPORT
+endif
+
+#Mark it as SMP Kernel
+ifeq ($(CONFIG_SMP),y)
+CDEFINES += -DQCA_CONFIG_SMP
+endif
+
+ifeq ($(CONFIG_WLAN_FEATURE_RX_WAKELOCK), y)
+CDEFINES += -DWLAN_FEATURE_HOLD_RX_WAKELOCK
+endif
+
+#Enable Channel Matrix restriction for all targets
+CDEFINES += -DWLAN_ENABLE_CHNL_MATRIX_RESTRICTION
+
+#features specific to mobile router use case
+ifeq ($(CONFIG_MOBILE_ROUTER), y)
+
+#enable MCC TO SCC switch
+CDEFINES += -DFEATURE_WLAN_MCC_TO_SCC_SWITCH
+
+#enable wlan auto shutdown feature
+CDEFINES += -DFEATURE_WLAN_AUTO_SHUTDOWN
+
+#enable for MBSSID
+CDEFINES += -DWLAN_FEATURE_MBSSID
+
+#enable AP-AP ACS Optimization
+CDEFINES += -DFEATURE_WLAN_AP_AP_ACS_OPTIMIZE
+
+#Enable 4address scheme
+CDEFINES += -DFEATURE_WLAN_STA_4ADDR_SCHEME
+
+#Disable STA-AP Mode DFS support
+CDEFINES += -DFEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
+
+#Enable OBSS feature
+CDEFINES += -DQCA_HT_2040_COEX
+
+#Disable HT40 in 2.4GHZ STA mode
+CDEFINES += -DQCA_HT_20_24G_STA_ONLY
+
+else #CONFIG_MOBILE_ROUTER
+
+#Open P2P device interface only for non-Mobile router use cases
+CDEFINES += -DWLAN_OPEN_P2P_INTERFACE
+
+#Enable 2.4 GHz social channels in 5 GHz only mode for p2p usage
+CDEFINES += -DWLAN_ENABLE_SOCIAL_CHANNELS_5G_ONLY
+
+endif #CONFIG_MOBILE_ROUTER
+
+#Green AP feature
+ifeq ($(CONFIG_QCACLD_FEATURE_GREEN_AP),y)
+CDEFINES += -DFEATURE_GREEN_AP
+endif
+
+#Enable the RX full re-order offload feature only on LL (low-latency),
+#non-MDM9630 platforms
+ifneq ($(CONFIG_ARCH_MDM9630), y)
+ifeq ($(CONFIG_HIF_PCI), 1)
+CDEFINES += -DWLAN_FEATURE_RX_FULL_REORDER_OL
+endif
+endif
+
+#Enable Signed firmware support for split binary format
+ifeq ($(CONFIG_QCA_SIGNED_SPLIT_BINARY_SUPPORT), 1)
+CDEFINES += -DQCA_SIGNED_SPLIT_BINARY_SUPPORT
+endif
+
+#Enable single firmware binary format
+ifeq ($(CONFIG_QCA_SINGLE_BINARY_SUPPORT), 1)
+CDEFINES += -DQCA_SINGLE_BINARY_SUPPORT
+endif
+
+#Enable collecting target RAM dump after kernel panic
+ifeq ($(CONFIG_TARGET_RAMDUMP_AFTER_KERNEL_PANIC), 1)
+CDEFINES += -DTARGET_RAMDUMP_AFTER_KERNEL_PANIC
+endif
+
+#Enable/disable secure firmware feature
+ifeq ($(CONFIG_FEATURE_SECURE_FIRMWARE), 1)
+CDEFINES += -DFEATURE_SECURE_FIRMWARE
+endif
+
+ifeq ($(CONFIG_ATH_PCIE_ACCESS_DEBUG), 1)
+CDEFINES += -DCONFIG_ATH_PCIE_ACCESS_DEBUG
+endif
+
+# Some kernel include files are being moved.  Check to see if
+# the old version of the files are present
+
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/mach-msm/include/mach/msm_smd.h),)
+CDEFINES += -DEXISTS_MSM_SMD
+endif
+
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/mach-msm/include/mach/msm_smsm.h),)
+CDEFINES += -DEXISTS_MSM_SMSM
+endif
+
+# Enable feature support for Linux version QCMBR
+ifeq ($(CONFIG_LINUX_QCMBR),y)
+CDEFINES += -DLINUX_QCMBR
+endif
+
+# NOTE: CONFIG_64BIT_PADDR requires CONFIG_HELIUMPLUS
+ifeq (y,$(filter y,$(CONFIG_CNSS_EOS) $(CONFIG_ICNSS)))
+CONFIG_HELIUMPLUS := y
+CONFIG_64BIT_PADDR := y
+CONFIG_FEATURE_TSO := y
+CONFIG_FEATURE_TSO_DEBUG := y
+ifeq ($(CONFIG_HELIUMPLUS),y)
+CDEFINES += -DHELIUMPLUS_PADDR64
+CDEFINES += -DHELIUMPLUS
+ifeq ($(CONFIG_64BIT_PADDR),y)
+CDEFINES += -DHTT_PADDR64
+endif
+endif
+endif
+
+ifeq ($(CONFIG_FEATURE_TSO),y)
+CDEFINES += -DFEATURE_TSO
+endif
+ifeq ($(CONFIG_FEATURE_TSO_DEBUG),y)
+CDEFINES += -DFEATURE_TSO_DEBUG
+endif
+
+ifeq ($(CONFIG_WLAN_LRO), y)
+CDEFINES += -DFEATURE_LRO
+endif
+
+ifeq ($(CONFIG_MOBILE_ROUTER), y)
+CDEFINES += -DFEATURE_AP_MCC_CH_AVOIDANCE
+endif
+
+ifeq ($(CONFIG_MPC_UT_FRAMEWORK), y)
+CDEFINES += -DMPC_UT_FRAMEWORK
+endif
+
+ifeq ($(CONFIG_WLAN_OFFLOAD_PACKETS),y)
+CDEFINES += -DWLAN_FEATURE_OFFLOAD_PACKETS
+endif
+
+ifeq ($(CONFIG_WLAN_FEATURE_MEMDUMP),y)
+CDEFINES += -DWLAN_FEATURE_MEMDUMP
+endif
+
+ifeq ($(CONFIG_STATICALLY_ADD_11P_CHANNELS),y)
+CDEFINES += -DFEATURE_STATICALLY_ADD_11P_CHANNELS
+endif
+
+KBUILD_CPPFLAGS += $(CDEFINES)
+
+# Currently, for versions of gcc which support it, the kernel Makefile
+# is disabling the maybe-uninitialized warning.  Re-enable it for the
+# WLAN driver.  Note that we must use EXTRA_CFLAGS here so that it
+# will override the kernel settings.
+ifeq ($(call cc-option-yn, -Wmaybe-uninitialized),y)
+EXTRA_CFLAGS += -Wmaybe-uninitialized
+endif
+
+# Module information used by KBuild framework
+obj-$(CONFIG_QCA_CLD_WLAN) += $(MODNAME).o
+$(MODNAME)-y := $(OBJS)

+ 110 - 0
Kconfig

@@ -0,0 +1,110 @@
+comment "Qualcomm Atheros CLD WLAN module"
+
+config QCA_CLD_WLAN
+
+	tristate "Qualcomm Atheros CLD WLAN module"
+	default n
+	help
+	Add support for the Qualcomm Atheros CLD WLAN module
+
+if QCA_CLD_WLAN != n
+
+config QCACLD_WLAN_LFR3
+	bool "Enable the WLAN Legacy Fast Roaming feature Version 3"
+	default n
+
+config PRIMA_WLAN_OKC
+	bool "Enable the Prima WLAN Opportunistic Key Caching feature"
+	default n
+
+config PRIMA_WLAN_11AC_HIGH_TP
+	bool "Enable the Prima WLAN 802.11ac High Throughput option (depends upon kernel support)"
+	default n
+
+config WLAN_FEATURE_11W
+	bool "Enable the WLAN 802.11w Protected Management Frames feature"
+	default n
+
+config WLAN_FEATURE_LPSS
+	bool "Enable the WLAN LPSS feature"
+	default n
+
+config QCOM_VOWIFI_11R
+	bool "Enable Fast Transition (11r) feature"
+	default n
+
+config QCACLD_FEATURE_NAN
+	bool "Enable NAN feature"
+	default n
+
+config QCACLD_FEATURE_GREEN_AP
+	bool "Enable Green AP feature"
+	default n
+
+config HELIUMPLUS
+	bool "Enable Beeliner based descriptor structures for Helium"
+	default n
+
+config 64BIT_PADDR
+	bool "Enable 37-bit physical/bus addresses"
+	depends on HELIUMPLUS
+	default n
+
+config QCOM_TDLS
+	bool "Enable TDLS feature"
+	default n
+
+config QCOM_LTE_COEX
+	bool "Enable QCOM LTE Coex feature"
+	default n
+
+config MPC_UT_FRAMEWORK
+	bool "Enable Unit test framework for multiport concurrency"
+	default n
+
+config WLAN_OFFLOAD_PACKETS
+	bool "Enable offload packets feature"
+	default n
+
+config WLAN_FEATURE_MEMDUMP
+	bool "Enable MEMDUMP feature"
+	default n
+
+config FEATURE_TSO
+	bool "Enable TCP Segmentation Offload"
+	depends on HELIUMPLUS
+	default n
+
+config FEATURE_TSO_DEBUG
+	bool "Enable TCP Segmentation Offload with debug"
+	depends on FEATURE_TSO
+	default n
+
+config WLAN_FASTPATH
+	bool "Enable fastpath for datapackets"
+	default n
+
+config WLAN_NAPI
+	bool "Enable NAPI - datapath rx"
+	default n
+
+config WLAN_NAPI_DEBUG
+       bool "Enable debug logging on NAPI"
+       depends on WLAN_NAPI
+       default n
+
+config WLAN_TX_FLOW_CONTROL_V2
+	bool "Enable tx flow control version:2"
+	default n
+
+config WLAN_LRO
+	bool "Enable Large Receive Offload"
+	depends on HELIUMPLUS
+	# Kconfig symbols are referenced WITHOUT the CONFIG_ prefix; the
+	# original "depends on CONFIG_INET_LRO" referenced a nonexistent
+	# symbol and could never be satisfied.
+	depends on INET_LRO
+	default n
+
+config WLAN_FEATURE_RX_WAKELOCK
+	bool "Enable RX wake lock feature"
+	default n
+
+endif # QCA_CLD_WLAN

+ 20 - 0
Makefile

@@ -0,0 +1,20 @@
+# Out-of-tree kernel module build driver. KERNEL_SRC may be overridden to
+# point at a cross-compile kernel tree; it defaults to the running kernel.
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+KBUILD_OPTIONS := WLAN_ROOT=$(PWD)
+KBUILD_OPTIONS += MODNAME=wlan
+
+#By default build for CLD
+WLAN_SELECT := CONFIG_QCA_CLD_WLAN=m
+KBUILD_OPTIONS += CONFIG_QCA_WIFI_ISOC=0
+KBUILD_OPTIONS += CONFIG_QCA_WIFI_2_0=1
+KBUILD_OPTIONS += $(WLAN_SELECT)
+KBUILD_OPTIONS += $(KBUILD_EXTRA) # Extra config if any
+
+# all/modules_install/clean are commands, not files -- mark them phony so a
+# stray file with one of these names cannot shadow the target.
+.PHONY: all modules_install clean
+
+# Use $(CURDIR) consistently for M=; the original mixed $(shell pwd) and
+# $(PWD), which can disagree (e.g. when PWD is unset in the environment).
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(CURDIR) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(CURDIR) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(CURDIR) clean

+ 591 - 0
config/WCNSS_qcom_cfg.ini

@@ -0,0 +1,591 @@
+# This file allows user to override the factory
+
+# defaults for the WLAN Driver
+
+
+# Enable IMPS or not
+gEnableImps=1
+
+# Enable/Disable Idle Scan
+
+gEnableIdleScan=0
+
+
+# Increase sleep duration (seconds) during IMPS
+# 0 implies no periodic wake up from IMPS. Periodic wakeup is
+# unnecessary if Idle Scan is disabled.
+gImpsModSleepTime=0
+
+
+# Enable BMPS or not
+gEnableBmps=1
+
+# Enable suspend or not
+
+# 1: Enable standby, 2: Enable Deep sleep, 3: Enable Mcast/Bcast Filter
+
+gEnableSuspend=3
+
+
+# Phy Mode (auto, b, g, n, etc)
+# Valid values are 0-9, with 0 = Auto, 4 = 11n, 9 = 11ac
+# 1 = 11abg, 2 = 11b, 3 = 11g, 5 = 11g only, 6 = 11n only
+# 7 = 11b only 8 = 11ac only.
+gDot11Mode=0
+
+
+# CSR Roaming Enable(1) Disable(0)
+
+gRoamingTime=0
+
+
+# Assigned MAC Addresses - This will be used until NV items are in place
+
+# Each byte of MAC address is represented in Hex format as XX
+
+Intf0MacAddress=000AF58989FF
+Intf1MacAddress=000AF58989FE
+Intf2MacAddress=000AF58989FD
+
+Intf3MacAddress=000AF58989FC
+
+
+# UAPSD service interval for VO,VI, BE, BK traffic
+
+InfraUapsdVoSrvIntv=0
+
+InfraUapsdViSrvIntv=0
+
+InfraUapsdBeSrvIntv=0
+
+InfraUapsdBkSrvIntv=0
+
+# Flag to allow STA send AddTspec even when ACM is Off
+gAddTSWhenACMIsOff=1
+
+# Make 1x1 the default antenna configuration
+
+gNumRxAnt=1
+
+
+# Beacon filtering frequency (unit in beacon intervals)
+
+gNthBeaconFilter=50
+
+
+# Enable WAPI or not
+
+# WAPIIsEnabled=0
+
+
+# Flags to filter Mcast and Bcast RX packets.
+
+# Value 0: No filtering, 1: Filter all Multicast.
+
+# 2: Filter all Broadcast. 3: Filter all Mcast and Bcast
+
+McastBcastFilter=3
+
+
+#Flag to enable HostARPOffload feature or not
+
+hostArpOffload=1
+
+#Flag to enable HostNSOffload feature or not
+
+hostNSOffload=1
+
+# This flag enables IP, TCP and UDP checksum offload
+gEnableIpTcpUdpChecksumOffload=1
+
+#SoftAP Related Parameters
+
+# AP MAc addr
+
+gAPMacAddr=000AF589dcab
+
+
+# 802.11n Protection flag
+
+gEnableApProt=1
+
+
+#Enable OBSS protection
+
+gEnableApOBSSProt=1
+
+
+#Enable/Disable UAPSD for SoftAP
+
+gEnableApUapsd=1
+
+
+# Fixed Rate
+
+gFixedRate=0
+
+
+# Maximum Tx power
+
+# gTxPowerCap=30
+
+
+# Fragmentation Threshold
+
+# gFragmentationThreshold=2346
+
+
+# RTS threshold
+
+RTSThreshold=1048576
+
+
+# Intra-BSS forward
+
+gDisableIntraBssFwd=0
+
+
+# WMM Enable/Disable
+
+WmmIsEnabled=0
+
+
+# 802.11d support
+
+g11dSupportEnabled=1
+
+# 802.11h support
+
+g11hSupportEnabled=1
+
+# DFS Master Capability
+gEnableDFSMasterCap=1
+
+# ESE Support and fast transition
+EseEnabled=1
+ImplicitQosIsEnabled=0
+gNeighborScanTimerPeriod=200
+
+gNeighborLookupThreshold=76
+gNeighborReassocThreshold=81
+
+gNeighborScanChannelMinTime=20
+gNeighborScanChannelMaxTime=30
+gMaxNeighborReqTries=3
+
+# Legacy (non-ESE, non-802.11r) Fast Roaming Support
+# To enable, set FastRoamEnabled=1
+# To disable, set FastRoamEnabled=0
+FastRoamEnabled=1
+
+#Check if the AP to which we are roaming is better than current AP in terms of RSSI.
+#Checking is disabled if set to Zero.Otherwise it will use this value as to how better
+#the RSSI of the new/roamable AP should be for roaming
+RoamRssiDiff=3
+
+# If the RSSI of any available candidate is better than currently associated
+# AP by at least gImmediateRoamRssiDiff, then being to roam immediately (without
+# registering for reassoc threshold).
+# NOTE: Value of 0 means that we would register for reassoc threshold.
+gImmediateRoamRssiDiff=10
+
+# To enable, set gRoamIntraBand=1 (Roaming within band)
+# To disable, set gRoamIntraBand=0 (Roaming across band)
+gRoamIntraBand=0
+
+#Short Guard Interval Enable/disable
+
+gShortGI20Mhz=1
+
+gShortGI40Mhz=1
+
+
+#Auto Shutdown  Value in seconds. A value of 0 means Auto shutoff is disabled
+
+gAPAutoShutOff=0
+
+#Auto Shutdown wlan : Value in Seconds. 0 means disabled. Max 1 day = 86400 sec
+gWlanAutoShutdown = 0
+
+# Not used.
+gApAutoChannelSelection=0
+
+
+# Listen Energy Detect Mode Configuration
+
+# Valid values 0-128
+
+# 128 means disable Energy Detect feature
+
+# 0-9 are threshold code and 7 is recommended value from system if feature is to be enabled.
+
+# 10-128 are reserved.
+
+# The EDET threshold mapping is as follows in 3dB step:
+
+# 0 = -60 dBm
+
+# 1 = -63 dBm
+
+# 2 = -66 dBm
+
+# ...
+
+# 7 = -81 dBm
+
+# 8 = -84 dBm
+
+# 9 = -87 dBm
+
+# Note: Any of these settings are valid. Setting 0 would yield the highest power saving (in a noisy environment) at the cost of more range. The range impact is approximately calculated as:
+
+#
+
+#  Range Loss  (dB)  =  EDET threshold level (dBm) + 97 dBm.
+
+#
+
+gEnablePhyAgcListenMode=128
+
+
+#Preferred band (both or 2.4 only or 5 only)
+
+BandCapability=0
+
+
+#Beacon Early Termination (1 = enable the BET feature, 0 = disable)
+
+enableBeaconEarlyTermination=0
+
+beaconEarlyTerminationWakeInterval=3
+
+
+#Channel Bonding
+gChannelBondingMode5GHz=1
+
+
+#Enable Keep alive with non-zero period value
+
+gStaKeepAlivePeriod = 30
+
+#Say gGoKeepAlivePeriod(5 seconds) and gGoLinkMonitorPeriod(10 seconds).
+#For every 10 seconds DUT send Qos Null frame(i.e., Keep Alive frame if link is idle for last 10 seconds.)
+#For both active and power save clients.
+
+#Power save clients: DUT set TIM bit from 10th second onwards and till client honors TIM bit.
+#If doesn't honor for 5 seconds then DUT remove client.
+
+#Active clients: DUT send Qos Null frame for 10th seconds onwards if it is not success still we try on
+#11th second if not tries on 12th and so on till 15th second. Hence before disconnection DUT will send 5 NULL frames.
+#Hence in any case DUT will detect client got removed in (10+5) seconds. i.e., (gGoKeepAlivePeriod + gGoLinkMonitorPeriod)..
+
+#gGoLinkMonitorPeriod/ gApLinkMonitorPeriod is period where link is idle and it is period
+#where we send NULL frame.
+
+#gApLinkMonitorPeriod = 10
+
+#gGoLinkMonitorPeriod = 10
+
+#gGoKeepAlivePeriod/gApKeepAlivePeriod is time to spend to check whether frame are succeed to send or not.
+#Hence total effective detection time is gGoLinkMonitorPeriod+ gGoKeepAlivePeriod/gApLinkMonitorPeriod+ gApKeepAlivePeriod.
+
+
+gGoKeepAlivePeriod = 20
+
+gApKeepAlivePeriod = 20
+
+
+#If set will start with active scan after driver load, otherwise will start with
+
+#passive scan to find out the domain
+
+gEnableBypass11d=1
+
+
+#If set to 0, will not scan DFS channels
+
+gEnableDFSChnlScan=1
+
+# Enable DFS channel roam
+# 0: DISABLE, 1: ENABLED_NORMAL, 2: ENABLED_ACTIVE
+gAllowDFSChannelRoam=1
+
+gVhtChannelWidth=2
+gEnableLogp=1
+
+
+# Enable Automatic Tx Power control
+
+gEnableAutomaticTxPowerControl=1
+
+# 0 for OLPC 1 for CLPC and SCPC
+gEnableCloseLoop=1
+
+#Data Inactivity Timeout when in powersave (in ms)
+gDataInactivityTimeout=200
+
+# VHT Tx/Rx MCS values
+# Valid values are 0,1,2. If commented out, the default value is 0.
+# 0=MCS0-7, 1=MCS0-8, 2=MCS0-9
+gVhtRxMCS=2
+gVhtTxMCS=2
+
+# VHT Tx/Rx MCS values for 2x2
+# Valid values are 0,1,2. If commented out, the default value is 0.
+# 0=MCS0-7, 1=MCS0-8, 2=MCS0-9
+gEnable2x2=1
+gVhtRxMCS2x2=2
+gVhtTxMCS2x2=2
+
+# Set txchainmask and rxchainmask
+# These parameters are used only if gEnable2x2 is 0
+# Valid values are 1,2
+# Set gSetTxChainmask1x1=1 or gSetRxChainmask1x1=1 to select chain0.
+# Set gSetTxChainmask1x1=2 or gSetRxChainmask1x1=2 to select chain1.
+gSetTxChainmask1x1=1
+gSetRxChainmask1x1=1
+
+# Scan Timing Parameters
+# gPassiveMaxChannelTime=110
+# gPassiveMinChannelTime=60
+gActiveMaxChannelTime=40
+gActiveMinChannelTime=20
+
+#If set to 0, MCC is not allowed.
+gEnableMCCMode=1
+
+# MCC to SCC Switch mode: 0-Disable 1-Enable 2-Force SCC if same band
+gWlanMccToSccSwitchMode = 0
+
+# 1=enable STBC; 0=disable STBC
+gEnableRXSTBC=1
+
+# 1=enable tx STBC; 0=disable
+gEnableTXSTBC=1
+
+# 1=enable rx LDPC; 0=disable
+gEnableRXLDPC=1
+
+#Enable/Disable Tx beamforming
+gTxBFEnable=1
+
+# Enable Tx beamforming in VHT20MHz
+# Valid values are 0,1. If commented out, the default value is 0.
+# 0=disable, 1=enable
+gEnableTxBFin20MHz=1
+
+#Enable/Disable SU Tx beamformer support.
+gEnableTxSUBeamformer=1
+
+#Enable Scan Results Aging based on timer
+#Timer value is in seconds
+#If Set to 0 it will not enable the feature
+gScanAgingTime=30
+
+#Enable Scan Results Aging based on number of scans
+gScanResultAgeCount=1
+
+#Enable power saving mechanism based on the Android framework
+#If set to 0, the driver internally controls the power saving mechanism
+#If set to 1, the Android framework controls the power saving mechanism
+isAndroidPsEn=0
+
+#Enable thermal mitigation
+gThermalMitigationEnable=0
+
+gEnableFastRoamInConcurrency=1
+
+#Maximum channel time in msec
+gMaxMediumTime = 6000
+
+# 802.11K support
+gRrmEnable=1
+gRrmOperChanMax=8
+gRrmNonOperChanMax=8
+gRrmRandIntvl=100
+
+#Scan offload
+gEnableDirectedScanOffload=1
+
+#FlexConnect Power Factor
+#Default is set to 0 (disable)
+gFlexConnectPowerFactor=0
+
+#Disable split scan, the FW will take care of it
+gNumChanCombinedConc=60
+
+#Enable Power Save offload
+gEnablePowerSaveOffload=2
+
+#Enable firmware uart print
+gEnablefwprint=0
+
+#Enable firmware log
+gEnablefwlog=1
+
+#IPA config
+gIPAConfig=0
+gIPADescSize=800
+gIPAPreFilterEnable=1
+gIPARMEnable=1
+gIPAIPv6Enable=1
+
+IpaUcOffloadEnabled=0
+gIpaUcStaOffload=0
+
+#P2P Listen offload
+gEnableP2pListenOffload=1
+
+# Maximum Receive AMPDU size (VHT only. Valid values: 0->8k 1->16k 2->32k 3->64k 4->128k)
+# NOTE(review): the configured value 7 is outside the 0-4 range documented
+# above -- confirm whether exponents up to 7 (1024k) are supported here.
+gVhtAmpduLenExponent=7
+
+# Maximum MPDU length (VHT only. Valid values: 0->3895 octets, 1->7991 octets, 2->11454 octets)
+gVhtMpduLen=2
+
+# Maximum number of wow filters required
+#gMaxWoWFilters=22
+
+# WOW Enable/Disable.
+# 0 - Disable both magic pattern match and pattern byte match.
+# 1 - Enable magic pattern match on all interfaces.
+# 2 - Enable pattern byte match on all interfaces.
+# 3 - Enable both magic patter and pattern byte match on all interfaces.
+# Default value of gEnableWoW is 3.
+# gEnableWoW=0
+
+# Enable or Disable MCC Adaptive Scheduler at the FW
+# 1=Enable (default), 0=Disable
+gEnableMCCAdaptiveScheduler=1
+
+#Enable or Disable p2p device address administered
+isP2pDeviceAddrAdministrated=0
+
+#Enable Rx thread
+gEnableRxThread=1
+
+#Enable NAPI
+gEnableNAPI=0
+
+# Set Thermal Power limit
+TxPower2g=10
+TxPower5g=10
+
+# Remove Overlap channel restriction
+gEnableOverLapCh=0
+
+#Enable VHT on 2.4Ghz
+gEnableVhtFor24GHzBand=1
+
+#Enable or Disable 5G early beacon termination
+gEnable5gEBT=1
+
+#Maximum number of offload peers supported
+# gMaxOffloadPeers=2
+
+# controlling the following offload patterns
+# through ini parameter. Default value is 1
+# to disable set it to zero. ssdp = 0
+# Setup multicast pattern for mDNS 224.0.0.251,
+# SSDP 239.255.255.250 and LLMNR 224.0.0.252
+ssdp = 0
+
+#Enable Memory Deep Sleep
+gEnableMemDeepSleep=1
+
+# Bus bandwidth threshold values in terms of number of packets
+gBusBandwidthHighThreshold=2000
+gBusBandwidthMediumThreshold=500
+gBusBandwidthLowThreshold=150
+
+# Bus bandwidth compute timeout value in ms
+gBusBandwidthComputeInterval=100
+
+# Regulatory Setting; 0=STRICT; 1=CUSTOM
+gRegulatoryChangeCountry=1
+# RA filtering rate limit param, the current value would not
+# help if the lifetime in RA is less than 3*60=3min. Then
+# we need to change it, though it is uncommon.
+# gRAFilterEnable=0
+gRArateLimitInterval=600
+
+# Maximum number of concurrent connections
+gMaxConcurrentActiveSessions=2
+
+# Disable/Enable GreenAP
+# 0 to disable, 1 to enable, default: 1
+gEnableGreenAp=1
+
+# Radar PRI multiplier
+gDFSradarMappingPriMultiplier=4
+
+gPNOScanSupport=1
+
+# Enable/Disable RX full reorder offload
+gReorderOffloadSupported=1
+
+#Enable/Disable LPASS support
+# 0 to disable, 1 to enable
+gEnableLpassSupport=0
+
+# Whether userspace country code setting shld have priority
+gCountryCodePriority=1
+
+# Enable(1)/Disable(0) SIFS burst
+gEnableSifsBurst=1
+
+# Enable or Disable Multi-user MIMO
+# 1=Enable (default), 0=Disable
+gEnableMuBformee=1
+
+# Enable/Disable channel avoidance for SAP in SCC scenario
+# 0 - disable
+# 1 - enable
+gSapSccChanAvoidance=0
+
+# Inactivity time (in ms) to end TX Service Period while in IBSS power save mode
+gIbssTxSpEndInactivityTime=10
+
+# Enable/Disable Roaming Offload Support (a.k.a Key Management Offload)
+# 0 to disable, 1 to enable
+gRoamOffloadEnabled=0
+
+# Enable support for TDLS
+#  0 - disable
+#  1 - enable
+gEnableTDLSSupport=1
+
+# Enable support for Implicit Trigger of TDLS. That is, wlan driver shall
+# initiate TDLS Discovery towards a peer whenever setup criteria (throughput
+# and RSSI) is met and then will initiate teardown when teardown criteria
+# (idle packet count and RSSI) is met.
+#  0 - disable
+#  1 - enable
+gEnableTDLSImplicitTrigger=1
+
+# Enable TDLS External Control. That is, user space application has to
+# first configure a peer MAC in wlan driver towards which TDLS is desired.
+# Device will establish TDLS only towards those configured peers whenever
+# TDLS criteria (throughput and RSSI threshold) is met and teardown TDLS
+# when teardown criteria (idle packet count and RSSI) is met. However,
+# device will accept TDLS connection if it is initiated from any other peer,
+# even if that peer is not configured.
+#  0 - disable
+#  1 - enable
+# For TDLS External Control, Implicit Trigger must also be enabled.
+gTDLSExternalControl=1
+
+# Enable support for TDLS off-channel operation
+#  0 - disable
+#  1 - enable
+# TDLS off-channel operation will be invoked when there is only one
+# TDLS connection.
+gEnableTDLSOffChannel=1
+
+# Enable or Disable Random MAC (Spoofing)
+# 1=Enable, 0=Disable (default)
+gEnableMacAddrSpoof=0
+
+END
+
+# Note: Configuration parser would not read anything past the END marker
+

+ 60 - 0
core/bmi/inc/bmi.h

@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/* ================================================================ */
+/* BMI declarations and prototypes */
+/* */
+/* ================================================================= */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+#include "bmi_msg.h"
+#include "cdf_trace.h"
+#include "ol_if_athvar.h"
+#include "hif.h"
+
+/* When the PCIe HIF is built in (HIF_PCI), the real BMI entry points are
+ * provided by the BMI module; otherwise the no-op inline stubs below are
+ * used so callers compile unconditionally.
+ */
+#ifdef HIF_PCI
+void bmi_cleanup(struct ol_softc *scn);
+CDF_STATUS bmi_done(struct ol_softc *scn);
+CDF_STATUS bmi_download_firmware(struct ol_softc *scn);
+#else
+/* No-op stub: nothing to clean up when BMI is compiled out. */
+static inline void bmi_cleanup(struct ol_softc *scn)
+{
+	return;
+}
+
+/* No-op stub: always reports success so init sequences proceed unchanged. */
+static inline CDF_STATUS bmi_done(struct ol_softc *scn)
+{
+	return CDF_STATUS_SUCCESS;
+}
+
+/* No-op stub: firmware download is skipped; success keeps callers going. */
+static inline CDF_STATUS bmi_download_firmware(struct ol_softc *scn)
+{
+	return CDF_STATUS_SUCCESS;
+}
+#endif
+#endif /* _BMI_H_ */

+ 64 - 0
core/bmi/inc/ol_fw.h

@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_FW_H_
+#define _OL_FW_H_
+
+#ifdef QCA_WIFI_FTM
+#include "cdf_types.h"
+#endif
+#include "hif.h"
+
+/* Known target chip/revision version codes.  NOTE(review): presumably
+ * compared against the version the target reports during the BMI
+ * handshake (see bmi_get_target_info()) -- confirm against the users
+ * of these macros. */
+#define AR6004_VERSION_REV1_3        0x31c8088a
+
+#define AR9888_REV2_VERSION          0x4100016c
+#define AR6320_REV1_VERSION          0x5000000
+#define AR6320_REV1_1_VERSION        0x5000001
+#define AR6320_REV1_VERSION_1        AR6320_REV1_1_VERSION
+#define AR6320_REV1_3_VERSION        0x5000003
+#define AR6320_REV2_VERSION          AR6320_REV1_1_VERSION
+#define AR6320_REV2_1_VERSION        0x5010000
+#define AR6320_REV3_VERSION          0x5020000
+#define AR6320_REV3_2_VERSION        0x5030000
+#define AR6320_REV4_VERSION          AR6320_REV2_1_VERSION
+#define AR6320_DEV_VERSION           0x1000000
+
+#ifdef HIF_PCI
+void ol_target_failure(void *instance, CDF_STATUS status);
+uint8_t ol_get_number_of_peers_supported(struct ol_softc *scn);
+#else
+/* Non-PCI builds: stub out the target-failure hook and report support
+ * for a single peer. */
+static inline void ol_target_failure(void *instance, CDF_STATUS status)
+{
+	return;
+}
+
+static inline uint8_t ol_get_number_of_peers_supported(struct ol_softc *scn)
+{
+	return 1;
+}
+#endif
+#endif /* _OL_FW_H_ */

+ 87 - 0
core/bmi/inc/ol_if_athvar.h

@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Definitions for the Atheros Wireless LAN controller driver.
+ */
+#ifndef _DEV_OL_ATH_ATHVAR_H
+#define _DEV_OL_ATH_ATHVAR_H
+
+#include <osapi_linux.h>
+#include "cdf_types.h"
+#include "cdf_lock.h"
+#include "wmi_unified_api.h"
+#include "htc_api.h"
+#include "bmi_msg.h"
+#include "ol_txrx_api.h"
+#include "ol_txrx_ctrl_api.h"
+#include "ol_txrx_osif_api.h"
+#include "ol_params.h"
+#include <wdi_event_api.h>
+
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+#include "ol_ctrl_addba_api.h"
+/* Opaque handle to the HIF layer */
+typedef void *hif_handle_t;
+
+/* Collection of version words for host/target/wlan components.
+ * NOTE(review): exact producers of these fields are not visible in this
+ * file -- confirm where each word is filled in before documenting units. */
+struct ol_version {
+	uint32_t host_ver;
+	uint32_t target_ver;
+	uint32_t wlan_ver;
+	uint32_t wlan_ver_1;
+	uint32_t abi_ver;
+};
+
+/* Life-cycle state of the target device.
+ * NOTE(review): "TRGET" looks like a typo for "TARGET"; the names are
+ * kept as-is because other files may reference them -- confirm before
+ * renaming. */
+typedef enum _ol_target_status {
+	OL_TRGET_STATUS_CONNECTED = 0,  /* target connected */
+	OL_TRGET_STATUS_RESET,  /* target got reset */
+	OL_TRGET_STATUS_EJECT,  /* target got ejected */
+	OL_TRGET_STATUS_SUSPEND /* target got suspended */
+} ol_target_status;
+
+/* Event codes for tx/rx path accounting */
+enum ol_ath_tx_ecodes {
+	TX_IN_PKT_INCR = 0,
+	TX_OUT_HDR_COMPL,
+	TX_OUT_PKT_COMPL,
+	PKT_ENCAP_FAIL,
+	TX_PKT_BAD,
+	RX_RCV_MSG_RX_IND,
+	RX_RCV_MSG_PEER_MAP,
+	RX_RCV_MSG_TYPE_TEST
+};
+
+/*
+ * structure to hold the packet error count for CE and hif layer
+ */
+struct ol_ath_stats {
+	int hif_pipe_no_resrc_count;
+	int ce_ring_delta_fail_count;
+};
+
+#endif /* _DEV_OL_ATH_ATHVAR_H  */

+ 470 - 0
core/bmi/src/bmi.c

@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "i_bmi.h"
+
+/* APIs visible to the driver */
+
+/* BMI_1 refers QCA6174 target; the ADDR is AXI addr */
+#define BMI_1_TEST_ADDR	(0xa0000)
+/* BMI_2 scratch address -- selected by bmi_get_test_addr() when
+ * FEATURE_BMI_2 is set */
+#define BMI_2_TEST_ADDR	(0x6E0000)
+/* Enable BMI_TEST COMMANDs; the value 0x09 is randomly chosen */
+#define BMI_TEST_ENABLE (0x09)
+
+/**
+ * bmi_command_test() - dispatch one BMI command for the BMI self-test
+ * @command: BMI command id (BMI_NO_COMMAND / BMI_WRITE_MEMORY /
+ *           BMI_READ_MEMORY / BMI_EXECUTE)
+ * @address: target address the command operates on
+ * @data: source buffer (write/execute) or destination buffer (read)
+ * @length: number of bytes to transfer
+ * @scn: hif/target context
+ *
+ * Helper used only by the CONFIG_CNSS self-test path in
+ * bmi_download_firmware().  Any command id not listed above is silently
+ * ignored and reported as success.
+ *
+ * Return: status of the underlying BMI primitive, or CDF_STATUS_SUCCESS
+ *         for unrecognized commands
+ */
+static CDF_STATUS
+bmi_command_test(uint32_t command, uint32_t address, uint8_t *data,
+				uint32_t length, struct ol_softc *scn)
+{
+	switch (command) {
+	case BMI_NO_COMMAND:
+		return bmi_no_command(scn);
+	case BMI_WRITE_MEMORY:
+		return bmi_write_memory(address, data, length, scn);
+	case BMI_READ_MEMORY:
+		return bmi_read_memory(address, data, length, scn);
+	case BMI_EXECUTE:
+		return bmi_execute(address, (uint32_t *)data, scn);
+	default:
+		break;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_init() - allocate the DMA-coherent BMI command/response buffers
+ * @scn: hif/target context
+ *
+ * Clears the bmi_done flag and allocates MAX_BMI_CMDBUF_SZ bytes each
+ * for the command and response buffers.  Either allocation is skipped
+ * if the buffer already exists, so repeated calls are harmless.  If the
+ * response buffer cannot be allocated, the command buffer is released
+ * again (via the "end" path) before failing.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_NOT_INITIALIZED for
+ *         a NULL @scn, CDF_STATUS_E_NOMEM on allocation failure
+ */
+CDF_STATUS bmi_init(struct ol_softc *scn)
+{
+	if (!scn) {
+		BMI_ERR("Invalid scn Context");
+		bmi_assert(0);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+	scn->bmi_done = false;
+
+	if (!scn->bmi_cmd_buff) {
+		scn->bmi_cmd_buff = cdf_os_mem_alloc_consistent(scn->cdf_dev,
+					MAX_BMI_CMDBUF_SZ, &scn->bmi_cmd_da, 0);
+		if (!scn->bmi_cmd_buff) {
+			BMI_ERR("No Memory for BMI Command");
+			return CDF_STATUS_E_NOMEM;
+		}
+	}
+
+	if (!scn->bmi_rsp_buff) {
+		scn->bmi_rsp_buff = cdf_os_mem_alloc_consistent(scn->cdf_dev,
+					MAX_BMI_CMDBUF_SZ, &scn->bmi_rsp_da, 0);
+		if (!scn->bmi_rsp_buff) {
+			BMI_ERR("No Memory for BMI Response");
+			goto end;
+		}
+	}
+	return CDF_STATUS_SUCCESS;
+end:
+	/* undo the command-buffer allocation so a failed init leaves no
+	 * half-initialized state behind */
+	cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				 scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
+	scn->bmi_cmd_buff = NULL;
+	return CDF_STATUS_E_NOMEM;
+}
+
+/**
+ * bmi_cleanup() - release the BMI command/response DMA buffers
+ * @scn: hif/target context
+ *
+ * Frees the buffers allocated by bmi_init().  Safe to call when they
+ * were never allocated or were already freed: pointers and DMA handles
+ * are NULLed/zeroed after each free.
+ */
+void bmi_cleanup(struct ol_softc *scn)
+{
+	if (scn->bmi_cmd_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
+		scn->bmi_cmd_buff = NULL;
+		scn->bmi_cmd_da = 0;
+	}
+
+	if (scn->bmi_rsp_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
+		scn->bmi_rsp_buff = NULL;
+		scn->bmi_rsp_da = 0;
+	}
+}
+
+
+/**
+ * bmi_done() - tell the target that the BMI phase is complete
+ * @scn: hif/target context
+ *
+ * Claims the HIF device (note: this happens even when IHELIUM_NO_BMI
+ * short-circuits the call) and then issues BMI_DONE via
+ * bmi_done_local().
+ *
+ * Return: CDF_STATUS_SUCCESS when BMI is unused or the command
+ *         succeeded, otherwise the failure status from bmi_done_local()
+ */
+CDF_STATUS bmi_done(struct ol_softc *scn)
+{
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+	hif_claim_device(scn, scn);
+
+	if (IHELIUM_NO_BMI)
+		return status;
+
+	status = bmi_done_local(scn);
+
+	if (status != CDF_STATUS_SUCCESS)
+		BMI_ERR("BMI_DONE Failed status:%d", status);
+	return status;
+}
+
+/**
+ * bmi_get_target_info() - query target type/version over BMI
+ * @targ_info: output; filled from the target's response
+ * @scn: hif/target context
+ *
+ * Sends BMI_GET_TARGET_INFO and copies the response into @targ_info.
+ * NOTE(review): @length is passed by address to hif_exchange_bmi_msg()
+ * and then used as the copy size -- presumably the transport updates it
+ * to the actual response length; confirm in the HIF layer.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_NOT_INITIALIZED when bmi_init() has not
+ *         run, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_get_target_info(struct bmi_target_info *targ_info,
+						struct ol_softc *scn)
+{
+	int status = 0;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+	uint32_t cid, length;
+
+	if (scn->bmi_done) {
+		BMI_ERR("BMI Phase is Already Done");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+	cid = BMI_GET_TARGET_INFO;
+
+	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
+	length = sizeof(struct bmi_target_info);
+
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
+					(uint8_t *)bmi_rsp_buff, &length,
+					BMI_EXCHANGE_TIMEOUT_MS);
+	if (status) {
+		BMI_ERR("Failed to target info: status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_mem_copy(targ_info, bmi_rsp_buff, length);
+	return CDF_STATUS_SUCCESS;
+}
+
+#ifdef FEATURE_BMI_2
+/* bmi_get_test_addr() - scratch address used by the BMI self-test;
+ * BMI_2 targets use BMI_2_TEST_ADDR */
+static inline uint32_t bmi_get_test_addr(void)
+{
+	return BMI_2_TEST_ADDR;
+}
+#else
+/* BMI_1 (QCA6174) targets use the AXI scratch address BMI_1_TEST_ADDR */
+static inline uint32_t bmi_get_test_addr(void)
+{
+	return BMI_1_TEST_ADDR;
+}
+#endif
+
+/**
+ * bmi_download_firmware() - run the optional BMI self-test, then download
+ * @scn: hif/target context
+ *
+ * When the platform requests it (cnss_get_bmi_setup() == BMI_TEST_ENABLE),
+ * this first exercises the basic BMI commands (no-op, write, read) against
+ * a target scratch address as a best-effort sanity test (their return
+ * values are deliberately ignored), then performs the real download via
+ * bmi_firmware_download().
+ *
+ * Return: CDF_STATUS_SUCCESS when BMI is not in use, or the status of
+ *         bmi_firmware_download(); CDF_STATUS_NOT_INITIALIZED when @scn
+ *         is NULL
+ */
+CDF_STATUS bmi_download_firmware(struct ol_softc *scn)
+{
+#ifdef CONFIG_CNSS
+	/* only referenced by the self-test below; declaring them inside the
+	 * #ifdef avoids -Wunused-variable warnings on non-CNSS builds */
+	uint8_t data[10], out[10];
+	uint32_t address;
+	int32_t ret;
+#endif
+
+	if (IHELIUM_NO_BMI)
+		return CDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */
+
+	if (!scn) {
+		BMI_ERR("Invalid scn context");
+		bmi_assert(0);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+#ifdef CONFIG_CNSS
+	if (BMI_TEST_ENABLE == cnss_get_bmi_setup()) {
+		/* snprintf() takes a char buffer; data[] is uint8_t */
+		ret = snprintf((char *)data, sizeof(data), "ABCDEFGHI");
+		BMI_DBG("ret:%d writing data:%s\n", ret, data);
+		address = bmi_get_test_addr();
+
+		if (bmi_init(scn) != CDF_STATUS_SUCCESS) {
+			BMI_WARN("BMI_INIT Failed; No Memory!");
+			goto end;
+		}
+		bmi_command_test(BMI_NO_COMMAND, address, data, 9, scn);
+		bmi_command_test(BMI_WRITE_MEMORY, address, data, 9, scn);
+		bmi_command_test(BMI_READ_MEMORY, address, out, 9, scn);
+		/* only 9 payload bytes are read back; terminate before the
+		 * %s print to avoid reading uninitialized out[9] */
+		out[9] = '\0';
+		BMI_DBG("Output:%s", out);
+	}
+end:
+	/* label kept inside the #ifdef: it has no user (and would trigger
+	 * -Wunused-label) when CONFIG_CNSS is disabled */
+#endif
+	return bmi_firmware_download(scn);
+}
+
+/**
+ * bmi_read_soc_register() - read one 32-bit target SOC register via BMI
+ * @address: register address on the target
+ * @param: output; receives the register value
+ * @scn: hif/target context
+ *
+ * Marshals a BMI_READ_SOC_REGISTER command (id, address) and copies the
+ * 4-byte response into @param.
+ *
+ * NOTE(review): unlike bmi_get_target_info(), there is no NULL check on
+ * bmi_cmd_buff/bmi_rsp_buff before the cdf_mem_set() calls below, and
+ * the buffers are cleared before the bmi_done permission check -- verify
+ * callers always run bmi_init() first.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_read_soc_register(uint32_t address, uint32_t *param, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset, param_len;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+
+	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+	cdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));
+	cdf_mem_set(bmi_rsp_buff, 0, sizeof(cid) + sizeof(address));
+
+	if (scn->bmi_done) {
+		BMI_DBG("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	BMI_DBG("BMI Read SOC Register:device: 0x%p, address: 0x%x",
+			 scn, address);
+
+	cid = BMI_READ_SOC_REGISTER;
+
+	offset = 0;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
+	offset += sizeof(address);
+	param_len = sizeof(*param);
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+			bmi_rsp_buff, &param_len, BMI_EXCHANGE_TIMEOUT_MS);
+	if (status) {
+		BMI_DBG("Unable to read from the device; status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+	cdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));
+
+	BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_write_soc_register() - write one 32-bit target SOC register via BMI
+ * @address: register address on the target
+ * @param: value to write
+ * @scn: hif/target context
+ *
+ * Marshals a BMI_WRITE_SOC_REGISTER command (id, address, value) into
+ * the shared command buffer and sends it; the command carries no
+ * response payload.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_write_soc_register(uint32_t address, uint32_t param, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
+	bmi_assert(BMI_COMMAND_FITS(size));
+	cdf_mem_set(bmi_cmd_buff, 0, size);
+
+	if (scn->bmi_done) {
+		BMI_DBG("Command disallowed");
+		/* E_PERM (not E_FAILURE) for consistency with the other
+		 * BMI accessors' handling of this condition */
+		return CDF_STATUS_E_PERM;
+	}
+
+	BMI_DBG("SOC Register Write:device:0x%p, addr:0x%x, param:%d",
+						scn, address, param);
+
+	cid = BMI_WRITE_SOC_REGISTER;
+
+	offset = 0;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
+	offset += sizeof(address);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
+	offset += sizeof(param);
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+						NULL, NULL, 0);
+	if (status) {
+		BMI_ERR("Unable to write to the device: status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* exit log previously said "Read" -- fixed to match this function */
+	BMI_DBG("BMI Write SOC Register: Exit");
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmilz_data() - send LZ-compressed image data to the target
+ * @buffer: compressed payload to transmit
+ * @length: number of bytes in @buffer
+ * @scn: hif/target context
+ *
+ * Must be preceded by bmilz_stream_start().  The payload is split into
+ * commands of at most BMI_DATASZ_MAX - header bytes, each carrying
+ * (cid, txlen, data).
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmilz_data(uint8_t *buffer, uint32_t length, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint32_t remaining, txlen;
+	const uint32_t header = sizeof(cid) + sizeof(length);
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	BMI_DBG("BMI Send LZ Data: device: 0x%p, length: %d",
+						scn, length);
+
+	cid = BMI_LZ_DATA;
+
+	remaining = length;
+	while (remaining) {
+		txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
+			remaining : (BMI_DATASZ_MAX - header);
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
+		offset += sizeof(txlen);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]),
+			&buffer[length - remaining], txlen);
+		offset += txlen;
+		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+					NULL, NULL, 0);
+		if (status) {
+			BMI_ERR("Failed to write to the device: status:%d",
+								status);
+			return CDF_STATUS_E_FAILURE;
+		}
+		remaining -= txlen;
+	}
+
+	BMI_DBG("BMI LZ Data: Exit");
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_sign_stream_start() - send a BMI_SIGN_STREAM_START blob to the target
+ * @address: target address associated with the signed stream
+ * @buffer: signed-image bytes to send
+ * @length: number of bytes in @buffer
+ * @scn: hif/target context
+ *
+ * Streams @buffer in chunks of at most BMI_DATASZ_MAX - header bytes.
+ * The final chunk is rounded up to a 4-byte multiple through a bounce
+ * buffer; the padding bytes are zeroed rather than read from beyond the
+ * end of the caller's buffer (the original code copied the rounded-up
+ * size straight from @buffer, overreading by up to 3 bytes).
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_sign_stream_start(uint32_t address,
+		      uint8_t *buffer, uint32_t length, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
+	uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
+	uint8_t *src;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint32_t remaining, txlen;
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	BMI_ERR("Sign Stream start:device:0x%p, addr:0x%x, length:%d",
+						scn, address, length);
+
+	cid = BMI_SIGN_STREAM_START;
+	remaining = length;
+	while (remaining) {
+		src = &buffer[length - remaining];
+		if (remaining < (BMI_DATASZ_MAX - header)) {
+			if (remaining & 0x3) {
+				uint32_t valid_bytes = remaining;
+
+				/* round up to a 4-byte multiple; copy only
+				 * the valid bytes and zero the padding */
+				remaining = remaining + (4 - (remaining & 0x3));
+				memcpy(aligned_buf, src, valid_bytes);
+				cdf_mem_set(aligned_buf + valid_bytes, 0,
+						remaining - valid_bytes);
+				src = aligned_buf;
+			}
+			txlen = remaining;
+		} else {
+			txlen = (BMI_DATASZ_MAX - header);
+		}
+
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
+						sizeof(address));
+		/* was sizeof(offset) -- same size as sizeof(address) on
+		 * this ABI, but the intent is to skip the address field */
+		offset += sizeof(address);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
+		offset += sizeof(txlen);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
+		offset += txlen;
+		status = hif_exchange_bmi_msg(scn,
+					bmi_cmd_buff, offset,
+					NULL, NULL, BMI_EXCHANGE_TIMEOUT_MS);
+		if (status) {
+			BMI_ERR("Unable to write to the device: status:%d",
+								status);
+			return CDF_STATUS_E_FAILURE;
+		}
+		remaining -= txlen;
+	}
+	BMI_DBG("BMI SIGN Stream Start: Exit");
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmilz_stream_start() - open an LZ-compressed download stream
+ * @address: target address where the decompressed data will land
+ * @scn: hif/target context
+ *
+ * Sends BMI_LZ_STREAM_START (id, address); subsequent bmilz_data()
+ * calls feed the stream.  bmi_fast_download() starts a second stream at
+ * address 0 to close the previous one.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmilz_stream_start(uint32_t address, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+
+	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+	cdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));
+
+	if (scn->bmi_done) {
+		BMI_DBG("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+	BMI_DBG("BMI LZ Stream Start: (device: 0x%p, address: 0x%x)",
+						scn, address);
+
+	cid = BMI_LZ_STREAM_START;
+	offset = 0;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
+	offset += sizeof(address);
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+						NULL, NULL, 0);
+	if (status) {
+		BMI_ERR("Unable to Start LZ Stream to the device status:%d",
+								status);
+		return CDF_STATUS_E_FAILURE;
+	}
+	BMI_DBG("BMI LZ Stream: Exit");
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_fast_download() - download a compressed image via the BMI LZ stream
+ * @address: target address for the decompressed image
+ * @buffer: compressed image bytes
+ * @length: number of bytes in @buffer (any alignment)
+ * @scn: hif/target context
+ *
+ * Sends the 4-byte-aligned prefix of @buffer, then the remaining 1-3
+ * bytes zero-padded to one word.  On success a second (fake) stream is
+ * opened at address 0 to close the compressed stream and flush target
+ * caches.
+ *
+ * Return: CDF_STATUS_SUCCESS or the first failing step's status
+ */
+CDF_STATUS
+bmi_fast_download(uint32_t address, uint8_t *buffer,
+		  uint32_t length, struct ol_softc *scn)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t last_word = 0;
+	uint32_t last_word_offset = length & ~0x3;
+	uint32_t unaligned_bytes = length & 0x3;
+
+	status = bmilz_stream_start(address, scn);
+	if (status != CDF_STATUS_SUCCESS)
+		goto end;
+
+	/* copy the last word into a zero padded buffer */
+	if (unaligned_bytes)
+		cdf_mem_copy(&last_word, &buffer[last_word_offset],
+						unaligned_bytes);
+
+	status = bmilz_data(buffer, last_word_offset, scn);
+
+	if (status != CDF_STATUS_SUCCESS)
+		goto end;
+
+	if (unaligned_bytes)
+		status = bmilz_data((uint8_t *) &last_word, 4, scn);
+
+	if (status == CDF_STATUS_SUCCESS)
+		/*
+		 * Close compressed stream and open a new (fake) one.
+		 * This serves mainly to flush Target caches.
+		 * (The condition was inverted -- "!= SUCCESS" -- which
+		 * flushed only on failure, contradicting this comment and
+		 * the ath6kl reference implementation.)
+		 */
+		status = bmilz_stream_start(0x00, scn);
+end:
+	return status;
+}
+

+ 321 - 0
core/bmi/src/bmi_1.c

@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "i_bmi.h"
+
+/* APIs visible to the driver */
+
+/**
+ * bmi_read_memory() - read a block of target memory over BMI
+ * @address: target address to read from
+ * @buffer: output; receives @length bytes
+ * @length: number of bytes to read
+ * @scn: hif/target context
+ *
+ * Reads in chunks of at most BMI_DATASZ_MAX, sending (cid, address,
+ * rxlen) and copying each response into @buffer.
+ *
+ * NOTE(review): @align is set to 0 and never changed, so the two memcpy
+ * branches below are currently equivalent; the machinery looks like a
+ * remnant of an alignment scheme -- confirm before relying on it.
+ * NOTE(review): &rxlen is passed to hif_exchange_bmi_msg(); presumably
+ * the transport updates it to the bytes actually received -- verify the
+ * "remaining == rxlen" comparison against that behavior.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_NOT_INITIALIZED before bmi_init(),
+ *         CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_read_memory(uint32_t address,
+		uint8_t *buffer, uint32_t length, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint32_t remaining, rxlen;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+	uint32_t align;
+
+	if (scn->bmi_done) {
+		BMI_DBG("command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!scn->bmi_cmd_buff || !scn->bmi_rsp_buff) {
+		BMI_ERR("BMI Initialization hasn't done");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + sizeof(cid) +
+			sizeof(address) + sizeof(length)));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + sizeof(cid) +
+			sizeof(address) + sizeof(length));
+	cdf_mem_set(bmi_rsp_buff, 0, BMI_DATASZ_MAX + sizeof(cid) +
+			sizeof(address) + sizeof(length));
+
+	BMI_DBG("BMI Read: device: 0x%p, address: 0x%x, length: %d",
+						scn, address, length);
+
+	cid = BMI_READ_MEMORY;
+	align = 0;
+	remaining = length;
+
+	while (remaining) {
+		rxlen = (remaining < BMI_DATASZ_MAX) ?
+				remaining : BMI_DATASZ_MAX;
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
+						sizeof(address));
+		offset += sizeof(address);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &rxlen, sizeof(rxlen));
+		/* sizeof(length) == sizeof(rxlen); the field copied above
+		 * is rxlen */
+		offset += sizeof(length);
+
+		/* note we reuse the same buffer to receive on */
+		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+				bmi_rsp_buff, &rxlen, BMI_EXCHANGE_TIMEOUT_MS);
+		if (status) {
+			BMI_ERR("Unable to read from the device");
+			return CDF_STATUS_E_FAILURE;
+		}
+		if (remaining == rxlen) {
+			cdf_mem_copy(&buffer[length - remaining + align],
+					bmi_rsp_buff, rxlen - align);
+			/* last align bytes are invalid */
+		} else {
+			cdf_mem_copy(&buffer[length - remaining + align],
+				 bmi_rsp_buff, rxlen);
+		}
+		remaining -= rxlen;
+		address += rxlen;
+	}
+
+	BMI_DBG("BMI Read Memory: Exit");
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_write_memory() - write a block of data to target memory over BMI
+ * @address: target address to write to
+ * @buffer: bytes to write
+ * @length: number of bytes in @buffer (any alignment)
+ * @scn: hif/target context
+ *
+ * Writes in chunks of at most BMI_DATASZ_MAX - header, sending
+ * (cid, address, txlen, data) per chunk.  The final chunk is rounded up
+ * to a 4-byte multiple through a bounce buffer; the padding bytes are
+ * zeroed rather than read from beyond the end of the caller's buffer
+ * (the original code copied the rounded-up size straight from @buffer,
+ * overreading by up to 3 bytes).
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE or before bmi_init(), CDF_STATUS_E_FAILURE on a
+ *         transport error
+ */
+CDF_STATUS
+bmi_write_memory(uint32_t address,
+		 uint8_t *buffer, uint32_t length, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint32_t remaining, txlen;
+	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
+	uint8_t aligned_buffer[BMI_DATASZ_MAX];
+	uint8_t *src;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff) {
+		BMI_ERR("BMI initialization hasn't done");
+		return CDF_STATUS_E_PERM;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);
+
+	BMI_DBG("BMI Write Memory:device: 0x%p, address: 0x%x, length: %d",
+						scn, address, length);
+
+	cid = BMI_WRITE_MEMORY;
+
+	remaining = length;
+	while (remaining) {
+		src = &buffer[length - remaining];
+		if (remaining < (BMI_DATASZ_MAX - header)) {
+			if (remaining & 3) {
+				uint32_t valid_bytes = remaining;
+
+				/* align it with 4 bytes: copy only the
+				 * caller's bytes and zero the padding */
+				remaining = remaining + (4 - (remaining & 3));
+				memcpy(aligned_buffer, src, valid_bytes);
+				cdf_mem_set(aligned_buffer + valid_bytes, 0,
+						remaining - valid_bytes);
+				src = aligned_buffer;
+			}
+			txlen = remaining;
+		} else {
+			txlen = (BMI_DATASZ_MAX - header);
+		}
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
+						sizeof(address));
+		offset += sizeof(address);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
+		offset += sizeof(txlen);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
+		offset += txlen;
+		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+					NULL, NULL, BMI_EXCHANGE_TIMEOUT_MS);
+		if (status) {
+			BMI_ERR("Unable to write to the device; status:%d",
+								status);
+			return CDF_STATUS_E_FAILURE;
+		}
+		remaining -= txlen;
+		address += txlen;
+	}
+
+	BMI_DBG("BMI Write Memory: Exit");
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_execute() - execute code on the target and fetch its return value
+ * @address: target address to jump to
+ * @param: in/out; argument passed to the target, overwritten with the
+ *         32-bit value the target returns
+ * @scn: hif/target context
+ *
+ * Sends BMI_EXECUTE (cid, address, *param) and copies the 4-byte
+ * response back into @param.
+ *
+ * NOTE(review): @size uses sizeof(param) -- the size of the pointer, not
+ * of *param -- so the memsets clear a few bytes more than the marshalled
+ * 12-byte message; harmless but misleading.
+ * NOTE(review): the exchange uses timeout 0 rather than
+ * BMI_EXCHANGE_TIMEOUT_MS -- presumably the transport default; confirm.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_PERM after
+ *         BMI_DONE, CDF_STATUS_NOT_INITIALIZED before bmi_init(),
+ *         CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS
+bmi_execute(uint32_t address, A_UINT32 *param, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t offset;
+	uint32_t param_len;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(size));
+	cdf_mem_set(bmi_cmd_buff, 0, size);
+	cdf_mem_set(bmi_rsp_buff, 0, size);
+
+
+	BMI_DBG("BMI Execute: device: 0x%p, address: 0x%x, param: %d",
+						scn, address, *param);
+
+	cid = BMI_EXECUTE;
+
+	offset = 0;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
+	offset += sizeof(address);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), param, sizeof(*param));
+	offset += sizeof(*param);
+	param_len = sizeof(*param);
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+					bmi_rsp_buff, &param_len, 0);
+	if (status) {
+		BMI_ERR("Unable to read from the device status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));
+
+	BMI_DBG("BMI Execute: Exit (param: %d)", *param);
+	return CDF_STATUS_SUCCESS;
+}
+
+/* bmi_no_command() - BMI_1 targets need no explicit no-op exchange, so
+ * this stub simply reports success; bmi_2.c provides a real
+ * implementation that round-trips BMI_NO_COMMAND to the target. */
+inline CDF_STATUS
+bmi_no_command(struct ol_softc *scn)
+{
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_firmware_download() - run the full BMI boot sequence
+ * @scn: hif/target context
+ *
+ * Allocates the BMI buffers, queries the target's type/version and
+ * records them in @scn, then configures the target and downloads the
+ * firmware image.
+ *
+ * Return: CDF_STATUS_SUCCESS, or the status of the first failing step
+ */
+CDF_STATUS
+bmi_firmware_download(struct ol_softc *scn)
+{
+	CDF_STATUS status;
+	struct bmi_target_info targ_info;
+	cdf_mem_zero(&targ_info, sizeof(targ_info));
+
+	/* Initialize BMI */
+	status = bmi_init(scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI Initialization Failed err:%d", status);
+		return status;
+	}
+
+	/* Get target information */
+	status = bmi_get_target_info(&targ_info, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI Target Info get failed: status:%d", status);
+		return status;
+	}
+
+	scn->target_type = targ_info.target_type;
+	scn->target_version = targ_info.target_ver;
+
+	/* Configure target */
+	status = ol_configure_target(scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI Configure Target Failed status:%d", status);
+		return status;
+	}
+
+	status = ol_download_firmware(scn);
+	if (status != CDF_STATUS_SUCCESS)
+		BMI_ERR("BMI Download Firmware Failed Status:%d", status);
+
+	return status;
+}
+
+/**
+ * bmi_done_local() - send BMI_DONE and release the BMI buffers
+ * @scn: hif/target context
+ *
+ * Marks the BMI phase finished, sends the BMI_DONE command, and frees
+ * both DMA-coherent buffers.
+ *
+ * NOTE(review): bmi_done is set to true *before* the exchange, so a
+ * failed BMI_DONE cannot be retried through this path; on a transport
+ * failure the function also returns without freeing the buffers --
+ * bmi_cleanup() is then the only release path.  Confirm callers invoke
+ * bmi_cleanup() on this error.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_NOT_INITIALIZED for
+ *         a NULL @scn or missing command buffer, CDF_STATUS_E_PERM if
+ *         already done, CDF_STATUS_E_FAILURE on a transport error
+ */
+CDF_STATUS bmi_done_local(struct ol_softc *scn)
+{
+	int status;
+	uint32_t cid;
+
+	if (!scn) {
+		BMI_ERR("Invalid scn context");
+		bmi_assert(0);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	if (scn->bmi_done) {
+		BMI_DBG("bmi_done_local skipped");
+		return CDF_STATUS_E_PERM;
+	}
+
+	BMI_DBG("BMI Done: Enter (device: 0x%p)", scn);
+
+	scn->bmi_done = true;
+	cid = BMI_DONE;
+
+	if (!scn->bmi_cmd_buff) {
+		BMI_ERR("Invalid scn BMICmdBuff");
+		bmi_assert(0);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	cdf_mem_copy(scn->bmi_cmd_buff, &cid, sizeof(cid));
+
+	status = hif_exchange_bmi_msg(scn, scn->bmi_cmd_buff,
+				sizeof(cid), NULL, NULL, 0);
+	if (status) {
+		BMI_ERR("Failed to write to the device; status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if (scn->bmi_cmd_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
+		scn->bmi_cmd_buff = NULL;
+		scn->bmi_cmd_da = 0;
+	}
+
+	if (scn->bmi_rsp_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
+		scn->bmi_rsp_buff = NULL;
+		scn->bmi_rsp_da = 0;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}

+ 452 - 0
core/bmi/src/bmi_2.c

@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#include "i_bmi.h"
+/* These need to be defined in the firmware interface files.
+ * They are defined here to address compilation issues.
+ * Will be deleted once firmware interface files for
+ * the target are merged
+ */
+#define BMI_LOAD_IMAGE              18
+
+/**
+ * bmi_no_command() - send a BMI_NO_COMMAND to the target
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * Writes only the BMI_NO_COMMAND id and checks the one-byte response
+ * code returned by the target.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+CDF_STATUS
+bmi_no_command(struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t length;
+	uint8_t ret = 0;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed: BMI DONE ALREADY");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+	cid = BMI_NO_COMMAND;
+
+	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
+	/* expected response: a single status byte */
+	length = sizeof(ret);
+
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
+			bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
+
+	if (status) {
+		BMI_ERR("Failed to write bmi no command status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_mem_copy(&ret, bmi_rsp_buff, length);
+	if (ret != 0) {
+		BMI_ERR("bmi no command response error ret 0x%x", ret);
+		return CDF_STATUS_E_FAILURE;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_done_local() - send BMI_DONE to the target and release BMI buffers
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * bmi_2.c variant: unlike the bmi_1.c version this one reads back a
+ * one-byte response code before freeing the DMA-consistent command and
+ * response buffers.
+ *
+ * NOTE(review): this variant does not set scn->bmi_done, whereas the
+ * bmi_1.c variant does -- confirm whether the flag should be set here.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+CDF_STATUS
+bmi_done_local(struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t length;
+	uint8_t ret = 0;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+	cid = BMI_DONE;
+
+	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
+	/* expected response: a single status byte */
+	length = sizeof(ret);
+
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
+		   bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
+
+	if (status) {
+		BMI_ERR("Failed to close BMI on target status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+	cdf_mem_copy(&ret, bmi_rsp_buff, length);
+
+	if (ret != 0) {
+		BMI_ERR("BMI DONE response failed:%d", ret);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Release the DMA-consistent buffers allocated by bmi_init() */
+	if (scn->bmi_cmd_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_cmd_buff, scn->bmi_cmd_da, 0);
+		scn->bmi_cmd_buff = NULL;
+		scn->bmi_cmd_da = 0;
+	}
+
+	if (scn->bmi_rsp_buff) {
+		cdf_os_mem_free_consistent(scn->cdf_dev, MAX_BMI_CMDBUF_SZ,
+				    scn->bmi_rsp_buff, scn->bmi_rsp_da, 0);
+		scn->bmi_rsp_buff = NULL;
+		scn->bmi_rsp_da = 0;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_write_memory() - write a host buffer into target memory over BMI
+ * @address: target address to write to
+ * @buffer: host source buffer
+ * @length: number of bytes to write
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * Splits @buffer into BMI_WRITE_MEMORY chunks that fit the BMI command
+ * buffer and sends them one by one, checking the one-byte response code
+ * after each chunk.  A final chunk whose size is not a multiple of 4 is
+ * copied into a local buffer and zero-padded to the next 4-byte
+ * boundary before transmission.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+CDF_STATUS
+bmi_write_memory(uint32_t address,
+		uint8_t *buffer,
+		uint32_t length,
+		struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t rsp_len;
+	uint8_t ret = 0;
+	uint32_t offset;
+	uint32_t remaining, txlen;
+	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
+	uint8_t aligned_buffer[BMI_DATASZ_MAX];
+	uint8_t *src;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("BMI Initialization is not happened");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);
+
+	cid = BMI_WRITE_MEMORY;
+	rsp_len = sizeof(ret);
+
+	remaining = length;
+	while (remaining) {
+		src = &buffer[length - remaining];
+		if (remaining < (BMI_DATASZ_MAX - header)) {
+			if (remaining & 3) {
+				/* align it with 4 bytes */
+				uint32_t pad = 4 - (remaining & 3);
+
+				/* Copy only the bytes the caller owns and
+				 * zero the padding, instead of rounding up
+				 * first and reading past the end of @buffer.
+				 */
+				memcpy(aligned_buffer, src, remaining);
+				memset(aligned_buffer + remaining, 0, pad);
+				remaining = remaining + pad;
+				src = aligned_buffer;
+			}
+			txlen = remaining;
+		} else {
+			txlen = (BMI_DATASZ_MAX - header);
+		}
+		/* Marshal: command id, target address, chunk length, data */
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
+						sizeof(address));
+		offset += sizeof(address);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
+		offset += sizeof(txlen);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
+		offset += txlen;
+		status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+			bmi_rsp_buff, &rsp_len, BMI_EXCHANGE_TIMEOUT_MS);
+		if (status) {
+			BMI_ERR("BMI Write Memory Failed status:%d", status);
+			return CDF_STATUS_E_FAILURE;
+		}
+		cdf_mem_copy(&ret, bmi_rsp_buff, rsp_len);
+		if (ret != 0) {
+			BMI_ERR("BMI Write memory response fail: %x", ret);
+			return CDF_STATUS_E_FAILURE;
+		}
+		remaining -= txlen; address += txlen;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_read_memory() - read target memory into a host buffer over BMI
+ * @address: target address to read from
+ * @buffer: host destination buffer
+ * @length: number of bytes to read
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * Issues BMI_READ_MEMORY commands in chunks sized so that the one-byte
+ * status code plus the data fit the BMI buffer, then copies the payload
+ * that follows the status code into @buffer.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+CDF_STATUS
+bmi_read_memory(uint32_t address, uint8_t *buffer,
+		uint32_t length, struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint8_t ret = 0;
+	uint32_t offset;
+	uint32_t remaining, rxlen, rsp_len, total_len;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	/* note we reuse the same buffer to receive on */
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(length);
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("BMI Initialization is not done");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + size));
+	cdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + size);
+	cdf_mem_set(bmi_rsp_buff, 0, BMI_DATASZ_MAX + size);
+
+	cid = BMI_READ_MEMORY;
+	rsp_len = sizeof(ret);
+	remaining = length;
+
+	while (remaining) {
+		rxlen = (remaining < BMI_DATASZ_MAX - rsp_len) ? remaining :
+						(BMI_DATASZ_MAX - rsp_len);
+		/* Marshal: command id, target address, chunk length */
+		offset = 0;
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
+						sizeof(address));
+		offset += sizeof(address);
+		cdf_mem_copy(&(bmi_cmd_buff[offset]), &rxlen, sizeof(rxlen));
+		/* advance by the field actually written; the previous
+		 * sizeof(length) only worked because both are uint32_t
+		 */
+		offset += sizeof(rxlen);
+
+		/* expected response: 1-byte status followed by the data */
+		total_len = rxlen + rsp_len;
+
+		status = hif_exchange_bmi_msg(scn,
+					   bmi_cmd_buff,
+					   offset,
+					   bmi_rsp_buff,
+					   &total_len,
+					   BMI_EXCHANGE_TIMEOUT_MS);
+
+		if (status) {
+			BMI_ERR("BMI Read memory failed status:%d", status);
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		cdf_mem_copy(&ret, bmi_rsp_buff, rsp_len);
+
+		if (ret != 0) {
+			BMI_ERR("bmi read memory response fail %x", ret);
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		cdf_mem_copy(&buffer[length - remaining],
+				(uint8_t *)bmi_rsp_buff + rsp_len, rxlen);
+		remaining -= rxlen; address += rxlen;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_execute() - issue a BMI_EXECUTE command to the target
+ * @address: execution entry address (see note below)
+ * @param: in/out execute parameter (see note below)
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * NOTE(review): only the BMI_EXECUTE command id is transmitted --
+ * @address and @param are never copied into the command buffer, and
+ * *param is not updated from the response.  Confirm this matches the
+ * BMI protocol expected by this chip family.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+CDF_STATUS
+bmi_execute(uint32_t address, uint32_t *param,
+					struct ol_softc *scn)
+{
+	uint32_t cid;
+	int status;
+	uint32_t length;
+	uint8_t ret = 0;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("No Memory Allocated for bmi buffers");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	cid = BMI_EXECUTE;
+
+	cdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
+	/* expected response: a single status byte */
+	length = sizeof(ret);
+
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, sizeof(cid),
+		   bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
+
+	if (status) {
+		BMI_ERR("Failed to do BMI_EXECUTE status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_mem_copy(&ret, bmi_rsp_buff, length);
+
+	if (ret != 0) {
+		BMI_ERR("%s: ret 0x%x", __func__, ret);
+		return CDF_STATUS_E_FAILURE;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_load_image() - ask the target to load an image from a DMA address
+ * @address: DMA (bus) address of the image visible to the target
+ * @size: image size in bytes
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * Sends BMI_LOAD_IMAGE with the address split into low/high 32-bit
+ * words followed by the size, then checks the one-byte response code.
+ * The high word is always sent as 0, so target-visible addresses are
+ * assumed to fit in 32 bits -- TODO confirm for targets with >32-bit
+ * bus addresses.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_E_PERM once BMI is
+ * done; CDF_STATUS_NOT_INITIALIZED or CDF_STATUS_E_FAILURE on error.
+ */
+static CDF_STATUS
+bmi_load_image(dma_addr_t address,
+		uint32_t size, struct ol_softc *scn)
+{
+	uint32_t cid;
+	CDF_STATUS status;
+	uint32_t offset;
+	uint32_t length;
+	uint8_t ret = 0;
+	uint8_t *bmi_cmd_buff = scn->bmi_cmd_buff;
+	uint8_t *bmi_rsp_buff = scn->bmi_rsp_buff;
+	uint32_t addr_h, addr_l;
+
+	if (scn->bmi_done) {
+		BMI_ERR("Command disallowed");
+		return CDF_STATUS_E_PERM;
+	}
+
+	if (!bmi_cmd_buff || !bmi_rsp_buff) {
+		BMI_ERR("No Memory Allocated for BMI CMD/RSP Buffer");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
+	cdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));
+
+
+	BMI_DBG("%s: Enter device: 0x%p, size %d", __func__, scn, size);
+
+	cid = BMI_LOAD_IMAGE;
+
+	/* Marshal: command id, address low word, address high word, size */
+	offset = 0;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	addr_l = address & 0xffffffff;
+	addr_h = 0x00;
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &addr_l, sizeof(addr_l));
+	offset += sizeof(addr_l);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &addr_h, sizeof(addr_h));
+	offset += sizeof(addr_h);
+	cdf_mem_copy(&(bmi_cmd_buff[offset]), &size, sizeof(size));
+	offset += sizeof(size);
+	/* expected response: a single status byte */
+	length = sizeof(ret);
+
+	status = hif_exchange_bmi_msg(scn, bmi_cmd_buff, offset,
+		   bmi_rsp_buff, &length, BMI_EXCHANGE_TIMEOUT_MS);
+
+	if (status) {
+		BMI_ERR("BMI Load Image Failed; status:%d", status);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_mem_copy(&ret, bmi_rsp_buff, length);
+	if (ret != 0) {
+		BMI_ERR("%s: ret 0x%x", __func__, ret);
+		return CDF_STATUS_E_FAILURE;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * bmi_enable() - query target info and load board data plus firmware
+ * @scn: driver context that owns the BMI command/response buffers
+ *
+ * Reads the target type/version into @scn, obtains the pre-loaded image
+ * descriptors from CNSS via cnss_get_fw_image(), then issues
+ * BMI_LOAD_IMAGE for the board data followed by the firmware image.
+ *
+ * Return: CDF_STATUS_SUCCESS on success; CDF_STATUS_NOT_INITIALIZED if
+ * BMI buffers are missing; otherwise the failing step's status.
+ */
+static CDF_STATUS bmi_enable(struct ol_softc *scn)
+{
+	struct bmi_target_info targ_info;
+	struct image_desc_info image_desc_info;
+	CDF_STATUS status;
+
+	if (!scn) {
+		BMI_ERR("Invalid scn context");
+		bmi_assert(0);
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	if (scn->bmi_cmd_buff == NULL || scn->bmi_rsp_buff == NULL) {
+		BMI_ERR("bmi_open failed!");
+		return CDF_STATUS_NOT_INITIALIZED;
+	}
+
+	status = bmi_get_target_info(&targ_info, scn);
+	if (status != CDF_STATUS_SUCCESS)
+			return status;
+
+	BMI_DBG("%s: target type 0x%x, target ver 0x%x", __func__,
+	       targ_info.target_type, targ_info.target_ver);
+	scn->target_type = targ_info.target_type;
+	scn->target_version = targ_info.target_ver;
+
+	if (cnss_get_fw_image(&image_desc_info) != 0) {
+		BMI_ERR("Failed to get fw image");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Board data must be loaded before the firmware image */
+	status = bmi_load_image(image_desc_info.bdata_addr,
+				image_desc_info.bdata_size,
+				scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Load board data failed! status:%d", status);
+		return status;
+	}
+
+	status = bmi_load_image(image_desc_info.fw_addr,
+				image_desc_info.fw_size,
+				scn);
+	if (status != CDF_STATUS_SUCCESS)
+		BMI_ERR("Load fw image failed! status:%d", status);
+
+	return status;
+}
+
+/**
+ * bmi_firmware_download() - initialize BMI and download the firmware
+ * @scn: driver context
+ *
+ * No-op (reports success) on IHELIUM/ADRASTEA targets where BMI is not
+ * used; otherwise allocates the BMI buffers via bmi_init() and runs
+ * bmi_enable(), cleaning up the buffers if enabling fails.
+ *
+ * Return: CDF_STATUS_SUCCESS on success or the failing step's status.
+ */
+CDF_STATUS bmi_firmware_download(struct ol_softc *scn)
+{
+	CDF_STATUS status;
+
+	if (IHELIUM_NO_BMI)
+		return CDF_STATUS_SUCCESS;
+
+	status = bmi_init(scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI_INIT Failed status:%d", status);
+		goto end;
+	}
+
+	status = bmi_enable(scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI_ENABLE failed status:%d\n", status);
+		goto err_bmi_enable;
+	}
+
+	return status;
+err_bmi_enable:
+	bmi_cleanup(scn);
+end:
+	return status;
+}

+ 607 - 0
core/bmi/src/i_ar6320v2_regtable.h

@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _AR6320V2_DBG_REGTABLE_H_
+#define _AR6320V2_DBG_REGTABLE_H_
+
+#include "regtable.h"
+
+#define AR6320_REV2_1_REG_SIZE 0x0007F820
+#define AR6320_REV3_REG_SIZE   0x0007F820
+
+#ifdef HIF_PCI
+/*
+ * Redefine the register list. To minimize the size of the array, the list must
+ * obey the below format. {start0, end0}, {start1, end1}, {start2, end2}.......
+ * The values below must obey "start0 < end0 < start1 < end1 < start2 < ...",
+ * otherwise we may encounter errors in the dump processing.
+ */
+
+/* Accessible register ranges for AR6320 rev 2.x targets */
+static const tgt_reg_section ar6320v2_reg_table[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+
+	/*
+	 * EFUSE0,1,2 is disabled here
+	 * because its state may be reset
+	 *
+	 * {0x24800, 0x24804},
+	 * {0x25000, 0x25004},
+	 * {0x25800, 0x25804},
+	 */
+
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A064},
+
+	/* The DBI window is skipped here, it can be only accessed when pcie
+	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
+	 * PCIE_CTRL_APP_LTSSM_ENALBE=0.
+	 * {0x3C000 , 0x3C004},
+	 */
+
+	{0x40000, 0x400A4},
+
+	/*
+	 * SI register is skipped here.
+	 * Because it will cause bus hang
+	 *
+	 * {0x50000, 0x50018},
+	 */
+
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+
+/* Accessible register ranges for AR6320 rev 3 targets */
+static const tgt_reg_section ar6320v3_reg_table[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+
+	/*
+	 * EFUSE0,1,2 is disabled here
+	 * because its state may be reset
+	 *
+	 * {0x24800, 0x24804},
+	 * {0x25000, 0x25004},
+	 * {0x25800, 0x25804},
+	 */
+
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A074},
+
+	/*
+	 * The DBI window is skipped here, it can be only accessed when pcie
+	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
+	 * PCIE_CTRL_APP_LTSSM_ENALBE=0.
+	 * {0x3C000 , 0x3C004},
+	 */
+
+	{0x40000, 0x400A4},
+
+	/*
+	 * SI register is skipped here.
+	 * Because it will cause bus hang
+	 *
+	 * {0x50000, 0x50018},
+	 */
+
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+#endif
+#endif /* #ifndef _AR6320V2_DBG_REGTABLE_H_ */

+ 149 - 0
core/bmi/src/i_bmi.h

@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+/* ===================================================================
+ * Internal BMI Header File
+ */
+
+#ifndef _I_BMI_H_
+#define _I_BMI_H_
+
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+#include "hif.h"
+#include "bmi_msg.h"
+#include "bmi.h"
+#include "ol_fw.h"
+
+/* Default firmware binary file names requested from user space */
+#define QCA_FIRMWARE_FILE            "athwlan.bin"
+#define QCA_UTF_FIRMWARE_FILE        "utf.bin"
+#define QCA_BOARD_DATA_FILE          "fakeboar.bin"
+#define QCA_OTP_FILE                 "otp.bin"
+#define QCA_SETUP_FILE               "athsetup.bin"
+#define QCA_FIRMWARE_EPPING_FILE     "epping.bin"
+/*
+ * Note that not all the register locations are accessible.
+ * A list of accessible target registers are specified with
+ * their start and end addresses in a table for given target
+ * version. We should NOT access other locations as either
+ * they are invalid locations or host does not have read
+ * access to it or the value of the particular register
+ * read might change
+ */
+#define REGISTER_LOCATION       0x00000800
+
+#define DRAM_LOCATION           0x00400000
+#define DRAM_SIZE               0x000a8000
+/* The local base addr is used to read the target dump using pcie I/O reads */
+#define DRAM_LOCAL_BASE_ADDR    (0x100000)
+
+#define IRAM_LOCATION           0x00980000
+#define IRAM_SIZE               0x00038000
+
+#define AXI_LOCATION            0x000a0000
+#define AXI_SIZE                0x00018000
+
+#define CE_OFFSET               0x00000400
+#define CE_USEFUL_SIZE          0x00000058
+
+#define TOTAL_DUMP_SIZE         0x00200000
+#define PCIE_READ_LIMIT         0x00005000
+
+/* length in bytes of a SHA-256 digest, see struct hash_fw below */
+#define SHA256_DIGEST_SIZE      32
+
+/* BMI LOGGING WRAPPERS */
+
+#define BMI_LOG(level, args...) CDF_TRACE(CDF_MODULE_ID_BMI, \
+					level, ##args)
+#define BMI_ERR(args ...)	BMI_LOG(CDF_TRACE_LEVEL_ERROR, args)
+#define BMI_DBG(args ...)	BMI_LOG(CDF_TRACE_LEVEL_DEBUG, args)
+#define BMI_WARN(args ...)	BMI_LOG(CDF_TRACE_LEVEL_WARN, args)
+#define BMI_INFO(args ...)	BMI_LOG(CDF_TRACE_LEVEL_INFO, args)
+/* End of BMI Logging Wrappers */
+
+/* BMI Assert Wrappers */
+#define bmi_assert CDF_BUG
+/*
+ * Although we had envisioned BMI to run on top of HTC, this is not how the
+ * final implementation ended up. On the Target side, BMI is a part of the BSP
+ * and does not use the HTC protocol nor even DMA -- it is intentionally kept
+ * very simple.
+ */
+
+/* Worst-case BMI command buffer: payload plus the three header words */
+#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
+			sizeof(uint32_t) /* cmd */ + \
+			sizeof(uint32_t) /* addr */ + \
+			sizeof(uint32_t))    /* length */
+#define BMI_COMMAND_FITS(sz) ((sz) <= MAX_BMI_CMDBUF_SZ)
+/* milliseconds to wait for a BMI response exchange */
+#define BMI_EXCHANGE_TIMEOUT_MS  1000
+
+/* SHA-256 digests used to verify firmware binaries
+ * (consumed when FEATURE_SECURE_FIRMWARE is enabled)
+ */
+struct hash_fw {
+	u8 qwlan[SHA256_DIGEST_SIZE];
+	u8 otp[SHA256_DIGEST_SIZE];
+	u8 bdwlan[SHA256_DIGEST_SIZE];
+	u8 utf[SHA256_DIGEST_SIZE];
+};
+
+/* Kinds of binary files the host may download to the target */
+typedef enum _ATH_BIN_FILE {
+	ATH_OTP_FILE,
+	ATH_FIRMWARE_FILE,
+	ATH_PATCH_FILE,
+	ATH_BOARD_DATA_FILE,
+	ATH_FLASH_FILE,
+	ATH_SETUP_FILE,
+} ATH_BIN_FILE;
+
+/* Non-zero on targets where firmware download skips BMI entirely */
+#if defined(QCA_WIFI_3_0_IHELIUM) || defined(QCA_WIFI_3_0_ADRASTEA)
+#define IHELIUM_NO_BMI 1
+#else
+#define IHELIUM_NO_BMI 0
+#endif
+
+CDF_STATUS bmi_execute(uint32_t address, uint32_t *param,
+						struct ol_softc *scn);
+CDF_STATUS bmi_init(struct ol_softc *scn);
+CDF_STATUS bmi_no_command(struct ol_softc *scn);
+CDF_STATUS bmi_read_memory(uint32_t address,
+		uint8_t *buffer, uint32_t length, struct ol_softc *scn);
+CDF_STATUS bmi_write_memory(uint32_t address,
+		uint8_t *buffer, uint32_t length, struct ol_softc *scn);
+CDF_STATUS bmi_fast_download(uint32_t address,
+		uint8_t *buffer, uint32_t length, struct ol_softc *scn);
+CDF_STATUS bmi_read_soc_register(uint32_t address,
+				uint32_t *param, struct ol_softc *scn);
+CDF_STATUS bmi_write_soc_register(uint32_t address,
+				uint32_t param, struct ol_softc *scn);
+CDF_STATUS bmi_get_target_info(
+		struct bmi_target_info *targ_info, struct ol_softc *scn);
+
+CDF_STATUS bmi_firmware_download(struct ol_softc *scn);
+CDF_STATUS bmi_done_local(struct ol_softc *scn);
+
+CDF_STATUS ol_download_firmware(struct ol_softc *scn);
+CDF_STATUS ol_configure_target(struct ol_softc *scn);
+#endif

+ 1637 - 0
core/bmi/src/ol_fw.c

@@ -0,0 +1,1637 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/firmware.h>
+#include "ol_if_athvar.h"
+#include "targaddrs.h"
+#include "ol_cfg.h"
+#include "cds_api.h"
+#include "wma_api.h"
+#include "wma.h"
+#include "bin_sig.h"
+#include "i_ar6320v2_regtable.h"
+#include "epping_main.h"
+#include "ce_reg.h"
+#if  defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+
+#include "i_bmi.h"
+#include "qwlan_version.h"
+
+#ifdef FEATURE_SECURE_FIRMWARE
+/* Reference SHA256 digests per image type; all-zero entry = unverified */
+static struct hash_fw fw_hash;
+#endif
+
+/* Reference-clock frequency in Hz, indexed by A_refclk_speed_t (the order
+ * here is assumed to match that enum — see ol_fw_populate_clk_settings()). */
+static uint32_t refclk_speed_to_hz[] = {
+	48000000,               /* SOC_REFCLK_48_MHZ */
+	19200000,               /* SOC_REFCLK_19_2_MHZ */
+	24000000,               /* SOC_REFCLK_24_MHZ */
+	26000000,               /* SOC_REFCLK_26_MHZ */
+	37400000,               /* SOC_REFCLK_37_4_MHZ */
+	38400000,               /* SOC_REFCLK_38_4_MHZ */
+	40000000,               /* SOC_REFCLK_40_MHZ */
+	52000000,               /* SOC_REFCLK_52_MHZ */
+};
+
+/* Forward declaration: copies a target memory block into the host buffer */
+static int ol_target_coredump(void *inst, void *memory_block,
+					uint32_t block_len);
+#ifdef FEATURE_SECURE_FIRMWARE
+/**
+ * ol_check_fw_hash() - verify a firmware image against its expected SHA256
+ * @data: image bytes as loaded from the filesystem
+ * @fw_size: size of @data in bytes
+ * @file: image type; selects the reference digest in the fw_hash table
+ *
+ * When no reference digest is provisioned for @file (no table entry, or an
+ * all-zero entry) the image is accepted and downloaded in non-secure mode.
+ * With CONFIG_CNSS the image is copied into the CNSS firmware buffer, its
+ * SHA256 digest computed by the platform driver, and compared against the
+ * reference; both digests are dumped on mismatch.
+ *
+ * Return: 0 on success or non-secure acceptance, non-zero on failure
+ * (callers only test for non-zero).
+ */
+static int ol_check_fw_hash(const u8 *data, u32 fw_size, ATH_BIN_FILE file)
+{
+	u8 *hash = NULL;
+#ifdef CONFIG_CNSS
+	u8 *fw_mem = NULL;
+	u8 digest[SHA256_DIGEST_SIZE];
+#endif
+	u8 temp[SHA256_DIGEST_SIZE] = { };
+	int ret = 0;
+
+	switch (file) {
+	case ATH_BOARD_DATA_FILE:
+		hash = fw_hash.bdwlan;
+		break;
+	case ATH_OTP_FILE:
+		hash = fw_hash.otp;
+		break;
+	case ATH_FIRMWARE_FILE:
+#ifdef QCA_WIFI_FTM
+		if (cds_get_conparam() == CDF_FTM_MODE) {
+			hash = fw_hash.utf;
+			break;
+		}
+#endif
+		hash = fw_hash.qwlan;
+		break;	/* was an implicit fall-through into default */
+	default:
+		break;
+	}
+
+	if (!hash) {
+		BMI_INFO("No entry for file:%d Download FW in non-secure mode",
+									file);
+		goto end;
+	}
+
+	/* All-zero reference digest means hashing is not provisioned */
+	if (!cdf_mem_compare(hash, temp, SHA256_DIGEST_SIZE)) {
+		BMI_INFO("Download FW in non-secure mode:%d", file);
+		goto end;
+	}
+
+#ifdef CONFIG_CNSS
+	fw_mem = (u8 *)cnss_get_fw_ptr();
+	if (!fw_mem || (fw_size > MAX_FIRMWARE_SIZE)) {
+		BMI_ERR("No Memory to copy FW data");
+		ret = -1;
+		goto end;
+	}
+	cdf_mem_copy(fw_mem, data, fw_size);
+
+	ret = cnss_get_sha_hash(fw_mem, fw_size, "sha256", digest);
+
+	if (ret) {
+		BMI_ERR("Sha256 Hash computation failed err:%d", ret);
+		goto end;
+	}
+
+	if (cdf_mem_compare(hash, digest, SHA256_DIGEST_SIZE) != 0) {
+		BMI_ERR("Hash Mismatch");
+		cdf_trace_hex_dump(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+				   digest, SHA256_DIGEST_SIZE);
+		cdf_trace_hex_dump(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+				   hash, SHA256_DIGEST_SIZE);
+		/* non-zero is all callers check; CDF code kept for logs */
+		ret = CDF_STATUS_E_FAILURE;
+	}
+#endif
+end:
+	return ret;
+}
+#endif
+
+/**
+ * __ol_transfer_bin_file() - load one firmware binary and push it via BMI
+ * @scn: HIF/BMI context
+ * @file: image type; selects the filename and any special handling
+ * @address: target RAM address to download the image to
+ * @compressed: true to use the BMI fast (compressed) download path
+ *
+ * Resolves the filename for @file (from the CNSS platform driver when
+ * CONFIG_CNSS is set, otherwise build-time defaults), fetches it with
+ * request_firmware(), optionally verifies its hash, handles the board-data
+ * extended section and signed split binaries, and writes the result to the
+ * target over BMI.
+ *
+ * Return: EOK on success; negative value or CDF_STATUS_E_* on failure.
+ */
+static int __ol_transfer_bin_file(struct ol_softc *scn, ATH_BIN_FILE file,
+				  uint32_t address, bool compressed)
+{
+	int status = EOK;
+	const char *filename = NULL;
+	const struct firmware *fw_entry;
+	uint32_t fw_entry_size;
+	uint8_t *temp_eeprom;
+	uint32_t board_data_size;
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+	bool bin_sign = false;
+	int bin_off, bin_len;
+	SIGN_HEADER_T *sign_header;
+#endif
+
+	switch (file) {
+	default:
+		/* NOTE(review): ATH_FLASH_FILE has no case and lands here */
+		BMI_ERR("%s: Unknown file type", __func__);
+		return -1;
+	case ATH_OTP_FILE:
+#if defined(CONFIG_CNSS)
+		filename = scn->fw_files.otp_data;
+#else
+		filename = QCA_OTP_FILE;
+#endif
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+		bin_sign = true;
+#endif
+		break;
+	case ATH_FIRMWARE_FILE:
+		if (WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
+#if defined(CONFIG_CNSS)
+			filename = scn->fw_files.epping_file;
+#else
+			filename = QCA_FIRMWARE_EPPING_FILE;
+#endif
+			BMI_INFO("%s: Loading epping firmware file %s",
+						__func__, filename);
+			break;
+		}
+#ifdef QCA_WIFI_FTM
+		if (cds_get_conparam() == CDF_FTM_MODE) {
+#if defined(CONFIG_CNSS)
+			filename = scn->fw_files.utf_file;
+#else
+			filename = QCA_UTF_FIRMWARE_FILE;
+#endif
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+			bin_sign = true;
+#endif
+			BMI_INFO("%s: Loading firmware file %s",
+						__func__, filename);
+			break;
+		}
+#endif
+#if defined(CONFIG_CNSS)
+		filename = scn->fw_files.image_file;
+#else
+		filename = QCA_FIRMWARE_FILE;
+#endif
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+		bin_sign = true;
+#endif
+		break;
+	case ATH_PATCH_FILE:
+		BMI_INFO("%s: no Patch file defined", __func__);
+		return 0;
+	case ATH_BOARD_DATA_FILE:
+#ifdef QCA_WIFI_FTM
+		if (cds_get_conparam() == CDF_FTM_MODE) {
+#if defined(CONFIG_CNSS)
+			filename = scn->fw_files.utf_board_data;
+#else
+			filename = QCA_BOARD_DATA_FILE;
+#endif
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+			bin_sign = true;
+#endif
+			BMI_INFO("%s: Loading board data file %s",
+						__func__, filename);
+			break;
+		}
+#endif /* QCA_WIFI_FTM */
+#if defined(CONFIG_CNSS)
+		filename = scn->fw_files.board_data;
+#else
+		filename = QCA_BOARD_DATA_FILE;
+#endif
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+		bin_sign = false;
+#endif
+		break;
+	case ATH_SETUP_FILE:
+		if (cds_get_conparam() != CDF_FTM_MODE &&
+		    !WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
+#ifdef CONFIG_CNSS
+			BMI_INFO("%s: no Setup file defined", __func__);
+			return -1;
+#else
+			filename = QCA_SETUP_FILE;
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+			bin_sign = true;
+#endif
+			BMI_INFO("%s: Loading setup file %s",
+			       __func__, filename);
+#endif /* CONFIG_CNSS */
+		} else {
+			BMI_INFO("%s: no Setup file needed", __func__);
+			return -1;
+		}
+		break;
+	}
+
+	if (request_firmware(&fw_entry, filename, scn->aps_osdev.device) != 0) {
+		BMI_ERR("%s: Failed to get %s", __func__, filename);
+
+		if (file == ATH_OTP_FILE)
+			return -ENOENT;
+
+#if defined(QCA_WIFI_FTM) && defined(CONFIG_CNSS)
+		/* Try default board data file if FTM specific
+		 * board data file is not present. */
+		if (filename == scn->fw_files.utf_board_data) {
+			filename = scn->fw_files.board_data;
+			BMI_INFO("%s: Trying to load default %s",
+			       __func__, filename);
+			if (request_firmware(&fw_entry, filename,
+					     scn->aps_osdev.device) != 0) {
+				BMI_ERR("%s: Failed to get %s",
+				       __func__, filename);
+				return -1;
+			}
+		} else {
+			return -1;
+		}
+#else
+		return -1;
+#endif
+	}
+
+	if (!fw_entry || !fw_entry->data) {
+		BMI_ERR("Invalid fw_entries");
+		/* request_firmware() succeeded, so release the handle */
+		if (fw_entry)
+			release_firmware(fw_entry);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	fw_entry_size = fw_entry->size;
+	temp_eeprom = NULL;
+
+#ifdef FEATURE_SECURE_FIRMWARE
+
+	if (ol_check_fw_hash(fw_entry->data, fw_entry_size, file)) {
+		BMI_ERR("Hash Check failed for file:%s", filename);
+		status = CDF_STATUS_E_FAILURE;
+		goto end;
+	}
+#endif
+
+	if (file == ATH_BOARD_DATA_FILE) {
+		uint32_t board_ext_address;
+		int32_t board_ext_data_size;
+
+		temp_eeprom = cdf_mem_malloc(fw_entry_size);
+		if (!temp_eeprom) {
+			BMI_ERR("%s: Memory allocation failed", __func__);
+			release_firmware(fw_entry);
+			return CDF_STATUS_E_NOMEM;
+		}
+
+		cdf_mem_copy(temp_eeprom, (uint8_t *) fw_entry->data,
+			  fw_entry_size);
+
+		switch (scn->target_type) {
+		default:
+			board_data_size = 0;
+			board_ext_data_size = 0;
+			break;
+		case TARGET_TYPE_AR6004:
+			board_data_size = AR6004_BOARD_DATA_SZ;
+			board_ext_data_size = AR6004_BOARD_EXT_DATA_SZ;
+			break;	/* fix: was falling through to AR9888 sizes */
+		case TARGET_TYPE_AR9888:
+			board_data_size = AR9888_BOARD_DATA_SZ;
+			board_ext_data_size = AR9888_BOARD_EXT_DATA_SZ;
+			break;
+		}
+
+		/* Determine where in Target RAM to write Board Data */
+		bmi_read_memory(HOST_INTEREST_ITEM_ADDRESS(scn->target_type,
+							   hi_board_ext_data),
+				(uint8_t *) &board_ext_address, 4, scn);
+		BMI_INFO("Board extended Data download address: 0x%x",
+		       board_ext_address);
+
+		/* Check whether the target has allocated memory for extended
+		 * board data and file contains extended board data
+		 */
+
+		if ((board_ext_address)
+		    && (fw_entry_size ==
+			(board_data_size + board_ext_data_size))) {
+			uint32_t param;
+
+			status = bmi_write_memory(board_ext_address,
+					(uint8_t *)(temp_eeprom +
+					board_data_size),
+					board_ext_data_size, scn);
+
+			if (status != EOK)
+				goto end;
+
+			/* Record extended board Data initialized */
+			param = (board_ext_data_size << 16) | 1;
+			bmi_write_memory(
+				HOST_INTEREST_ITEM_ADDRESS(scn->target_type,
+					hi_board_ext_data_config),
+					(uint8_t *)&param, 4, scn);
+
+			/* only the base section remains to be downloaded */
+			fw_entry_size = board_data_size;
+		}
+	}
+#ifdef QCA_SIGNED_SPLIT_BINARY_SUPPORT
+	if (bin_sign) {
+		uint32_t chip_id;
+
+		if (fw_entry_size < sizeof(SIGN_HEADER_T)) {
+			BMI_ERR("Invalid binary size %d", fw_entry_size);
+			status = CDF_STATUS_E_FAILURE;
+			goto end;
+		}
+
+		sign_header = (SIGN_HEADER_T *) fw_entry->data;
+		chip_id = cpu_to_le32(sign_header->product_id);
+		if (sign_header->magic_num == SIGN_HEADER_MAGIC
+		    && (chip_id == AR6320_REV1_1_VERSION
+			|| chip_id == AR6320_REV1_3_VERSION
+			|| chip_id == AR6320_REV2_1_VERSION)) {
+
+			status = bmi_sign_stream_start(address,
+						(uint8_t *)fw_entry->data,
+						sizeof(SIGN_HEADER_T), scn);
+			if (status != EOK) {
+				BMI_ERR("unable to start sign stream");
+				status = CDF_STATUS_E_FAILURE;
+				goto end;
+			}
+
+			bin_off = sizeof(SIGN_HEADER_T);
+			bin_len = sign_header->rampatch_len
+				  - sizeof(SIGN_HEADER_T);
+		} else {
+			/* unsigned image: download it whole */
+			bin_sign = false;
+			bin_off = 0;
+			bin_len = fw_entry_size;
+		}
+	} else {
+		bin_len = fw_entry_size;
+		bin_off = 0;
+	}
+
+	if (compressed) {
+		status = bmi_fast_download(address,
+					   (uint8_t *) fw_entry->data + bin_off,
+					   bin_len, scn);
+	} else {
+		if (file == ATH_BOARD_DATA_FILE && fw_entry->data) {
+			status = bmi_write_memory(address,
+						  (uint8_t *) temp_eeprom,
+						  fw_entry_size, scn);
+		} else {
+			status = bmi_write_memory(address,
+						  (uint8_t *) fw_entry->data
+						  + bin_off, bin_len, scn);
+		}
+	}
+
+	if (bin_sign) {
+		/* stream the trailing signature section */
+		bin_off += bin_len;
+		bin_len = sign_header->total_len - sign_header->rampatch_len;
+
+		if (bin_len > 0) {
+			status = bmi_sign_stream_start(0,
+					(uint8_t *)fw_entry->data +
+					bin_off, bin_len, scn);
+			if (status != EOK)
+				BMI_ERR("sign stream error");
+		}
+	}
+#else
+	if (compressed) {
+		status = bmi_fast_download(address,
+					   (uint8_t *) fw_entry->data,
+					   fw_entry_size, scn);
+	} else {
+		if (file == ATH_BOARD_DATA_FILE && fw_entry->data) {
+			status = bmi_write_memory(address,
+						  (uint8_t *) temp_eeprom,
+						  fw_entry_size, scn);
+		} else {
+			status = bmi_write_memory(address,
+						  (uint8_t *) fw_entry->data,
+						  fw_entry_size, scn);
+		}
+	}
+#endif /* QCA_SIGNED_SPLIT_BINARY_SUPPORT */
+
+end:
+	if (temp_eeprom)
+		cdf_mem_free(temp_eeprom);
+
+	if (status != EOK) {
+		BMI_ERR("%s, BMI operation failed: %d", __func__, __LINE__);
+		release_firmware(fw_entry);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	release_firmware(fw_entry);
+
+	BMI_INFO("transferring file: %s size %d bytes done!",
+		(filename != NULL) ? filename : " ", fw_entry_size);
+
+	return status;
+}
+
+/*
+ * ol_transfer_bin_file() - PM-safe wrapper around __ol_transfer_bin_file()
+ *
+ * On CNSS platforms, holds the platform PM semaphore for the duration of
+ * the transfer so firmware loading never races suspend/resume.
+ */
+static int ol_transfer_bin_file(struct ol_softc *scn, ATH_BIN_FILE file,
+				uint32_t address, bool compressed)
+{
+	int status;
+
+#ifdef CONFIG_CNSS
+	/* Block until any in-flight suspend/resume completes */
+	cnss_lock_pm_sem();
+#endif
+	status = __ol_transfer_bin_file(scn, file, address, compressed);
+#ifdef CONFIG_CNSS
+	cnss_release_pm_sem();
+#endif
+
+	return status;
+}
+
+/*
+ * dump_ce_register() - log the register window of copy engines 0-7
+ * @scn: HIF context used for diagnostic-window reads
+ *
+ * Reads CE_USEFUL_SIZE bytes of register space for each of the eight copy
+ * engines and prints every word, five words per row.
+ *
+ * Return: 0 on success, -EACCES when a diagnostic read fails.
+ */
+int dump_ce_register(struct ol_softc *scn)
+{
+	uint32_t regs[8][CE_USEFUL_SIZE >> 2];
+	uint32_t nwords = CE_USEFUL_SIZE >> 2;
+	uint32_t reg_addr = CE0_BASE_ADDRESS;
+	uint16_t ce, idx;
+
+	for (ce = 0; ce < 8; ce++) {
+		if (hif_diag_read_mem(scn, reg_addr,
+				(uint8_t *)&regs[ce][0],
+				nwords * sizeof(uint32_t)) !=
+						CDF_STATUS_SUCCESS) {
+			BMI_ERR("Dumping CE register failed!");
+			return -EACCES;
+		}
+		reg_addr += CE_OFFSET;
+	}
+
+	for (ce = 0; ce < 8; ce++) {
+		BMI_ERR("CE%d Registers:", ce);
+		for (idx = 0; idx < nwords; idx++) {
+			BMI_ERR("0x%08x ", regs[ce][idx]);
+			/* row break after every 5 words and at the end */
+			if (!((idx + 1) % 5) || (nwords - 1) == idx)
+				BMI_ERR(" ");
+		}
+	}
+
+	return 0;
+}
+
+#if  defined(CONFIG_CNSS)
+
+/* Target context handed to the deferred ramdump work item */
+static struct ol_softc *ramdump_scn;
+
+/*
+ * ol_copy_ramdump() - copy target RAM into the host ramdump buffer
+ * @scn: target context; must carry a valid ramdump_base/ramdump_size
+ *
+ * Return: result of ol_target_coredump(), or -EACCES when no ramdump
+ * buffer was provisioned.
+ */
+int ol_copy_ramdump(struct ol_softc *scn)
+{
+	if (!scn->ramdump_base || !scn->ramdump_size) {
+		BMI_ERR("%s:ramdump collection fail", __func__);
+		return -EACCES;
+	}
+
+	return ol_target_coredump(scn, scn->ramdump_base, scn->ramdump_size);
+}
+
+/**
+ * ramdump_work_handler() - deferred RAM-dump collection after a target assert
+ * @ramdump: work_struct pointer (unused); the target is taken from the
+ *           ramdump_scn global set by ol_schedule_ramdump_work()
+ *
+ * Reads the firmware failure-state words over the diagnostic window, copies
+ * target RAM into the host buffer, then notifies the CNSS SSR framework that
+ * the device crashed.  If the failure-state pointer cannot be read, a dump
+ * is still attempted before declaring the crash.  On any other dump failure
+ * it falls back to silent self-recovery (or a crash notification when
+ * CNSS_SELF_RECOVERY is not configured).
+ */
+static void ramdump_work_handler(struct work_struct *ramdump)
+{
+	/* NOTE(review): 'ret' is only consumed under #ifdef DEBUG; non-DEBUG
+	 * builds will warn about it being set-but-unused/unused. */
+	int ret;
+	uint32_t host_interest_address;
+	uint32_t dram_dump_values[4];
+
+	if (!ramdump_scn) {
+		BMI_ERR("%s:Ramdump_scn is null:", __func__);
+		goto out_fail;
+	}
+#ifdef DEBUG
+	ret = hif_check_soc_status(ramdump_scn);
+	if (ret)
+		goto out_fail;
+
+	ret = dump_ce_register(ramdump_scn);
+	if (ret)
+		goto out_fail;
+
+	dump_ce_debug_register(ramdump_scn);
+#endif
+
+	if (hif_diag_read_mem(ramdump_scn,
+			hif_hia_item_address(ramdump_scn->target_type,
+			offsetof(struct host_interest_s, hi_failure_state)),
+			(uint8_t *)&host_interest_address,
+			sizeof(uint32_t)) != CDF_STATUS_SUCCESS) {
+		BMI_ERR("HifDiagReadiMem FW Dump Area Pointer failed!");
+		/* still collect what we can before declaring the crash */
+		ol_copy_ramdump(ramdump_scn);
+		cnss_device_crashed();
+		return;
+	}
+
+	BMI_ERR("Host interest item address: 0x%08x", host_interest_address);
+
+	if (hif_diag_read_mem(ramdump_scn, host_interest_address,
+			      (uint8_t *) &dram_dump_values[0],
+			      4 * sizeof(uint32_t)) != CDF_STATUS_SUCCESS) {
+		BMI_ERR("HifDiagReadiMem FW Dump Area failed!");
+		goto out_fail;
+	}
+	BMI_ERR("FW Assertion at PC: 0x%08x BadVA: 0x%08x TargetID: 0x%08x",
+	       dram_dump_values[2], dram_dump_values[3], dram_dump_values[0]);
+
+	if (ol_copy_ramdump(ramdump_scn))
+		goto out_fail;
+
+	BMI_ERR("%s: RAM dump collecting completed!", __func__);
+	/* notify SSR framework the target has crashed. */
+	cnss_device_crashed();
+	return;
+
+out_fail:
+	/* Silent SSR on dump failure */
+#ifdef CNSS_SELF_RECOVERY
+	cnss_device_self_recovery();
+#else
+	cnss_device_crashed();
+#endif
+	return;
+}
+
+static DECLARE_WORK(ramdump_work, ramdump_work_handler);
+
+/* Queue RAM-dump collection; stashes @scn in the ramdump_scn global read by
+ * ramdump_work_handler() (one outstanding dump assumed — no locking). */
+void ol_schedule_ramdump_work(struct ol_softc *scn)
+{
+	ramdump_scn = scn;
+	schedule_work(&ramdump_work);
+}
+
+/* Deferred firmware-indication handler: trigger CNSS self recovery */
+static void fw_indication_work_handler(struct work_struct *fw_indication)
+{
+	cnss_device_self_recovery();
+}
+
+static DECLARE_WORK(fw_indication_work, fw_indication_work_handler);
+
+/* Queue the firmware-indication (self recovery) work; @scn is unused */
+void ol_schedule_fw_indication_work(struct ol_softc *scn)
+{
+	schedule_work(&fw_indication_work);
+}
+#endif
+
+/**
+ * ol_target_failure() - central handler for a target firmware assert
+ * @instance: struct ol_softc of the failed target
+ * @status: failure status from the caller (currently unused here)
+ *
+ * Wakes any waiter on the WMA recovery event, marks the target as reset,
+ * and — unless recovery (LOGP) or load/unload is already in progress —
+ * kicks off either self-recovery or RAM-dump collection via CNSS.
+ */
+void ol_target_failure(void *instance, CDF_STATUS status)
+{
+	struct ol_softc *scn = (struct ol_softc *)instance;
+	tp_wma_handle wma = cds_get_context(CDF_MODULE_ID_WMA);
+#ifdef CONFIG_CNSS
+	int ret;
+#endif
+
+	/* WMA context may already be gone (e.g. mid-unload); don't deref */
+	if (wma)
+		cdf_event_set(&wma->recovery_event);
+
+	if (OL_TRGET_STATUS_RESET == scn->target_status) {
+		BMI_ERR("Target is already asserted, ignore!");
+		return;
+	}
+	scn->target_status = OL_TRGET_STATUS_RESET;
+
+	if (cds_is_logp_in_progress()) {
+		BMI_ERR("%s: LOGP is in progress, ignore!\n", __func__);
+		return;
+	}
+
+	if (cds_is_load_unload_in_progress()) {
+		BMI_ERR("%s: Loading/Unloading is in progress, ignore!",
+		       __func__);
+		return;
+	}
+	cds_set_logp_in_progress(true);
+
+#ifdef CONFIG_CNSS
+	/* 0 = FW registers readable, prefer self recovery if enabled;
+	 * -1 = unreadable in a way that precludes further handling */
+	ret = hif_check_fw_reg(scn);
+	if (0 == ret) {
+		if (scn->enable_self_recovery) {
+			ol_schedule_fw_indication_work(scn);
+			return;
+		}
+	} else if (-1 == ret) {
+		return;
+	}
+#endif
+
+	BMI_ERR("XXX TARGET ASSERTED XXX");
+
+#if  defined(CONFIG_CNSS)
+	/* Collect the RAM dump through a workqueue */
+	if (scn->enable_ramdump_collection)
+		ol_schedule_ramdump_work(scn);
+	else
+		pr_debug("%s: athdiag read for target reg\n", __func__);
+#endif
+}
+
+/**
+ * ol_configure_target() - program the target's host-interest area over BMI
+ * @scn: HIF/BMI context
+ *
+ * Writes the HTC protocol version, firmware mode/submode option flags,
+ * platform-specific option flags (external SWREG, LPASS, CDC max-perf WAR),
+ * host endianness and FW swap flags into the target's host_interest_s
+ * structure before firmware boot.
+ *
+ * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_FAILURE on any BMI error.
+ */
+CDF_STATUS ol_configure_target(struct ol_softc *scn)
+{
+	uint32_t param;
+#ifdef CONFIG_CNSS
+	struct cnss_platform_cap cap;
+	int ret;
+#endif
+
+	/* Tell target which HTC version it is used */
+	param = HTC_PROTOCOL_VERSION;
+	if (bmi_write_memory(
+		hif_hia_item_address(scn->target_type,
+		offsetof(struct host_interest_s, hi_app_host_interest)),
+		(uint8_t *) &param, 4, scn) != CDF_STATUS_SUCCESS) {
+		BMI_ERR("bmi_write_memory for htc version failed");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* set the firmware mode to STA/IBSS/AP */
+	{
+		if (bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("bmi_read_memory for setting fwmode failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		/* TODO following parameters need to be re-visited. */
+		param |= (1 << HI_OPTION_NUM_DEV_SHIFT); /* num_device */
+		/* Firmware mode ?? */
+		param |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+		/* mac_addr_method */
+		param |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+		/* firmware_bridge */
+		param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+		/* fwsubmode */
+		param |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+		BMI_INFO("NUM_DEV=%d FWMODE=0x%x FWSUBMODE=0x%x FWBR_BUF %d",
+		       1, HI_OPTION_FW_MODE_AP, 0, 0);
+
+		if (bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("BMI WRITE for setting fwmode failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+	}
+
+#if (CONFIG_DISABLE_CDC_MAX_PERF_WAR)
+	{
+		/* set the firmware to disable CDC max perf WAR */
+		if (bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *) &param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("BMI READ for setting cdc max perf failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		param |= HI_OPTION_DISABLE_CDC_MAX_PERF_WAR;
+		if (bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("setting cdc max perf failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+	}
+#endif /* CONFIG_DISABLE_CDC_MAX_PERF_WAR */
+
+#ifdef CONFIG_CNSS
+
+	ret = cnss_get_platform_cap(&cap);
+	if (ret)
+		BMI_ERR("platform capability info from CNSS not available");
+
+	if (!ret && cap.cap_flag & CNSS_HAS_EXTERNAL_SWREG) {
+		if (bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			/* fix: split literal previously read
+			 * "settingexternal" (missing space) */
+			BMI_ERR("bmi_read_memory for setting external SWREG failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		param |= HI_OPTION_USE_EXT_LDO;
+		if (bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("BMI WRITE for setting external SWREG fail");
+			return CDF_STATUS_E_FAILURE;
+		}
+	}
+#endif
+
+#ifdef WLAN_FEATURE_LPSS
+	if (scn->enablelpasssupport) {
+		if (bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *) &param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("BMI READ:Setting LPASS Support failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+
+		param |= HI_OPTION_DBUART_SUPPORT;
+		if (bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag2)),
+			(uint8_t *)&param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("BMI_READ for setting LPASS Support fail");
+			return CDF_STATUS_E_FAILURE;
+		}
+	}
+#endif
+
+	/* If host is running on a BE CPU, set the host interest area */
+	{
+#ifdef BIG_ENDIAN_HOST
+		param = 1;
+#else
+		param = 0;
+#endif
+		if (bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_be)),
+			(uint8_t *) &param, 4, scn) != CDF_STATUS_SUCCESS) {
+			BMI_ERR("setting host CPU BE mode failed");
+			return CDF_STATUS_E_FAILURE;
+		}
+	}
+
+	/* FW descriptor/Data swap flags */
+	param = 0;
+	if (bmi_write_memory(
+		hif_hia_item_address(scn->target_type,
+		offsetof(struct host_interest_s, hi_fw_swap)),
+		(uint8_t *) &param, 4, scn) != CDF_STATUS_SUCCESS) {
+		BMI_ERR("BMI WRITE failed setting FW data/desc swap flags");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/* Decide whether a dataset patch file is needed for this target type and
+ * version; no supported target currently requires one. @address unused. */
+static int ol_check_dataset_patch(struct ol_softc *scn, uint32_t *address)
+{
+	return 0;
+}
+
+
+/**
+ * ol_fw_populate_clk_settings() - derive WLAN PLL settings from the refclk
+ * @refclk: reference-clock speed read from the EFUSE
+ * @clock_s: output clock configuration
+ *
+ * Fills in the PLL divider/fraction values and settling time for the given
+ * reference clock.  For SOC_REFCLK_UNKNOWN the PLL fields are zeroed and
+ * the call fails.
+ *
+ * Return: CDF_STATUS_SUCCESS; CDF_STATUS_E_FAILURE for a NULL @clock_s or
+ * an unknown/unhandled reference clock.
+ */
+CDF_STATUS ol_fw_populate_clk_settings(A_refclk_speed_t refclk,
+				     struct cmnos_clock_s *clock_s)
+{
+	if (!clock_s)
+		return CDF_STATUS_E_FAILURE;
+
+	switch (refclk) {
+	case SOC_REFCLK_48_MHZ:
+		clock_s->wlan_pll.div = 0xE;
+		clock_s->wlan_pll.rnfrac = 0x2AAA8;
+		clock_s->pll_settling_time = 2400;
+		break;
+	case SOC_REFCLK_19_2_MHZ:
+		clock_s->wlan_pll.div = 0x24;
+		clock_s->wlan_pll.rnfrac = 0x2AAA8;
+		clock_s->pll_settling_time = 960;
+		break;
+	case SOC_REFCLK_24_MHZ:
+		clock_s->wlan_pll.div = 0x1D;
+		clock_s->wlan_pll.rnfrac = 0x15551;
+		clock_s->pll_settling_time = 1200;
+		break;
+	case SOC_REFCLK_26_MHZ:
+		clock_s->wlan_pll.div = 0x1B;
+		clock_s->wlan_pll.rnfrac = 0x4EC4;
+		clock_s->pll_settling_time = 1300;
+		break;
+	case SOC_REFCLK_37_4_MHZ:
+		clock_s->wlan_pll.div = 0x12;
+		clock_s->wlan_pll.rnfrac = 0x34B49;
+		clock_s->pll_settling_time = 1870;
+		break;
+	case SOC_REFCLK_38_4_MHZ:
+		clock_s->wlan_pll.div = 0x12;
+		clock_s->wlan_pll.rnfrac = 0x15551;
+		clock_s->pll_settling_time = 1920;
+		break;
+	case SOC_REFCLK_40_MHZ:
+		clock_s->wlan_pll.div = 0x11;
+		clock_s->wlan_pll.rnfrac = 0x26665;
+		clock_s->pll_settling_time = 2000;
+		break;
+	case SOC_REFCLK_52_MHZ:
+		clock_s->wlan_pll.div = 0x1B;
+		clock_s->wlan_pll.rnfrac = 0x4EC4;
+		clock_s->pll_settling_time = 2600;
+		break;
+	case SOC_REFCLK_UNKNOWN:
+		clock_s->wlan_pll.refdiv = 0;
+		clock_s->wlan_pll.div = 0;
+		clock_s->wlan_pll.rnfrac = 0;
+		clock_s->wlan_pll.outdiv = 0;
+		clock_s->pll_settling_time = 1024;
+		clock_s->refclk_hz = 0;
+		/* fall through - an unknown refclk is reported as failure */
+	default:
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* refclk is one of the handled enum values here, so this index into
+	 * refclk_speed_to_hz[] is in range */
+	clock_s->refclk_hz = refclk_speed_to_hz[refclk];
+	clock_s->wlan_pll.refdiv = 0;
+	clock_s->wlan_pll.outdiv = 1;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_patch_pll_switch() - reprogram the target PLL based on the EFUSE refclk
+ * @scn: HIF/BMI context
+ *
+ * Reads the crystal-frequency selection from the EFUSE and walks an 11-step
+ * sequence of SoC/WMAC register accesses to switch the CPU clock onto the
+ * PLL, finishing by writing the PLL-init-done flag and CPU speed into
+ * target memory.
+ *
+ * NOTE(review): for target versions not listed in the switch below this
+ * logs an error but still returns CDF_STATUS_SUCCESS — presumably so
+ * targets that do not need this workaround keep booting; confirm that is
+ * intentional.  The RTC_SYNC_STATUS polling loops in steps 6 and 8 have no
+ * timeout and could spin forever on an unresponsive target.
+ *
+ * Return: CDF_STATUS_SUCCESS, or the first failing BMI status.
+ */
+CDF_STATUS ol_patch_pll_switch(struct ol_softc *scn)
+{
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+	uint32_t addr = 0;
+	uint32_t reg_val = 0;
+	uint32_t mem_val = 0;
+	struct cmnos_clock_s clock_s;
+	uint32_t cmnos_core_clk_div_addr = 0;
+	uint32_t cmnos_cpu_pll_init_done_addr = 0;
+	uint32_t cmnos_cpu_speed_addr = 0;
+
+	/* pick the per-revision addresses of the target-memory flags */
+	switch (scn->target_version) {
+	case AR6320_REV1_1_VERSION:
+		cmnos_core_clk_div_addr = AR6320_CORE_CLK_DIV_ADDR;
+		cmnos_cpu_pll_init_done_addr = AR6320_CPU_PLL_INIT_DONE_ADDR;
+		cmnos_cpu_speed_addr = AR6320_CPU_SPEED_ADDR;
+		break;
+	case AR6320_REV1_3_VERSION:
+	case AR6320_REV2_1_VERSION:
+		cmnos_core_clk_div_addr = AR6320V2_CORE_CLK_DIV_ADDR;
+		cmnos_cpu_pll_init_done_addr = AR6320V2_CPU_PLL_INIT_DONE_ADDR;
+		cmnos_cpu_speed_addr = AR6320V2_CPU_SPEED_ADDR;
+		break;
+	case AR6320_REV3_VERSION:
+	case AR6320_REV3_2_VERSION:
+		cmnos_core_clk_div_addr = AR6320V3_CORE_CLK_DIV_ADDR;
+		cmnos_cpu_pll_init_done_addr = AR6320V3_CPU_PLL_INIT_DONE_ADDR;
+		cmnos_cpu_speed_addr = AR6320V3_CPU_SPEED_ADDR;
+		break;
+	default:
+		BMI_ERR("%s: Unsupported target version %x", __func__,
+		       scn->target_version);
+		goto end;
+	}
+
+	addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read EFUSE Addr");
+		goto end;
+	}
+
+	status = ol_fw_populate_clk_settings(EFUSE_XTAL_SEL_GET(reg_val),
+					     &clock_s);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to set clock settings");
+		goto end;
+	}
+	BMI_DBG("crystal_freq: %dHz", clock_s.refclk_hz);
+
+	/* ------Step 1---- */
+	reg_val = 0;
+	addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read PLL_CONFIG Addr");
+		goto end;
+	}
+	BMI_DBG("Step 1a: %8X", reg_val);
+
+	reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
+	reg_val |= (BB_PLL_CONFIG_FRAC_SET(clock_s.wlan_pll.rnfrac) |
+		    BB_PLL_CONFIG_OUTDIV_SET(clock_s.wlan_pll.outdiv));
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_CONFIG Addr");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back PLL_CONFIG Addr");
+		goto end;
+	}
+	BMI_DBG("Step 1b: %8X", reg_val);
+
+	/* ------Step 2---- */
+	reg_val = 0;
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read PLL_SETTLE Addr");
+		goto end;
+	}
+	BMI_DBG("Step 2a: %8X", reg_val);
+
+	reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
+	reg_val |= WLAN_PLL_SETTLE_TIME_SET(clock_s.pll_settling_time);
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_SETTLE Addr");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back PLL_SETTLE Addr");
+		goto end;
+	}
+	BMI_DBG("Step 2b: %8X", reg_val);
+
+	/* ------Step 3---- */
+	reg_val = 0;
+	addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read CLK_CTRL Addr");
+		goto end;
+	}
+	BMI_DBG("Step 3a: %8X", reg_val);
+
+	reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
+	reg_val |= SOC_CORE_CLK_CTRL_DIV_SET(1);
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write CLK_CTRL Addr");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back CLK_CTRL Addr");
+		goto end;
+	}
+	BMI_DBG("Step 3b: %8X", reg_val);
+
+	/* ------Step 4----- */
+	mem_val = 1;
+	status = bmi_write_memory(cmnos_core_clk_div_addr,
+				  (uint8_t *) &mem_val, 4, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write CLK_DIV Addr");
+		goto end;
+	}
+
+	/* ------Step 5----- */
+	reg_val = 0;
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read PLL_CTRL Addr");
+		goto end;
+	}
+	BMI_DBG("Step 5a: %8X", reg_val);
+
+	reg_val &= ~(WLAN_PLL_CONTROL_REFDIV_MASK | WLAN_PLL_CONTROL_DIV_MASK |
+		     WLAN_PLL_CONTROL_NOPWD_MASK);
+	reg_val |= (WLAN_PLL_CONTROL_REFDIV_SET(clock_s.wlan_pll.refdiv) |
+		    WLAN_PLL_CONTROL_DIV_SET(clock_s.wlan_pll.div) |
+		    WLAN_PLL_CONTROL_NOPWD_SET(1));
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_CTRL Addr");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back PLL_CTRL Addr");
+		goto end;
+	}
+	OS_DELAY(100);
+	BMI_DBG("Step 5b: %8X", reg_val);
+
+	/* ------Step 6------- */
+	/* wait for the PLL change to settle (no timeout — see NOTE above) */
+	do {
+		reg_val = 0;
+		status = bmi_read_soc_register((RTC_WMAC_BASE_ADDRESS |
+				RTC_SYNC_STATUS_OFFSET), &reg_val, scn);
+		if (status != CDF_STATUS_SUCCESS) {
+			BMI_ERR("Failed to read RTC_SYNC_STATUS Addr");
+			goto end;
+		}
+	} while (RTC_SYNC_STATUS_PLL_CHANGING_GET(reg_val));
+
+	/* ------Step 7------- */
+	reg_val = 0;
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read PLL_CTRL Addr for CTRL_BYPASS");
+		goto end;
+	}
+	BMI_DBG("Step 7a: %8X", reg_val);
+
+	reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
+	reg_val |= WLAN_PLL_CONTROL_BYPASS_SET(0);
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_CTRL Addr for CTRL_BYPASS");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back PLL_CTRL Addr for CTRL_BYPASS");
+		goto end;
+	}
+	BMI_DBG("Step 7b: %8X", reg_val);
+
+	/* ------Step 8-------- */
+	do {
+		reg_val = 0;
+		status = bmi_read_soc_register((RTC_WMAC_BASE_ADDRESS |
+				RTC_SYNC_STATUS_OFFSET), &reg_val, scn);
+		if (status != CDF_STATUS_SUCCESS) {
+			BMI_ERR("Failed to read SYNC_STATUS Addr");
+			goto end;
+		}
+	} while (RTC_SYNC_STATUS_PLL_CHANGING_GET(reg_val));
+
+	/* ------Step 9-------- */
+	reg_val = 0;
+	addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read CPU_CLK Addr");
+		goto end;
+	}
+	BMI_DBG("Step 9a: %8X", reg_val);
+
+	reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
+	reg_val |= SOC_CPU_CLOCK_STANDARD_SET(1);
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write CPU_CLK Addr");
+		goto end;
+	}
+
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back CPU_CLK Addr");
+		goto end;
+	}
+	BMI_DBG("Step 9b: %8X", reg_val);
+
+	/* ------Step 10------- */
+	reg_val = 0;
+	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read PLL_CTRL Addr for NOPWD");
+		goto end;
+	}
+	BMI_DBG("Step 10a: %8X", reg_val);
+
+	reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
+	status = bmi_write_soc_register(addr, reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_CTRL Addr for NOPWD");
+		goto end;
+	}
+	reg_val = 0;
+	status = bmi_read_soc_register(addr, &reg_val, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to read back PLL_CTRL Addr for NOPWD");
+		goto end;
+	}
+	BMI_DBG("Step 10b: %8X", reg_val);
+
+	/* ------Step 11------- */
+	mem_val = 1;
+	status = bmi_write_memory(cmnos_cpu_pll_init_done_addr,
+				  (uint8_t *) &mem_val, 4, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write PLL_INIT Addr");
+		goto end;
+	}
+
+	mem_val = TARGET_CPU_FREQ;
+	status = bmi_write_memory(cmnos_cpu_speed_addr,
+				  (uint8_t *) &mem_val, 4, scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to write CPU_SPEED Addr");
+		goto end;
+	}
+
+end:
+	return status;
+}
+
+#ifdef CONFIG_CNSS
+/* AXI Start Address */
+#define TARGET_ADDR (0xa0000)
+
+/*
+ * ol_transfer_codeswap_struct() - push the codeswap descriptor to the target
+ * @scn: HIF/BMI context
+ *
+ * Fetches the codeswap code-segment descriptor from the CNSS platform
+ * driver and writes it over BMI to the fixed AXI address TARGET_ADDR.
+ * Errors are logged and the function simply returns.
+ */
+void ol_transfer_codeswap_struct(struct ol_softc *scn)
+{
+	struct codeswap_codeseg_info wlan_codeswap;
+	CDF_STATUS status;
+
+	if (!scn) {
+		BMI_ERR("%s: ol_softc is null", __func__);
+		return;
+	}
+
+	if (cnss_get_codeswap_struct(&wlan_codeswap)) {
+		BMI_ERR("%s: failed to get codeswap structure", __func__);
+		return;
+	}
+
+	status = bmi_write_memory(TARGET_ADDR, (uint8_t *)&wlan_codeswap,
+				  sizeof(wlan_codeswap), scn);
+	if (status != CDF_STATUS_SUCCESS) {
+		BMI_ERR("Failed to Write 0xa0000 to Target");
+		return;
+	}
+
+	BMI_INFO("codeswap structure is successfully downloaded");
+}
+#endif
+
+/**
+ * ol_download_firmware() - download all firmware images to the target
+ * @scn: ol_softc handle for the target
+ *
+ * Drives the BMI download sequence: board data (from flash/EEPROM or
+ * file), the optional OTP and setup images (executed in place when BMI
+ * execute is available), the main firmware image, any dataset patch,
+ * and finally the UART-print / firmware-log options in the target's
+ * host-interest area.
+ *
+ * NOTE(review): the return paths mix CDF_STATUS values, plain -1 and
+ * the EOK-based 'status' even though the declared return type is
+ * CDF_STATUS; callers should only rely on zero vs non-zero.
+ *
+ * Return: 0 (EOK/CDF_STATUS_SUCCESS) on success, non-zero on failure
+ */
+CDF_STATUS ol_download_firmware(struct ol_softc *scn)
+{
+	uint32_t param, address = 0;
+	int status = !EOK;
+	CDF_STATUS ret;
+
+#ifdef CONFIG_CNSS
+	if (0 != cnss_get_fw_files_for_target(&scn->fw_files,
+					      scn->target_type,
+					      scn->target_version)) {
+		BMI_ERR("%s: No FW files from CNSS driver", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+#endif
+	/* Transfer Board Data from Target EEPROM to Target RAM */
+	/* Determine where in Target RAM to write Board Data */
+	bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_board_data)),
+			(uint8_t *)&address, 4, scn);
+
+	/* hi_board_data not populated by target: fall back to default */
+	if (!address) {
+		address = AR6004_REV5_BOARD_DATA_ADDRESS;
+		BMI_DBG("%s: Target address not known! Using 0x%x",
+						__func__, address);
+	}
+	ret = ol_patch_pll_switch(scn);
+	if (ret != CDF_STATUS_SUCCESS) {
+		BMI_ERR("pll switch failed. status %d", ret);
+		return ret;
+	}
+	if (scn->cal_in_flash) {
+		/* Write EEPROM or Flash data to Target RAM */
+		status = ol_transfer_bin_file(scn, ATH_FLASH_FILE,
+						address, false);
+	}
+
+	if (status == EOK) {
+		/* Record the fact that Board Data is initialized */
+		param = 1;
+		bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s,
+				hi_board_data_initialized)),
+				(uint8_t *) &param, 4, scn);
+	} else {
+		/* Flash is either not available or invalid */
+		if (ol_transfer_bin_file
+			    (scn, ATH_BOARD_DATA_FILE, address, false) != EOK) {
+			return -1;
+		}
+
+		/* Record the fact that Board Data is initialized */
+		param = 1;
+		bmi_write_memory(
+			hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s,
+				hi_board_data_initialized)),
+				(uint8_t *) &param, 4, scn);
+
+		/* Transfer One Time Programmable data */
+		address = BMI_SEGMENTED_WRITE_ADDR;
+		BMI_INFO("%s: Using 0x%x for the remainder of init",
+				__func__, address);
+
+#ifdef CONFIG_CNSS
+		ol_transfer_codeswap_struct(scn);
+#endif
+		status = ol_transfer_bin_file(scn, ATH_OTP_FILE,
+						address, true);
+		/* Execute the OTP code only if entry found and downloaded */
+		if (status == EOK) {
+			param = 0;
+#ifndef FEATURE_BMI_2
+			bmi_execute(address, &param, scn);
+#endif
+		} else if (status < 0) {
+			return status;
+		}
+	}
+
+	/* Optional setup image; executed in place when present */
+	if (ol_transfer_bin_file(scn, ATH_SETUP_FILE,
+		BMI_SEGMENTED_WRITE_ADDR, true) == EOK) {
+		param = 0;
+#ifndef FEATURE_BMI_2
+		bmi_execute(address, &param, scn);
+#endif
+	}
+
+	/* Download Target firmware
+	 * TODO point to target specific files in runtime
+	 */
+	address = BMI_SEGMENTED_WRITE_ADDR;
+	if (ol_transfer_bin_file(scn, ATH_FIRMWARE_FILE,
+				address, true) != EOK) {
+		return -1;
+	}
+
+	/* Apply the patches */
+	if (ol_check_dataset_patch(scn, &address)) {
+		if ((ol_transfer_bin_file(scn, ATH_PATCH_FILE, address, false))
+		    != EOK) {
+			return -1;
+		}
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_dset_list_head)),
+			(uint8_t *) &address, 4, scn);
+	}
+
+	/* Select the UART TX pin by chip revision, then enable/disable
+	 * serial output via the host-interest area.
+	 */
+	if (scn->enableuartprint ||
+	    (WLAN_IS_EPPING_ENABLED(cds_get_conparam()) &&
+	     WLAN_IS_EPPING_FW_UART(cds_get_conparam()))) {
+		switch (scn->target_version) {
+		case AR6004_VERSION_REV1_3:
+			param = 11;
+			break;
+		case AR6320_REV1_VERSION:
+		case AR6320_REV2_VERSION:
+		case AR6320_REV3_VERSION:
+		case AR6320_REV3_2_VERSION:
+		case AR6320_REV4_VERSION:
+		case AR6320_DEV_VERSION:
+			param = 6;
+			break;
+		default:
+			/* Configure GPIO AR9888 UART */
+			param = 7;
+		}
+
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_dbg_uart_txpin)),
+			(uint8_t *)&param, 4, scn);
+		param = 1;
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_serial_enable)),
+			(uint8_t *)&param, 4, scn);
+	} else {
+		/*
+		 * Explicitly setting UART prints to zero as target turns it on
+		 * based on scratch registers.
+		 */
+		param = 0;
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_serial_enable)),
+			(uint8_t *)&param, 4, scn);
+	}
+
+	/* Read-modify-write hi_option_flag to toggle the dbglog bit only */
+	if (scn->enablefwlog) {
+		bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *)&param, 4, scn);
+
+		param &= ~(HI_OPTION_DISABLE_DBGLOG);
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *)&param, 4, scn);
+	} else {
+		/*
+		 * Explicitly setting fwlog prints to zero as target turns it on
+		 * based on scratch registers.
+		 */
+		bmi_read_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *)&param, 4, scn);
+
+		param |= HI_OPTION_DISABLE_DBGLOG;
+		bmi_write_memory(hif_hia_item_address(scn->target_type,
+			offsetof(struct host_interest_s, hi_option_flag)),
+			(uint8_t *) &param, 4, scn);
+	}
+
+	return status;
+}
+
+/**
+ * ol_diag_read() - read target memory through the diagnostic window
+ * @scn: ol_softc handle for the target
+ * @buffer: destination buffer for the data read
+ * @pos: target address to read from
+ * @count: number of bytes to read
+ *
+ * A dword-aligned 4-byte request goes through the register access path;
+ * anything else goes through the memory path, split into chunks of at
+ * most PCIE_READ_LIMIT bytes.
+ *
+ * Return: @count on success, -EIO on any failed read
+ */
+int ol_diag_read(struct ol_softc *scn, uint8_t *buffer,
+		 uint32_t pos, size_t count)
+{
+	int result = 0;
+
+	if (count == 4 && !(pos & 3)) {
+		/* Single aligned dword: use the register access path */
+		result = hif_diag_read_access(scn, pos, (uint32_t *) buffer);
+	} else if (count <= PCIE_READ_LIMIT) {
+		result = hif_diag_read_mem(scn, pos, buffer, count);
+	} else {
+		/* Large request: read PCIE_READ_LIMIT sized chunks, with a
+		 * shorter final chunk for the remainder.
+		 */
+		size_t done = 0;
+		size_t chunk = PCIE_READ_LIMIT;
+
+		while (done < count && result == 0) {
+			result = hif_diag_read_mem(scn, pos, buffer, chunk);
+			if (result == 0) {
+				buffer += chunk;
+				pos += chunk;
+				done += chunk;
+				if (count - done < PCIE_READ_LIMIT)
+					chunk = count - done;
+			}
+		}
+	}
+
+	return result ? -EIO : count;
+}
+
+/**
+ * ol_ath_get_reg_table() - look up the register-dump table for a chip
+ * @target_version: target chip version (AR6320_REV*_VERSION)
+ * @reg_table: out-parameter receiving the table pointer and entry count
+ *
+ * Selects the static per-revision register section table used when
+ * dumping the target register space.
+ *
+ * Return: total register-dump length in bytes for @target_version, or 0
+ * for unknown versions (in which case @reg_table is zeroed/NULLed).
+ */
+static int ol_ath_get_reg_table(uint32_t target_version,
+				tgt_reg_table *reg_table)
+{
+	int section_len = 0;
+
+	if (!reg_table) {
+		cdf_assert(0);
+		return section_len;
+	}
+
+	switch (target_version) {
+	case AR6320_REV2_1_VERSION:
+		reg_table->section =
+			(tgt_reg_section *) &ar6320v2_reg_table[0];
+		reg_table->section_size = sizeof(ar6320v2_reg_table)
+					  / sizeof(ar6320v2_reg_table[0]);
+		section_len = AR6320_REV2_1_REG_SIZE;
+		break;
+	case AR6320_REV3_VERSION:
+	case AR6320_REV3_2_VERSION:
+		reg_table->section =
+			(tgt_reg_section *) &ar6320v3_reg_table[0];
+		reg_table->section_size = sizeof(ar6320v3_reg_table)
+					  / sizeof(ar6320v3_reg_table[0]);
+		section_len = AR6320_REV3_REG_SIZE;
+		break;
+	default:
+		reg_table->section = (void *)NULL;
+		reg_table->section_size = 0;
+		section_len = 0;
+	}
+
+	return section_len;
+}
+
+/**
+ * ol_diag_read_reg_loc() - dump the target register sections into a buffer
+ * @scn: ol_softc handle for the target
+ * @buffer: destination buffer
+ * @buffer_len: destination buffer size in bytes
+ *
+ * Walks the per-chip register table (see ol_ath_get_reg_table()) and
+ * reads each section via ol_diag_read().  The address gap between
+ * consecutive sections is accounted for by advancing the output pointer.
+ *
+ * NOTE(review): the gap bytes skipped by 'fill_len' are never written,
+ * so those ranges of @buffer keep whatever content the caller left
+ * there -- confirm consumers of the dump expect this.
+ *
+ * Return: number of bytes accounted in @buffer (possibly a partial count
+ * when @buffer_len is too small), or -EIO on a failed register read.
+ */
+static int ol_diag_read_reg_loc(struct ol_softc *scn, uint8_t *buffer,
+				uint32_t buffer_len)
+{
+	int i, len, section_len, fill_len;
+	int dump_len, result = 0;
+	tgt_reg_table reg_table;
+	tgt_reg_section *curr_sec, *next_sec;
+
+	section_len = ol_ath_get_reg_table(scn->target_version, &reg_table);
+
+	if (!reg_table.section || !reg_table.section_size || !section_len) {
+		BMI_ERR("%s: failed to get reg table", __func__);
+		result = -EIO;
+		goto out;
+	}
+
+	curr_sec = reg_table.section;
+	for (i = 0; i < reg_table.section_size; i++) {
+
+		dump_len = curr_sec->end_addr - curr_sec->start_addr;
+
+		if ((buffer_len - result) < dump_len) {
+			BMI_ERR("Not enough memory to dump the registers:"
+			       " %d: 0x%08x-0x%08x", i,
+			       curr_sec->start_addr, curr_sec->end_addr);
+			goto out;
+		}
+
+		len = ol_diag_read(scn, buffer, curr_sec->start_addr, dump_len);
+
+		if (len != -EIO) {
+			buffer += len;
+			result += len;
+		} else {
+			BMI_ERR("%s: can't read reg 0x%08x len = %d",
+				__func__, curr_sec->start_addr, dump_len);
+			result = -EIO;
+			goto out;
+		}
+
+		/* Account for the address hole before the next section */
+		if (result < section_len) {
+			next_sec = (tgt_reg_section *) ((uint8_t *) curr_sec
+							+ sizeof(*curr_sec));
+			fill_len = next_sec->start_addr - curr_sec->end_addr;
+			if ((buffer_len - result) < fill_len) {
+				BMI_ERR("Not enough memory to fill registers:"
+				       " %d: 0x%08x-0x%08x", i,
+				       curr_sec->end_addr,
+				       next_sec->start_addr);
+				goto out;
+			}
+
+			if (fill_len) {
+				buffer += fill_len;
+				result += fill_len;
+			}
+		}
+		curr_sec++;
+	}
+
+out:
+	return result;
+}
+
+/**
+ * ol_dump_target_memory() - dump the DRAM and AXI target sections
+ * @scn: ol_softc handle for the target
+ * @memory_block: pre-allocated buffer; must hold at least
+ *                DRAM_SIZE + AXI_SIZE bytes
+ *
+ * Copies the target DRAM section followed by the AXI section into
+ * @memory_block through the HIF diagnostic interface.
+ *
+ * Return: None
+ */
+void ol_dump_target_memory(struct ol_softc *scn, void *memory_block)
+{
+	char *buffer_loc = memory_block;
+	u_int32_t section_count = 0;
+	u_int32_t address = 0;
+	u_int32_t size = 0;
+
+	for (; section_count < 2; section_count++) {
+		switch (section_count) {
+		case 0:
+			address = DRAM_LOCAL_BASE_ADDR;
+			size = DRAM_SIZE;
+			break;
+		case 1:
+			address = AXI_LOCATION;
+			size = AXI_SIZE;
+			/* fix: missing break caused implicit fallthrough
+			 * into default (harmless today, fragile tomorrow)
+			 */
+			break;
+		default:
+			break;
+		}
+		hif_dump_target_memory(scn, buffer_loc, address, size);
+		buffer_loc += size;
+	}
+}
+
+/**
+ * ol_target_coredump() - perform a core dump of the target
+ * @inst: ol_softc handle (passed as void * by the caller)
+ * @memory_block: non-NULL reserved memory location for the dump
+ * @block_len: size of @memory_block / dump to collect
+ *
+ * Copies three target sections in order - DRAM, AXI and the register
+ * space - into @memory_block via the HIF diagnostic interface.  On a
+ * read failure the CE registers and target memory are additionally
+ * dumped for debugging.
+ *
+ * Return: 0 on success (also when the dump stops early because the
+ * buffer is too small), -EACCES if a section could not be read.
+ */
+static int ol_target_coredump(void *inst, void *memory_block,
+					uint32_t block_len)
+{
+	struct ol_softc *scn = (struct ol_softc *)inst;
+	int8_t *buffer_loc = memory_block;
+	int result = 0;
+	int ret = 0;
+	uint32_t amount_read = 0;
+	uint32_t section_count = 0;
+	uint32_t pos = 0;
+	uint32_t read_len = 0;
+
+	/*
+	 * SECTION = DRAM
+	 * START   = 0x00400000
+	 * LENGTH  = 0x000a8000
+	 *
+	 * SECTION = AXI
+	 * START   = 0x000a0000
+	 * LENGTH  = 0x00018000
+	 *
+	 * SECTION = REG
+	 * START   = 0x00000800
+	 * LENGTH  = 0x0007F820
+	 */
+
+	while ((section_count < 3) && (amount_read < block_len)) {
+		switch (section_count) {
+		case 0:
+			/* DRAM SECTION */
+			pos = DRAM_LOCATION;
+			read_len = DRAM_SIZE;
+			BMI_ERR("%s: Dumping DRAM section...", __func__);
+			break;
+		case 1:
+			/* AXI SECTION */
+			pos = AXI_LOCATION;
+			read_len = AXI_SIZE;
+			BMI_ERR("%s: Dumping AXI section...", __func__);
+			break;
+		case 2:
+			/* REG SECTION */
+			pos = REGISTER_LOCATION;
+			/* ol_diag_read_reg_loc checks for buffer overrun */
+			read_len = 0;
+			BMI_ERR("%s: Dumping Register section...", __func__);
+			break;
+		}
+
+		if ((block_len - amount_read) >= read_len) {
+			if (pos == REGISTER_LOCATION)
+				result = ol_diag_read_reg_loc(scn, buffer_loc,
+							      block_len -
+							      amount_read);
+			else
+				result = ol_diag_read(scn, buffer_loc,
+					      pos, read_len);
+			if (result != -EIO) {
+				amount_read += result;
+				buffer_loc += result;
+				section_count++;
+			} else {
+				BMI_ERR("Could not read dump section!");
+				dump_ce_register(scn);
+				dump_ce_debug_register(scn);
+				ol_dump_target_memory(scn, memory_block);
+				ret = -EACCES;
+				break;  /* Could not read the section */
+			}
+		} else {
+			BMI_ERR("Insufficient room in dump buffer!");
+			break;  /* Insufficient room in buffer */
+		}
+	}
+	return ret;
+}
+
+#define MAX_SUPPORTED_PEERS_REV1_1 14
+#define MAX_SUPPORTED_PEERS_REV1_3 32
+
+/**
+ * ol_get_number_of_peers_supported() - clamp the configured peer count
+ * @scn: ol_softc handle carrying target_version and max_no_of_peers
+ *
+ * Limits the configured peer count to the per-revision cap:
+ * MAX_SUPPORTED_PEERS_REV1_1 for AR6320 rev 1.1 targets and
+ * MAX_SUPPORTED_PEERS_REV1_3 for every other revision.
+ *
+ * Return: the number of peers the firmware may be configured with
+ */
+uint8_t ol_get_number_of_peers_supported(struct ol_softc *scn)
+{
+	uint8_t peer_cap;
+
+	if (scn->target_version == AR6320_REV1_1_VERSION)
+		peer_cap = MAX_SUPPORTED_PEERS_REV1_1;
+	else
+		peer_cap = MAX_SUPPORTED_PEERS_REV1_3;
+
+	return (scn->max_no_of_peers > peer_cap) ?
+		peer_cap : scn->max_no_of_peers;
+}

+ 140 - 0
core/cdf/inc/cdf_atomic.h

@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_atomic.h
+ * This file abstracts an atomic counter.
+ */
+
+#ifndef _CDF_ATOMIC_H
+#define _CDF_ATOMIC_H
+
+#include <i_cdf_atomic.h>
+
+/**
+ * cdf_atomic_t - atomic type of variable
+ *
+ * Use this when you want a simple resource counter etc. which is atomic
+ * across multiple CPU's. These maybe slower than usual counters on some
+ * platforms/OS'es, so use them with caution.
+ */
+
+typedef __cdf_atomic_t cdf_atomic_t;
+
+/**
+ * cdf_atomic_init() - initialize an atomic type variable
+ * @v:	A pointer to an opaque atomic variable
+ *
+ * Must be called before @v is used with any other cdf_atomic API.
+ *
+ * Return: None
+ */
+static inline void cdf_atomic_init(cdf_atomic_t *v)
+{
+	__cdf_atomic_init(v);
+}
+
+/**
+ * cdf_atomic_read() - read the value of an atomic variable
+ * @v:	A pointer to an opaque atomic variable
+ *
+ * Note that with concurrent writers the value may already be stale by
+ * the time the caller inspects it.
+ *
+ * Return: The current value of the variable
+ */
+static inline uint32_t cdf_atomic_read(cdf_atomic_t *v)
+{
+	return __cdf_atomic_read(v);
+}
+
+/**
+ * cdf_atomic_inc() - atomically increment the value of an atomic variable
+ * @v:	A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void cdf_atomic_inc(cdf_atomic_t *v)
+{
+	__cdf_atomic_inc(v);
+}
+
+/**
+ * cdf_atomic_dec() - atomically decrement the value of an atomic variable
+ * @v:	A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void cdf_atomic_dec(cdf_atomic_t *v)
+{
+	__cdf_atomic_dec(v);
+}
+
+/**
+ * cdf_atomic_add() - atomically add a value to an atomic variable
+ * @i:	The amount by which to increase the atomic counter
+ * @v:	A pointer to an opaque atomic variable
+ *
+ * Note the parameter order (@i first) mirrors the signature.
+ *
+ * Return: None
+ */
+static inline void cdf_atomic_add(int i, cdf_atomic_t *v)
+{
+	__cdf_atomic_add(i, v);
+}
+
+/**
+ * cdf_atomic_dec_and_test() - atomically decrement a variable and check
+ *				whether the new value is zero
+ * @v: A pointer to an opaque atomic variable
+ *
+ * The decrement and the zero test happen as one atomic operation.
+ *
+ * Return:
+ *    true (non-zero) if the new value is zero,
+ *    or false (0) if the new value is non-zero
+ */
+static inline uint32_t cdf_atomic_dec_and_test(cdf_atomic_t *v)
+{
+	return __cdf_atomic_dec_and_test(v);
+}
+
+/**
+ * cdf_atomic_set() - atomically set an atomic variable to a value
+ * @v: A pointer to an opaque atomic variable
+ * @i: The value to store into @v
+ *
+ * Return: None
+ */
+static inline void cdf_atomic_set(cdf_atomic_t *v, int i)
+{
+	__cdf_atomic_set(v, i);
+}
+
+/**
+ * cdf_atomic_inc_return() - atomically increment an atomic variable and
+ *			     return the new value
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: The value of the variable after the increment
+ */
+static inline uint32_t cdf_atomic_inc_return(cdf_atomic_t *v)
+{
+	return __cdf_atomic_inc_return(v);
+}
+
+#endif

+ 138 - 0
core/cdf/inc/cdf_defer.h

@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_defer.h
+ * This file abstracts deferred execution contexts.
+ */
+
+#ifndef __CDF_DEFER_H
+#define __CDF_DEFER_H
+
+#include <cdf_types.h>
+#include <i_cdf_defer.h>
+
+/**
+ * This implements work queues (worker threads, kernel threads etc.).
+ * Note that there is no cancel on a scheduled work. You cannot free a work
+ * item if it is queued, and you cannot know whether a work item is queued
+ * unless it is currently running (in which case it is no longer queued).
+ *
+ * So if, say, a module is asked to unload itself, how exactly will it make
+ * sure that the work is not queued, for OS'es that don't provide such a
+ * mechanism?
+ */
+
+/* cdf_work_t - representation of a work queue */
+typedef __cdf_work_t cdf_work_t;
+
+/* cdf_work_t - representation of a bottom half */
+typedef __cdf_bh_t cdf_bh_t;
+
+/**
+ * cdf_create_bh() - create a bottom-half deferred handler
+ * @hdl:   OS handle
+ * @bh:    Bottom-half instance to initialize
+ * @func:  Deferred function to run at bottom half interrupt
+ *         context
+ * @arg:   Opaque argument passed back to @func when it runs
+ *
+ * Return: None
+ */
+static inline void
+cdf_create_bh(cdf_handle_t hdl, cdf_bh_t *bh, cdf_defer_fn_t func, void *arg)
+{
+	__cdf_init_bh(hdl, bh, func, arg);
+}
+
+/**
+ * cdf_sched_bh() - schedule a bottom half (DPC) for execution
+ * @hdl:	OS handle
+ * @bh:		Bottom-half instance previously set up via cdf_create_bh()
+ *
+ * Return: None
+ */
+static inline void cdf_sched_bh(cdf_handle_t hdl, cdf_bh_t *bh)
+{
+	__cdf_sched_bh(hdl, bh);
+}
+
+/**
+ * cdf_destroy_bh() - destroy a bottom half (DPC)
+ * @hdl:	OS handle
+ * @bh:		Bottom-half instance to tear down
+ *
+ * Implemented as __cdf_disable_bh(): the handler is disabled so it will
+ * no longer run; no memory is released here.
+ *
+ * Return: None
+ */
+static inline void cdf_destroy_bh(cdf_handle_t hdl, cdf_bh_t *bh)
+{
+	__cdf_disable_bh(hdl, bh);
+}
+
+/*********************Non-Interrupt Context deferred Execution***************/
+
+/**
+ * cdf_create_work() - create a work/task queue, This runs in non-interrupt
+ *		       context, so can be preempted by H/W & S/W intr
+ * @hdl:	OS handle
+ * @work:	Work instance to initialize
+ * @func:	Deferred function to run at bottom half non-interrupt
+ *		context
+ * @arg:	Opaque argument passed back to @func when it runs
+ *
+ * Return: None
+ */
+static inline void
+cdf_create_work(cdf_handle_t hdl, cdf_work_t *work,
+		cdf_defer_fn_t func, void *arg)
+{
+	__cdf_init_work(hdl, work, func, arg);
+}
+
+/**
+ * cdf_sched_work() - schedule a deferred task on non-interrupt context
+ * @hdl:	OS handle
+ * @work:	Work instance previously set up via cdf_create_work()
+ *
+ * Return: None
+ */
+static inline void cdf_sched_work(cdf_handle_t hdl, cdf_work_t *work)
+{
+	__cdf_sched_work(hdl, work);
+}
+
+/**
+ * cdf_destroy_work() -  destroy the deferred task
+ * @hdl:	OS handle
+ * @work:	Work instance to tear down
+ *
+ * Implemented as __cdf_disable_work().  NOTE(review): whether this is
+ * synchronous (waits for a running instance to finish) depends on the
+ * platform implementation -- confirm before relying on it.
+ *
+ * Return: None
+ */
+static inline void cdf_destroy_work(cdf_handle_t hdl, cdf_work_t *work)
+{
+	__cdf_disable_work(hdl, work);
+}
+
+#endif /*__CDF_DEFER_H*/

+ 154 - 0
core/cdf/inc/cdf_event.h

@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_EVENT_H)
+#define __CDF_EVENT_H
+
+/**
+ * DOC: cdf_event.h
+ *
+ * Connectivity driver framework (CDF) events API
+ *
+ **/
+
+/* Include Files */
+#include "cdf_status.h"
+#include "cdf_types.h"
+#include "i_cdf_event.h"
+
+/* Preprocessor definitions and constants */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* Type declarations */
+
+/* Function declarations and documentation */
+
+/**
+ * cdf_event_init() - initializes the specified event
+ *
+ * @event:	Pointer to CDF event object to initialize
+ *
+ * Initializes the specified event. Upon successful initialization the state
+ * of the event becomes initialized and not signaled.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS - Event was successfully initialized and is ready to
+ *                         be used
+ *    Otherwise failure CDF reason code
+ */
+
+CDF_STATUS cdf_event_init(cdf_event_t *event);
+
+/**
+ * cdf_event_set() -  set a CDF event
+ *
+ * @event:	Pointer of CDF event to set to the signalled state
+ *
+ * The state of the specified event is set to 'signalled' by calling
+ * cdf_event_set().  The state of the event remains signalled until an
+ * explicit call to cdf_event_reset().
+ *
+ * Any threads waiting on the event as a result of a cdf_event_wait() will
+ * be unblocked and available to be scheduled for execution when the event
+ * is signaled by a call to cdf_event_set().
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS - Event was successfully set
+ *      Otherwise failure CDF reason code
+ */
+CDF_STATUS cdf_event_set(cdf_event_t *event);
+
+/**
+ * cdf_event_reset() -  reset a CDF event
+ *
+ * @event:	Pointer of CDF event to reset
+ *
+ * The state of the specified event is set to 'NOT signalled' by calling
+ * cdf_event_reset().  The state of the event remains NOT signalled until an
+ * explicit call to cdf_event_set().
+ *
+ * This function sets the event to a NOT signalled state even if the event
+ * was signalled multiple times before being reset.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS - Event was successfully reset
+ *      Otherwise failure CDF reason code
+ */
+CDF_STATUS cdf_event_reset(cdf_event_t *event);
+
+/**
+ * cdf_event_destroy() -  destroy a CDF event
+ *
+ * @event:	Pointer of CDF event to destroy
+ *
+ * The function destroys the event object referenced by event.
+ * After a successful return from cdf_event_destroy() the event object becomes,
+ * in effect, uninitialized.
+ *
+ * A destroyed event object can be reinitialized using cdf_event_init();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined.  Calls to CDF event functions to manipulate the lock such
+ * as cdf_event_set() will fail if the event is destroyed.  Therefore,
+ * don't use the event after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS - Event was successfully destroyed
+ *      Otherwise failure CDF reason code
+ */
+CDF_STATUS cdf_event_destroy(cdf_event_t *event);
+
+/**
+ * cdf_wait_single_event() -  wait for a single input CDF event to be set
+ *
+ * @event:	Pointer of CDF event to wait on
+ * @timeout:	Timeout value in milli seconds
+ *
+ * This API waits for the event to be set. This function returns
+ * if this interval elapses, regardless if any of the events have
+ * been set.  An input value of 0 for this timeout parameter means
+ * to wait infinitely, meaning a timeout will never occur.
+ *
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS - the wait was satisfied by the event being
+ *                         set.
+ *
+ *    CDF_STATUS_E_TIMEOUT - the timeout interval elapsed before the
+ *    event was set.
+ *
+ *    CDF_STATUS_E_INVAL - The value specified by event is invalid.
+ */
+CDF_STATUS cdf_wait_single_event(cdf_event_t *pEvent,
+				 uint32_t timeout);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __CDF_EVENT_H */

+ 110 - 0
core/cdf/inc/cdf_list.h

@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_LIST_H)
+#define __CDF_LIST_H
+
+/**
+ *  DOC: cdf_list.h
+ *
+ *  Connectivity driver framework (CDF) list APIs
+ *
+ *  Definitions for CDF Linked Lists API
+ *
+ *  Lists are implemented as a doubly linked list. An item in a list can
+ *  be of any type as long as the datatype contains a field of type
+ *  cdf_link_t.
+ *
+ *  In general, a list is a doubly linked list of items with a pointer
+ *  to the front of the list and a pointer to the end of the list.  The
+ *  list items contain a forward and back link.
+ *
+ *  CDF linked list APIs are NOT thread safe so make sure to use appropriate
+ *  locking mechanisms to assure operations on the list are thread safe.
+ */
+
+/* Include Files */
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <cdf_trace.h>
+#include <linux/list.h>
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+
+typedef struct list_head cdf_list_node_t;
+
+typedef struct cdf_list_s {
+	cdf_list_node_t anchor;
+	uint32_t count;
+	uint32_t max_size;
+} cdf_list_t;
+
+/* Function declarations */
+
+/**
+ * cdf_list_init() - initialize a CDF list
+ * @p_list: list object to initialize
+ * @max_size: maximum number of nodes the list is intended to hold
+ *
+ * Sets up the anchor node and zeroes the element count.  Must be called
+ * before any other cdf_list API is used on @p_list.  Like all cdf_list
+ * APIs, this is not thread safe.
+ */
+CDF_INLINE_FN void cdf_list_init(cdf_list_t *p_list, uint32_t max_size)
+{
+	INIT_LIST_HEAD(&p_list->anchor);
+	p_list->count = 0;
+	p_list->max_size = max_size;
+}
+
+/**
+ * cdf_list_destroy() - destroy a CDF list
+ * @p_list: list object being destroyed
+ *
+ * Only verifies that the list is empty; no memory is released here.
+ * Logs an error and asserts if the list still contains nodes.
+ */
+CDF_INLINE_FN void cdf_list_destroy(cdf_list_t *p_list)
+{
+	if (p_list->count != 0) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+			  "%s: list length not equal to zero", __func__);
+		CDF_ASSERT(0);
+	}
+}
+
+/**
+ * cdf_list_size() - report the current number of nodes in a list
+ * @p_list: list to query
+ * @p_size: out-parameter receiving the node count
+ */
+CDF_INLINE_FN void cdf_list_size(cdf_list_t *p_list, uint32_t *p_size)
+{
+	*p_size = p_list->count;
+}
+
+CDF_STATUS cdf_list_insert_front(cdf_list_t *p_list, cdf_list_node_t *p_node);
+
+CDF_STATUS cdf_list_insert_back(cdf_list_t *p_list, cdf_list_node_t *p_node);
+
+CDF_STATUS cdf_list_insert_back_size(cdf_list_t *p_list,
+				     cdf_list_node_t *p_node, uint32_t *p_size);
+
+CDF_STATUS cdf_list_remove_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
+
+CDF_STATUS cdf_list_remove_back(cdf_list_t *p_list, cdf_list_node_t **pp_node);
+
+CDF_STATUS cdf_list_peek_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
+
+CDF_STATUS cdf_list_peek_next(cdf_list_t *p_list, cdf_list_node_t *p_node,
+			      cdf_list_node_t **pp_node);
+
+CDF_STATUS cdf_list_remove_node(cdf_list_t *p_list,
+				cdf_list_node_t *p_node_to_remove);
+
+#endif /* __CDF_LIST_H */

+ 296 - 0
core/cdf/inc/cdf_lock.h

@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_LOCK_H)
+#define __CDF_LOCK_H
+
+/**
+ *
+ * @file  cdf_lock.h
+ *
+ * @brief Connectivity driver framework (CDF) lock APIs
+ *
+ * Definitions for CDF locks
+ *
+ */
+
+/* Include Files */
+#include "cdf_status.h"
+#include "i_cdf_lock.h"
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+/**
+ * @brief Platform spinlock object
+ */
+typedef __cdf_spinlock_t cdf_spinlock_t;
+/**
+ * @brief Platform mutex object
+ */
+typedef __cdf_semaphore_t cdf_semaphore_t;
+
+/* Function declarations and documentation */
+
+/**
+ * cdf_semaphore_init() - initialize a semaphore
+ * @m:  Semaphore to initialize
+ *
+ * Thin wrapper over the platform-specific __cdf_semaphore_init().
+ *
+ * Return: None
+ */
+
+static inline void cdf_semaphore_init(cdf_semaphore_t *m)
+{
+	__cdf_semaphore_init(m);
+}
+
+/**
+ * cdf_semaphore_acquire() - take the semaphore
+ * @osdev: OS device handle (passed through to the platform layer)
+ * @m:  Semaphore to take
+ *
+ * Return: result of the platform __cdf_semaphore_acquire() call
+ *         (presumably 0 on success -- confirm against platform layer)
+ */
+static inline int cdf_semaphore_acquire(cdf_device_t osdev, cdf_semaphore_t *m)
+{
+	return __cdf_semaphore_acquire(osdev, m);
+}
+
+/**
+ * cdf_semaphore_release() - give the semaphore
+ * @osdev: OS device handle (passed through to the platform layer)
+ * @m:  Semaphore to give
+ *
+ * Return: None
+ */
+static inline void
+cdf_semaphore_release(cdf_device_t osdev, cdf_semaphore_t *m)
+{
+	__cdf_semaphore_release(osdev, m);
+}
+
+/**
+ * cdf_mutex_init() - initialize a CDF lock
+ * @lock:	 Pointer to the opaque lock object to initialize
+ *
+ * cdf_mutex_init() function initializes the specified lock. Upon
+ * successful initialization, the state of the lock becomes initialized
+ * and unlocked.
+ *
+ * A lock must be initialized by calling cdf_mutex_init() before it
+ * may be used in any other lock functions.
+ *
+ * Attempting to initialize an already initialized lock results in
+ * a failure.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS:	lock was successfully initialized
+ *	CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock);
+
+/**
+ * cdf_mutex_acquire () - acquire a CDF lock
+ * @lock:	 Pointer to the opaque lock object to acquire
+ *
+ * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
+ * is already locked, the calling thread shall block until the lock becomes
+ * available. This operation shall return with the lock object referenced by
+ * lock in the locked state with the calling thread as its owner.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS:	lock was successfully initialized
+ *	CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock);
+
+/**
+ * cdf_mutex_release() - release a CDF lock
+ * @lock:	 Pointer to the opaque lock object to be released
+ *
+ * cdf_mutex_release() function shall release the lock object
+ * referenced by 'lock'.
+ *
+ * If a thread attempts to release a lock that it unlocked or is not
+ * initialized, an error is returned.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS:	lock was successfully initialized
+ *	CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock);
+
+/**
+ * cdf_mutex_destroy() - destroy a CDF lock
+ * @lock:	 Pointer to the opaque lock object to be destroyed
+ *
+ * cdf_mutex_destroy() function shall destroy the lock object
+ * referenced by lock.  After a successful return from \a cdf_mutex_destroy()
+ * the lock object becomes, in effect, uninitialized.
+ *
+ * A destroyed lock object can be reinitialized using cdf_mutex_init();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined.  Calls to CDF lock functions to manipulate the lock such
+ * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
+ * don't use the lock after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS:	lock was successfully initialized
+ *	CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock);
+
+/**
+ * cdf_spinlock_init() - initialize a spinlock
+ * @lock: Spinlock object pointer
+ *
+ * Thin wrapper over the platform-specific __cdf_spinlock_init().
+ *
+ * Return: None
+ */
+static inline void cdf_spinlock_init(cdf_spinlock_t *lock)
+{
+	__cdf_spinlock_init(lock);
+}
+
+/**
+ * cdf_spinlock_destroy() - delete a spinlock
+ * @lock: Spinlock object pointer
+ *
+ * Thin wrapper over the platform-specific __cdf_spinlock_destroy().
+ *
+ * Return: None
+ */
+static inline void cdf_spinlock_destroy(cdf_spinlock_t *lock)
+{
+	__cdf_spinlock_destroy(lock);
+}
+
+/**
+ * cdf_spin_lock_bh() - locks the spinlock semaphore in soft irq context
+ * @lock: Spinlock object pointer
+ *
+ * Thin wrapper over the platform-specific __cdf_spin_lock_bh().
+ *
+ * Return: None
+ */
+static inline void cdf_spin_lock_bh(cdf_spinlock_t *lock)
+{
+	__cdf_spin_lock_bh(lock);
+}
+
+/**
+ * cdf_spin_unlock_bh() - unlocks the spinlock semaphore in soft irq context
+ * @lock: Spinlock object pointer
+ *
+ * Thin wrapper over the platform-specific __cdf_spin_unlock_bh().
+ * (Doc header previously mis-named this function cdf_spin_lock_bh.)
+ *
+ * Return: None
+ */
+static inline void cdf_spin_unlock_bh(cdf_spinlock_t *lock)
+{
+	__cdf_spin_unlock_bh(lock);
+}
+
+/**
+ * cdf_wake_lock_init() - initializes a CDF wake lock
+ * @lock: The wake lock to initialize
+ * @name: Name of wake lock
+ *
+ * Return:
+ *    CDF status success : if wake lock is initialized
+ *    CDF status failure : if wake lock was not initialized
+ */
+CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *lock, const char *name);
+
+/**
+ * cdf_wake_lock_acquire() - acquires a wake lock
+ * @pLock:	The wake lock to acquire
+ * @reason:	Reason for taking wakelock
+ *
+ * Return:
+ *    CDF status success : if wake lock is acquired
+ *    CDF status failure : if wake lock was not acquired
+ */
+CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason);
+
+/**
+ * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
+ * @pLock:	The wake lock to acquire
+ * @msec:	Timeout for the wake lock, in milliseconds
+ * @reason:	Reason for taking wakelock
+ *
+ * Return:
+ *   CDF status success : if wake lock is acquired
+ *   CDF status failure : if wake lock was not acquired
+ */
+CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock,
+					 uint32_t msec, uint32_t reason);
+
+/**
+ * cdf_wake_lock_release() - releases a wake lock
+ * @pLock:	the wake lock to release
+ * @reason:	Reason for releasing the wakelock
+ *
+ * Return:
+ *    CDF status success : if wake lock is released
+ *    CDF status failure : if wake lock was not released
+ */
+CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason);
+
+/**
+ * cdf_wake_lock_destroy() - destroys a wake lock
+ * @pLock:	The wake lock to destroy
+ *
+ * Return:
+ * CDF status success :	if wake lock is destroyed
+ * CDF status failure :	if wake lock was not destroyed
+ */
+CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock);
+
+/**
+ * cdf_spinlock_acquire() - acquires a spin lock
+ * @pLock:	Spin lock to acquire
+ *
+ * Return:
+ *    CDF status success : if spin lock is acquired
+ *    CDF status failure : if spin lock was not acquired
+ */
+CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock);
+
+/**
+ * cdf_spinlock_release() - release a spin lock
+ * @pLock:	Spin lock to release
+ *
+ * Return:
+ * CDF status success :	if spin lock is released
+ * CDF status failure :	if spin lock was not released
+ */
+CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock);
+
+#define cdf_spin_lock(_lock) __cdf_spin_lock(_lock)
+#define cdf_spin_unlock(_lock) __cdf_spin_unlock(_lock)
+#define cdf_spin_lock_irqsave(_lock) __cdf_spin_lock_irqsave(_lock)
+#define cdf_spin_unlock_irqrestore(_lock) \
+	__cdf_spin_unlock_irqrestore(_lock)
+#define cdf_spin_lock_irq(_pLock, _flags)   __cdf_spin_lock_irq(_pLock, _flags)
+#define cdf_spin_unlock_irq(_pLock, _flags) \
+	__cdf_spin_unlock_irq(_pLock, _flags)
+
+#define cdf_in_softirq() __cdf_in_softirq()
+
+#endif /* __CDF_LOCK_H */

+ 253 - 0
core/cdf/inc/cdf_mc_timer.h

@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_MC_TIMER_H)
+#define __CDF_MC_TIMER_H
+
+/**
+ * DOC: cdf_mc_timer
+ *
+ * Connectivity driver framework timer APIs serialized to MC thread
+ */
+
+/* Include Files */
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <cdf_lock.h>
+#include <i_cdf_mc_timer.h>
+
+#ifdef TIMER_MANAGER
+#include <cdf_list.h>
+#endif
+
+/* Preprocessor definitions and constants */
+#define CDF_TIMER_STATE_COOKIE (0x12)
+#define CDF_MC_TIMER_TO_MS_UNIT   (1000)
+#define CDF_MC_TIMER_TO_SEC_UNIT  (1000000)
+
+/* Type declarations */
+/* cdf Timer callback function prototype (well, actually a prototype for
+   a pointer to this callback function) */
+typedef void (*cdf_mc_timer_callback_t)(void *userData);
+
+typedef enum {
+	CDF_TIMER_STATE_UNUSED = CDF_TIMER_STATE_COOKIE,
+	CDF_TIMER_STATE_STOPPED,
+	CDF_TIMER_STATE_STARTING,
+	CDF_TIMER_STATE_RUNNING,
+} CDF_TIMER_STATE;
+
+#ifdef TIMER_MANAGER
+struct cdf_mc_timer_s;
+typedef struct cdf_mc_timer_node_s {
+	cdf_list_node_t pNode;
+	char *fileName;
+	unsigned int lineNum;
+	struct cdf_mc_timer_s *cdf_timer;
+} cdf_mc_timer_node_t;
+#endif
+
+typedef struct cdf_mc_timer_s {
+#ifdef TIMER_MANAGER
+	cdf_mc_timer_node_t *ptimerNode;
+#endif
+
+	cdf_mc_timer_platform_t platformInfo;
+	cdf_mc_timer_callback_t callback;
+	void *userData;
+	cdf_mutex_t lock;
+	CDF_TIMER_TYPE type;
+	CDF_TIMER_STATE state;
+} cdf_mc_timer_t;
+
+/* Function declarations and documentation */
+#ifdef TIMER_MANAGER
+void cdf_mc_timer_manager_init(void);
+void cdf_mc_timer_exit(void);
+#else
+/**
+ * cdf_mc_timer_manager_init() - initialize CDF debug timer manager
+ *
+ * This API initializes CDF timer debug functionality.
+ * No-op stub used when TIMER_MANAGER (timer debug tracking) is not
+ * compiled in.
+ *
+ * Return: none
+ */
+static inline void cdf_mc_timer_manager_init(void)
+{
+}
+
+/**
+ * cdf_mc_timer_exit() - exit CDF timer debug functionality
+ *
+ * This API exits CDF timer debug functionality.
+ * No-op stub used when TIMER_MANAGER (timer debug tracking) is not
+ * compiled in.
+ *
+ * Return: none
+ */
+static inline void cdf_mc_timer_exit(void)
+{
+}
+#endif
+/**
+ * cdf_mc_timer_get_current_state() - get the current state of the timer
+ * @pTimer:  Pointer to timer object
+ *
+ * Return:
+ *	CDF_TIMER_STATE - cdf timer state
+ */
+
+CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer);
+
+/**
+ * cdf_mc_timer_init() - initialize a CDF timer
+ * @pTimer:	Pointer to timer object
+ * @timerType:	Type of timer
+ * @callback:	Callback to be called after timer expiry
+ * @userData:	User data which will be passed to callback function
+ *
+ * This API initializes a CDF Timer object.
+ *
+ * cdf_mc_timer_init() initializes a CDF Timer object.  A timer must be
+ * initialized by calling cdf_mc_timer_initialize() before it may be used in
+ * any other timer functions.
+ *
+ * Attempting to initialize timer that is already initialized results in
+ * a failure. A destroyed timer object can be re-initialized with a call to
+ * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ *  Calls to CDF timer functions to manipulate the timer such
+ *  as cdf_mc_timer_set() will fail if the timer is not initialized or has
+ *  been destroyed.  Therefore, don't use the timer after it has been
+ *  destroyed until it has been re-initialized.
+ *
+ *  All callback will be executed within the CDS main thread unless it is
+ *  initialized from the Tx thread flow, in which case it will be executed
+ *  within the tx thread flow.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is initialized successfully
+ *	CDF failure status - Timer initialization failed
+ */
+#ifdef TIMER_MANAGER
+#define cdf_mc_timer_init(timer, timerType, callback, userdata)	\
+	cdf_mc_timer_init_debug(timer, timerType, callback, userdata, \
+		__FILE__, __LINE__)
+
+CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
+				   CDF_TIMER_TYPE timerType,
+				   cdf_mc_timer_callback_t callback,
+				   void *userData, char *fileName,
+				   uint32_t lineNum);
+#else
+CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
+			     cdf_mc_timer_callback_t callback,
+			     void *userData);
+#endif
+
+/**
+ * cdf_mc_timer_destroy() - destroy CDF timer
+ * @timer:	Pointer to timer object
+ *
+ * cdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a cdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to CDF timer functions to manipulate the timer, such
+ * as cdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is initialized successfully
+ *	CDF failure status - Timer initialization failed
+ */
+CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer);
+
+/**
+ * cdf_mc_timer_start() - start a CDF Timer object
+ * @timer:	Pointer to timer object
+ * @expirationTime:	Time to expire
+ *
+ * cdf_mc_timer_start() function starts a timer to expire after the
+ * specified interval, thus running the timer callback function when
+ * the interval expires.
+ *
+ * A timer only runs once (a one-shot timer).  To re-start the
+ * timer, cdf_mc_timer_start() has to be called after the timer runs
+ * or has been cancelled.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is initialized successfully
+ *	CDF failure status - Timer initialization failed
+ */
+CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime);
+
+/**
+ * cdf_mc_timer_stop() - stop a CDF Timer
+ * @timer:	Pointer to timer object
+ * cdf_mc_timer_stop() function stops a timer that has been started but
+ * has not expired, essentially cancelling the 'start' request.
+ *
+ * After a timer is stopped, it goes back to the state it was in after it
+ * was created and can be started again via a call to cdf_mc_timer_start().
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is initialized successfully
+ *	CDF failure status - Timer initialization failed
+ */
+CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer);
+
+/**
+ * cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
+ *
+ * cdf_mc_timer_get_system_ticks() function returns the current number
+ * of timer ticks in 10msec intervals.  This function is suitable for
+ * timestamping and calculating time intervals by calculating the
+ * difference between two timestamps.
+ *
+ * Return:
+ *	The current system tick count (in 10msec intervals).  This
+ *	function cannot fail.
+ */
+v_TIME_t cdf_mc_timer_get_system_ticks(void);
+
+/**
+ * cdf_mc_timer_get_system_time() - Get the system time in milliseconds
+ *
+ * cdf_mc_timer_get_system_time() function returns the number of milliseconds
+ * that have elapsed since the system was started
+ *
+ * Return:
+ *	The current system time in milliseconds
+ */
+v_TIME_t cdf_mc_timer_get_system_time(void);
+
+#endif /* #if !defined __CDF_MC_TIMER_H */

+ 225 - 0
core/cdf/inc/cdf_memory.h

@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_MEMORY_H)
+#define __CDF_MEMORY_H
+
+/**
+ * DOC: cdf_memory
+ *
+ * Connectivity driver framework (CDF) memory management APIs
+ */
+
+/* Include Files */
+#include <cdf_types.h>
+
+/* Preprocessor definitions and constants */
+
+#ifdef MEMORY_DEBUG
+void cdf_mem_clean(void);
+void cdf_mem_init(void);
+void cdf_mem_exit(void);
+#else
+/**
+ * cdf_mem_init() - initialize cdf memory debug functionality
+ *
+ * No-op stub used when MEMORY_DEBUG is not compiled in.
+ *
+ * Return: none
+ */
+static inline void cdf_mem_init(void)
+{
+}
+
+/**
+ * cdf_mem_exit() - exit cdf memory debug functionality
+ *
+ * No-op stub used when MEMORY_DEBUG is not compiled in.
+ *
+ * Return: none
+ */
+static inline void cdf_mem_exit(void)
+{
+}
+#endif
+/* Type declarations */
+
+/* Function declarations and documentation */
+
+/**
+ * cdf_mem_malloc() - allocation CDF memory
+ * @size:	Number of bytes of memory to allocate.
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory.
+ *
+ *
+ * Return:
+ *	Upon successful allocate, returns a non-NULL pointer to the allocated
+ *	memory.  If this function is unable to allocate the amount of memory
+ *	specified (for any reason) it returns %NULL.
+ *
+ */
+#ifdef MEMORY_DEBUG
+#define cdf_mem_malloc(size) cdf_mem_malloc_debug(size, __FILE__, __LINE__)
+void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum);
+#else
+void *cdf_mem_malloc(size_t size);
+#endif
+
+/**
+ *  cdf_mem_free() - free CDF memory
+ *  @ptr:	Pointer to the starting address of the memory to be free'd.
+ *
+ *  This function will free the memory pointed to by 'ptr'.
+ *
+ *  Return:
+ *	 Nothing
+ *
+ */
+void cdf_mem_free(void *ptr);
+
+/**
+ * cdf_mem_set() - set (fill) memory with a specified byte value.
+ * @ptr:	Pointer to memory that will be set
+ * @numBytes:	Number of bytes to be set
+ * @value:	Byte set in memory
+ *
+ * Return:
+ *    Nothing
+ *
+ */
+void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value);
+
+/**
+ * cdf_mem_zero() - zero out memory
+ * @ptr:	pointer to memory that will be set to zero
+ * @numBytes:	number of bytes to zero
+ *
+ *  This function sets the memory location to all zeros, essentially clearing
+ *  the memory.
+ *
+ * Return:
+ *	Nothing
+ *
+ */
+void cdf_mem_zero(void *ptr, uint32_t numBytes);
+
+/**
+ * cdf_mem_copy() - copy memory
+ * @pDst:	Pointer to destination memory location (to copy to)
+ * @pSrc:	Pointer to source memory location (to copy from)
+ * @numBytes:	Number of bytes to copy.
+ *
+ * Copy host memory from one location to another, similar to memcpy in
+ * standard C.  Note this function does not specifically handle overlapping
+ * source and destination memory locations.  Calling this function with
+ * overlapping source and destination memory locations will result in
+ * unpredictable results.  Use cdf_mem_move() if the memory locations
+ * for the source and destination are overlapping (or could be overlapping!)
+ *
+ * Return:
+ *    Nothing
+ *
+ */
+void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes);
+
+/**
+ * cdf_mem_move() - move memory
+ * @pDst:	pointer to destination memory location (to move to)
+ * @pSrc:	pointer to source memory location (to move from)
+ * @numBytes:	number of bytes to move.
+ *
+ * Move host memory from one location to another, similar to memmove in
+ * standard C.  Note this function *does* handle overlapping
+ * source and destination memory locations.
+ *
+ * Return:
+ *	Nothing
+ */
+void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes);
+
+/**
+ * cdf_mem_compare() - memory compare
+ * @pMemory1:	pointer to one location in memory to compare.
+ * @pMemory2:	pointer to second location in memory to compare.
+ * @numBytes:	the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C.
+ *
+ * Return:
+ *	bool - returns a bool value that tells if the memory locations
+ *	are equal or not equal.
+ *
+ */
+bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
+		     uint32_t numBytes);
+
+/**
+ * cdf_mem_compare2() - memory compare
+ * @pMemory1: pointer to one location in memory to compare.
+ * @pMemory2:	pointer to second location in memory to compare.
+ * @numBytes:	the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C.
+ * Return:
+ *	 int32_t - returns a bool value that tells if the memory
+ *	 locations are equal or not equal.
+ *	 0 -- equal
+ *	 < 0 -- *pMemory1 is less than *pMemory2
+ *	 > 0 -- *pMemory1 is bigger than *pMemory2
+ */
+int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
+			 uint32_t numBytes);
+
+void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
+				  cdf_dma_addr_t *paddr,
+				  cdf_dma_context_t mctx);
+void
+cdf_os_mem_free_consistent(cdf_device_t osdev,
+			   cdf_size_t size,
+			   void *vaddr,
+			   cdf_dma_addr_t paddr, cdf_dma_context_t memctx);
+
+void
+cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
+				      cdf_dma_addr_t bus_addr,
+				      cdf_size_t size,
+				      enum dma_data_direction direction);
+
+/**
+ * cdf_str_len() - returns the length of a string
+ * @str:	input string (must be non-NULL and NUL-terminated)
+ *
+ * Thin wrapper over strlen(); note the size_t result is narrowed to
+ * int32_t.
+ *
+ * Return:
+ *	length of string
+ */
+static inline int32_t cdf_str_len(const char *str)
+{
+	return strlen(str);
+}
+
+#endif /* __CDF_MEMORY_H */

+ 1053 - 0
core/cdf/inc/cdf_nbuf.h

@@ -0,0 +1,1053 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_nbuf_public network buffer API
+ * This file defines the network buffer abstraction.
+ */
+
+#ifndef _CDF_NBUF_H
+#define _CDF_NBUF_H
+#include <cdf_util.h>
+#include <cdf_types.h>
+#include <cdf_net_types.h>
+#include <cdf_lock.h>
+#include <i_cdf_nbuf.h>
+#include <asm/cacheflush.h>
+
+#define IPA_NBUF_OWNER_ID 0xaa55aa55
+#define NBUF_PKT_TRAC_TYPE_EAPOL   0x02
+#define NBUF_PKT_TRAC_TYPE_DHCP    0x04
+#define NBUF_PKT_TRAC_TYPE_MGMT_ACTION    0x08
+#define NBUF_PKT_TRAC_MAX_STRING   12
+#define NBUF_PKT_TRAC_PROTO_STRING 4
+#define NBUF_PKT_ERROR  1
+
+/* Tracked Packet types */
+#define NBUF_TX_PKT_INVALID              0
+#define NBUF_TX_PKT_DATA_TRACK           1
+#define NBUF_TX_PKT_MGMT_TRACK           2
+
+/* Different Packet states */
+#define NBUF_TX_PKT_HDD                  1
+#define NBUF_TX_PKT_TXRX_ENQUEUE         2
+#define NBUF_TX_PKT_TXRX_DEQUEUE         3
+#define NBUF_TX_PKT_TXRX                 4
+#define NBUF_TX_PKT_HTT                  5
+#define NBUF_TX_PKT_HTC                  6
+#define NBUF_TX_PKT_HIF                  7
+#define NBUF_TX_PKT_CE                   8
+#define NBUF_TX_PKT_FREE                 9
+#define NBUF_TX_PKT_STATE_MAX            10
+
+
+/**
+ * @cdf_nbuf_t - Platform independent packet abstraction
+ */
+typedef __cdf_nbuf_t cdf_nbuf_t;
+
+/**
+ * @cdf_dma_map_cb_t - Dma map callback prototype
+ */
+typedef void (*cdf_dma_map_cb_t)(void *arg, cdf_nbuf_t buf,
+				 cdf_dma_map_t dmap);
+
+/**
+ * @__CDF_NBUF_NULL - invalid handle
+ */
+#define CDF_NBUF_NULL   __CDF_NBUF_NULL
+/**
+ * @cdf_nbuf_queue_t - Platform independent packet queue abstraction
+ */
+typedef __cdf_nbuf_queue_t cdf_nbuf_queue_t;
+
+/* BUS/DMA mapping routines */
+
+/**
+ * cdf_nbuf_map() - map a buffer to local bus address space
+ * @osdev: OS device
+ * @buf: Buf to be mapped (mapping info is stored in the buf's meta-data area)
+ * @dir: DMA direction
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_map().
+ *
+ * Return: Status of the operation
+ */
+static inline CDF_STATUS
+cdf_nbuf_map(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
+{
+	return __cdf_nbuf_map(osdev, buf, dir);
+}
+
+/**
+ * cdf_nbuf_unmap() - unmap a previously mapped buf
+ * @osdev: OS device
+ * @buf: Buf to be unmapped (mapping info is stored in the buf's meta-data area)
+ * @dir: DMA direction
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_unmap().
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_unmap(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
+{
+	__cdf_nbuf_unmap(osdev, buf, dir);
+}
+
+/**
+ * cdf_nbuf_map_single() - map a single buffer to local bus address space
+ * @osdev: OS device
+ * @buf: Buf to be mapped (mapping info is stored in the buf's meta-data area)
+ * @dir: DMA direction
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_map_single().
+ *
+ * Return: Status of the operation
+ */
+static inline CDF_STATUS
+cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
+{
+	return __cdf_nbuf_map_single(osdev, buf, dir);
+}
+
+/**
+ * cdf_nbuf_unmap_single() - unmap a previously mapped buf
+ * @osdev: OS device
+ * @buf: Buf to be unmapped (mapping info is stored in the buf's meta-data area)
+ * @dir: DMA direction
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_unmap_single().
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
+{
+	__cdf_nbuf_unmap_single(osdev, buf, dir);
+}
+
+/**
+ * cdf_nbuf_get_num_frags() - get number of fragments
+ * @buf: Network buffer
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_get_num_frags().
+ *
+ * Return: Number of fragments
+ */
+static inline int cdf_nbuf_get_num_frags(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_get_num_frags(buf);
+}
+
+/**
+ * cdf_nbuf_get_frag_len() - get fragment length
+ * @buf: Network buffer
+ * @frag_num: Fragment number
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_get_frag_len().
+ *
+ * Return: Fragment length
+ */
+static inline int cdf_nbuf_get_frag_len(cdf_nbuf_t buf, int frag_num)
+{
+	return __cdf_nbuf_get_frag_len(buf, frag_num);
+}
+
+/**
+ * cdf_nbuf_get_frag_vaddr() - get fragment virtual address
+ * @buf: Network buffer
+ * @frag_num: Fragment number
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_get_frag_vaddr().
+ *
+ * Return: Fragment virtual address
+ */
+static inline unsigned char *cdf_nbuf_get_frag_vaddr(cdf_nbuf_t buf,
+						     int frag_num)
+{
+	return __cdf_nbuf_get_frag_vaddr(buf, frag_num);
+}
+
+/**
+ * cdf_nbuf_get_frag_paddr_lo() - get fragment physical address low order bytes
+ * @buf: Network buffer
+ * @frag_num: Fragment number
+ *
+ * Only the low 32 bits of the fragment's physical address are
+ * returned (see the uint32_t return type).
+ *
+ * Return: Fragment physical address lo
+ */
+static inline uint32_t cdf_nbuf_get_frag_paddr_lo(cdf_nbuf_t buf, int frag_num)
+{
+	return __cdf_nbuf_get_frag_paddr_lo(buf, frag_num);
+}
+
+/**
+ * cdf_nbuf_get_frag_is_wordstream() - is fragment wordstream
+ * @buf: Network buffer
+ * @frag_num: Fragment number
+ *
+ * Thin wrapper over __cdf_nbuf_get_frag_is_wordstream().
+ *
+ * Return: Fragment wordstream or not
+ */
+static inline int cdf_nbuf_get_frag_is_wordstream(cdf_nbuf_t buf, int frag_num)
+{
+	return __cdf_nbuf_get_frag_is_wordstream(buf, frag_num);
+}
+
+/**
+ * cdf_nbuf_set_frag_is_wordstream() - set fragment wordstream
+ * @buf: Network buffer
+ * @frag_num: Fragment number
+ * @is_wordstream: Wordstream
+ *
+ * Thin wrapper over __cdf_nbuf_set_frag_is_wordstream().
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_set_frag_is_wordstream(cdf_nbuf_t buf, int frag_num, int is_wordstream)
+{
+	__cdf_nbuf_set_frag_is_wordstream(buf, frag_num, is_wordstream);
+}
+
+/**
+ * cdf_nbuf_frag_push_head() - push fragment head
+ * @buf: Network buffer
+ * @frag_len: Fragment length
+ * @frag_vaddr: Fragment virtual address
+ * @frag_paddr_lo: Fragment physical address lo
+ * @frag_paddr_hi: Fragment physical address hi
+ *
+ * Thin wrapper over the platform-specific __cdf_nbuf_frag_push_head().
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_frag_push_head(cdf_nbuf_t buf,
+			int frag_len,
+			char *frag_vaddr,
+			uint32_t frag_paddr_lo, uint32_t frag_paddr_hi)
+{
+	__cdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr_lo,
+				  frag_paddr_hi);
+}
+
+#ifdef MEMORY_DEBUG
+void cdf_net_buf_debug_init(void);
+void cdf_net_buf_debug_exit(void);
+void cdf_net_buf_debug_clean(void);
+void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
+				uint8_t *file_name, uint32_t line_num);
+void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf);
+void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf);
+
+/* nbuf allocation routines */
+
+/**
+ * cdf_nbuf_alloc() - Allocate cdf_nbuf
+ * @hdl: Platform device object
+ * @size: Data buffer size for this cdf_nbuf including max header
+ *	  size
+ * @reserve: Headroom to start with.
+ * @align: Alignment for the start buffer.
+ * @prio: Indicate if the nbuf is high priority (some OSes e.g darwin
+ *	   polls few times if allocation fails and priority is true)
+ *
+ * Debug variant (MEMORY_DEBUG): records the allocation call site in
+ * the internal CDF tracking table via cdf_net_buf_debug_add_node().
+ * The nbuf created is guaranteed to have only 1 physical segment.
+ *
+ * Return: The new cdf_nbuf instance or NULL if there's not enough memory.
+ */
+
+#define cdf_nbuf_alloc(d, s, r, a, p)			\
+	cdf_nbuf_alloc_debug(d, s, r, a, p, __FILE__, __LINE__)
+static inline cdf_nbuf_t
+cdf_nbuf_alloc_debug(cdf_device_t osdev, cdf_size_t size, int reserve,
+		     int align, int prio, uint8_t *file_name,
+		     uint32_t line_num)
+{
+	cdf_nbuf_t net_buf;
+	net_buf = __cdf_nbuf_alloc(osdev, size, reserve, align, prio);
+
+	/* Store SKB in internal CDF tracking table */
+	if (cdf_likely(net_buf))
+		cdf_net_buf_debug_add_node(net_buf, size, file_name, line_num);
+
+	return net_buf;
+}
+
+/**
+ * cdf_nbuf_free() - free cdf_nbuf
+ * @net_buf: Network buffer to free
+ *
+ * Debug variant (MEMORY_DEBUG): removes the buffer from the internal
+ * CDF tracking table before handing it to the platform free routine.
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_free(cdf_nbuf_t net_buf)
+{
+	/* Remove SKB from internal CDF tracking table */
+	if (cdf_likely(net_buf))
+		cdf_net_buf_debug_delete_node(net_buf);
+
+	__cdf_nbuf_free(net_buf);
+}
+
+#else
+
+static inline void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
+{
+	return;
+}
+
+/* Nbuf allocation routines */
+
+/**
+ * cdf_nbuf_alloc() - allocate cdf_nbuf
+ * @osdev: Platform device object
+ * @size: Data buffer size for this cdf_nbuf including max header
+ *	  size
+ * @reserve: Headroom to start with.
+ * @align: Alignment for the start buffer.
+ * @prio: Indicate if the nbuf is high priority (some OSes e.g darwin
+ *	  polls few times if allocation fails and priority is  true)
+ *
+ * The nbuf created is guaranteed to have only 1 physical segment
+ *
+ * Return: new cdf_nbuf instance or NULL if there's not enough memory.
+ */
+static inline cdf_nbuf_t
+cdf_nbuf_alloc(cdf_device_t osdev,
+	       cdf_size_t size, int reserve, int align, int prio)
+{
+	return __cdf_nbuf_alloc(osdev, size, reserve, align, prio);
+}
+
+/**
+ * cdf_nbuf_free() - free cdf_nbuf
+ * @buf: Network buffer to free
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_free(cdf_nbuf_t buf)
+{
+	__cdf_nbuf_free(buf);
+}
+
+#endif
+
+/**
+ * cdf_nbuf_tx_free() - free a list of cdf_nbufs and tell the OS their tx
+ *			status (if req'd)
+ * @buf_list: List of netbufs to free
+ * @tx_err: Whether the tx frames were transmitted successfully
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_tx_free(cdf_nbuf_t buf_list, int tx_err)
+{
+	__cdf_nbuf_tx_free(buf_list, tx_err);
+}
+
+/**
+ * cdf_nbuf_copy() - copy src buffer into dst.
+ * @buf: source nbuf to copy from
+ *
+ * This API is useful, for example, because most native buffer provide a way to
+ * copy a chain into a single buffer. Therefore as a side effect, it also
+ * "linearizes" a buffer (which is perhaps why you'll use it mostly). It
+ * creates a writeable copy.
+ *
+ *
+ * Return: new nbuf
+ */
+static inline cdf_nbuf_t cdf_nbuf_copy(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_copy(buf);
+}
+
+/**
+ * cdf_nbuf_cat() - link two nbufs, the new buf is piggybacked into older one
+ * @dst: Buffer to piggyback into
+ * @src: Buffer to put
+ *
+ * Return: Status of the call - 0 successful
+ */
+static inline CDF_STATUS cdf_nbuf_cat(cdf_nbuf_t dst, cdf_nbuf_t src)
+{
+	return __cdf_nbuf_cat(dst, src);
+}
+
+/**
+ * cdf_nbuf_copy_bits() - copy data bits out of the nbuf
+ * @nbuf: Network buffer
+ * @offset: Offset into the nbuf data to start copying from
+ * @len: Number of bytes to copy
+ * @to: Destination buffer
+ *
+ * NOTE(review): wraps the platform __cdf_nbuf_copy_bits(); presumably
+ * follows the skb_copy_bits() contract (0 on success) — confirm in the
+ * platform implementation.
+ *
+ * Return: int32_t
+ */
+static inline int32_t
+cdf_nbuf_copy_bits(cdf_nbuf_t nbuf, uint32_t offset, uint32_t len, void *to)
+{
+	return __cdf_nbuf_copy_bits(nbuf, offset, len, to);
+}
+
+/**
+ * cdf_nbuf_clone() - clone the nbuf (copy is readonly)
+ * @buf: nbuf to clone from
+ *
+ * Return: cloned buffer
+ */
+static inline cdf_nbuf_t cdf_nbuf_clone(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_clone(buf);
+}
+
+/* nbuf manipulation routines */
+
+/**
+ * cdf_nbuf_head() - return the address of an nbuf's buffer
+ * @buf: netbuf
+ *
+ * Return: head address
+ */
+static inline uint8_t *cdf_nbuf_head(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_head(buf);
+}
+
+/**
+ * cdf_nbuf_data() - Return the address of the start of data within an nbuf
+ * @buf: Network buffer
+ *
+ * Return: Data address
+ */
+static inline uint8_t *cdf_nbuf_data(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_data(buf);
+}
+
+/**
+ * cdf_nbuf_headroom() - amount of headroom in the current nbuf
+ * @buf: Network buffer
+ *
+ * Return: Amount of head room
+ */
+static inline uint32_t cdf_nbuf_headroom(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_headroom(buf);
+}
+
+/**
+ * cdf_nbuf_tailroom() - amount of tail space available
+ * @buf: Network buffer
+ *
+ * Return: amount of tail room
+ */
+static inline uint32_t cdf_nbuf_tailroom(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_tailroom(buf);
+}
+
+/**
+ * cdf_nbuf_push_head() - push data in the front
+ * @buf: Network buf instance
+ * @size: Size to be pushed
+ *
+ * Return: New data pointer of this buf after data has been pushed,
+ *	   or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *cdf_nbuf_push_head(cdf_nbuf_t buf, cdf_size_t size)
+{
+	return __cdf_nbuf_push_head(buf, size);
+}
+
+/**
+ * cdf_nbuf_put_tail() - puts data in the end
+ * @buf: Network buf instance
+ * @size: Size to be pushed
+ *
+ * Return: Data pointer of this buf where new data has to be
+ *	   put, or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *cdf_nbuf_put_tail(cdf_nbuf_t buf, cdf_size_t size)
+{
+	return __cdf_nbuf_put_tail(buf, size);
+}
+
+/**
+ * cdf_nbuf_pull_head() - pull data out from the front
+ * @buf: Network buf instance
+ * @size: Size to be popped
+ *
+ * Return: New data pointer of this buf after data has been popped,
+ *	   or NULL if there is not sufficient data to pull.
+ */
+static inline uint8_t *cdf_nbuf_pull_head(cdf_nbuf_t buf, cdf_size_t size)
+{
+	return __cdf_nbuf_pull_head(buf, size);
+}
+
+/**
+ * cdf_nbuf_trim_tail() - trim data out from the end
+ * @buf: Network buf instance
+ * @size: Size to be popped
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_trim_tail(cdf_nbuf_t buf, cdf_size_t size)
+{
+	__cdf_nbuf_trim_tail(buf, size);
+}
+
+/**
+ * cdf_nbuf_len() - get the length of the buf
+ * @buf: Network buf instance
+ *
+ * Return: total length of this buf.
+ */
+static inline cdf_size_t cdf_nbuf_len(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_len(buf);
+}
+
+/**
+ * cdf_nbuf_set_pktlen() - set the length of the buf
+ * @buf: Network buf instance
+ * @len: Length to be set
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_set_pktlen(cdf_nbuf_t buf, uint32_t len)
+{
+	__cdf_nbuf_set_pktlen(buf, len);
+}
+
+/**
+ * cdf_nbuf_reserve() - reserve headroom in the buffer
+ * @buf: Network buf instance
+ * @size: Amount of space to reserve
+ *
+ * NOTE(review): the previous description ("trim data out from the end")
+ * was a copy-paste from cdf_nbuf_trim_tail(); __cdf_nbuf_reserve()
+ * presumably maps to the platform headroom-reserve primitive
+ * (skb_reserve on Linux) — confirm in i_cdf_nbuf.h.
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_reserve(cdf_nbuf_t buf, cdf_size_t size)
+{
+	__cdf_nbuf_reserve(buf, size);
+}
+
+/**
+ * cdf_nbuf_peek_header() - return the data pointer & length of the header
+ * @buf: Network nbuf
+ * @addr: Data pointer
+ * @len: Length of the data
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_peek_header(cdf_nbuf_t buf, uint8_t **addr, uint32_t *len)
+{
+	__cdf_nbuf_peek_header(buf, addr, len);
+}
+
+/* nbuf private context routines */
+
+/* nbuf queue routines */
+
+/**
+ * cdf_nbuf_queue_init() - initialize buf queue
+ * @head: Network buf queue head
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_queue_init(cdf_nbuf_queue_t *head)
+{
+	__cdf_nbuf_queue_init(head);
+}
+
+/**
+ * cdf_nbuf_queue_add() - append a nbuf to the tail of the buf queue
+ * @head: Network buf queue head
+ * @buf: Network buf
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_queue_add(cdf_nbuf_queue_t *head, cdf_nbuf_t buf)
+{
+	__cdf_nbuf_queue_add(head, buf);
+}
+
+/**
+ * cdf_nbuf_queue_insert_head() - insert nbuf at the head of queue
+ * @head: Network buf queue head
+ * @buf: Network buf
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_queue_insert_head(cdf_nbuf_queue_t *head, cdf_nbuf_t buf)
+{
+	__cdf_nbuf_queue_insert_head(head, buf);
+}
+
+/**
+ * cdf_nbuf_queue_remove() - retrieve a buf from the head of the buf queue
+ * @head: Network buf queue head
+ *
+ * Return: The head buf in the buf queue.
+ */
+static inline cdf_nbuf_t cdf_nbuf_queue_remove(cdf_nbuf_queue_t *head)
+{
+	return __cdf_nbuf_queue_remove(head);
+}
+
+/**
+ * cdf_nbuf_queue_len() - get the length of the queue
+ * @head: Network buf queue head
+ *
+ * Return: length of the queue
+ */
+static inline uint32_t cdf_nbuf_queue_len(cdf_nbuf_queue_t *head)
+{
+	return __cdf_nbuf_queue_len(head);
+}
+
+/**
+ * cdf_nbuf_queue_next() - get the next guy/packet of the given buffer
+ * @buf: Network buffer
+ *
+ * Return: next buffer/packet
+ */
+static inline cdf_nbuf_t cdf_nbuf_queue_next(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_queue_next(buf);
+}
+
+/**
+ * cdf_nbuf_is_queue_empty() - check if the buf queue is empty
+ * @nbq: Network buf queue handle
+ *
+ * Return: true  if queue is empty
+ *	   false if queue is not empty
+ */
+static inline bool cdf_nbuf_is_queue_empty(cdf_nbuf_queue_t *nbq)
+{
+	return __cdf_nbuf_is_queue_empty(nbq);
+}
+
+/**
+ * cdf_nbuf_next() - get the next packet in the linked list
+ * @buf: Network buffer
+ *
+ * This function can be used when nbufs are directly linked into a list,
+ * rather than using a separate network buffer queue object.
+ *
+ * Return: next network buffer in the linked list
+ */
+static inline cdf_nbuf_t cdf_nbuf_next(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_next(buf);
+}
+
+/**
+ * cdf_nbuf_get_protocol() - return the protocol value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb protocol
+ */
+static inline uint16_t cdf_nbuf_get_protocol(struct sk_buff *skb)
+{
+	return __cdf_nbuf_get_protocol(skb);
+}
+
+/**
+ * cdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb ip_summed
+ */
+static inline uint8_t cdf_nbuf_get_ip_summed(struct sk_buff *skb)
+{
+	return __cdf_nbuf_get_ip_summed(skb);
+}
+
+/**
+ * cdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
+ * @skb: Pointer to network buffer
+ * @ip_summed: ip checksum
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_set_ip_summed(struct sk_buff *skb, uint8_t ip_summed)
+{
+	__cdf_nbuf_set_ip_summed(skb, ip_summed);
+}
+
+/**
+ * cdf_nbuf_set_next() - add a packet to a linked list
+ * @this_buf: Predecessor buffer
+ * @next_buf: Successor buffer
+ *
+ * This function can be used to directly link nbufs, rather than using
+ * a separate network buffer queue object.
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_set_next(cdf_nbuf_t this_buf, cdf_nbuf_t next_buf)
+{
+	__cdf_nbuf_set_next(this_buf, next_buf);
+}
+
+/* nbuf extension routines */
+
+/**
+ * cdf_nbuf_set_next_ext() - link extension of this packet contained in a new
+ *			     nbuf
+ * @this_buf: predecessor buffer
+ * @next_buf: successor buffer
+ *
+ * This function is used to link up many nbufs containing a single logical
+ * packet - not a collection of packets. Do not use for linking the first
+ * extension to the head
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_set_next_ext(cdf_nbuf_t this_buf, cdf_nbuf_t next_buf)
+{
+	__cdf_nbuf_set_next_ext(this_buf, next_buf);
+}
+
+/**
+ * cdf_nbuf_next_ext() - get the next packet extension in the linked list
+ * @buf: Network buffer
+ *
+ * Return: Next network buffer in the linked list
+ */
+static inline cdf_nbuf_t cdf_nbuf_next_ext(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_next_ext(buf);
+}
+
+/**
+ * cdf_nbuf_append_ext_list() - link list of packet extensions to the head
+ *				segment
+ * @head_buf: Network buf holding head segment (single)
+ * @ext_list: Network buf list holding linked extensions to the head
+ * @ext_len: Total length of all buffers in the extension list
+ *
+ * This function is used to link up a list of packet extensions (seg1, 2,
+ * ...) to the nbuf holding the head segment (seg0)
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_append_ext_list(cdf_nbuf_t head_buf, cdf_nbuf_t ext_list,
+			 cdf_size_t ext_len)
+{
+	__cdf_nbuf_append_ext_list(head_buf, ext_list, ext_len);
+}
+
+/**
+ * cdf_nbuf_get_tx_cksum() - gets the tx checksum offload demand
+ * @buf: Network buffer
+ *
+ * Return: cdf_nbuf_tx_cksum_t checksum offload demand for the frame
+ */
+static inline cdf_nbuf_tx_cksum_t cdf_nbuf_get_tx_cksum(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_get_tx_cksum(buf);
+}
+
+/**
+ * cdf_nbuf_set_rx_cksum() - drivers that support hw checksumming use this to
+ *			     indicate checksum info to the stack.
+ * @buf: Network buffer
+ * @cksum: Checksum
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_set_rx_cksum(cdf_nbuf_t buf, cdf_nbuf_rx_cksum_t *cksum)
+{
+	__cdf_nbuf_set_rx_cksum(buf, cksum);
+}
+
+/**
+ * cdf_nbuf_get_tid() - this function extracts the TID value from nbuf
+ * @buf: Network buffer
+ *
+ * Return: TID value
+ */
+static inline uint8_t cdf_nbuf_get_tid(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_get_tid(buf);
+}
+
+/**
+ * cdf_nbuf_set_tid() - this function sets the TID value in nbuf
+ * @buf: Network buffer
+ * @tid: TID value
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_set_tid(cdf_nbuf_t buf, uint8_t tid)
+{
+	__cdf_nbuf_set_tid(buf, tid);
+}
+
+/**
+ * cdf_nbuf_get_exemption_type() - this function extracts the exemption type
+ *				   from nbuf
+ * @buf: Network buffer
+ *
+ * Return: Exemption type
+ */
+static inline uint8_t cdf_nbuf_get_exemption_type(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_get_exemption_type(buf);
+}
+
+/**
+ * cdf_nbuf_set_protocol() - this function peeks data into the buffer at given
+ *			     offset
+ * @buf: Network buffer
+ * @proto: Protocol
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_set_protocol(cdf_nbuf_t buf, uint16_t proto)
+{
+	__cdf_nbuf_set_protocol(buf, proto);
+}
+
+/**
+ * cdf_nbuf_trace_get_proto_type() - this function return packet proto type
+ * @buf: Network buffer
+ *
+ * Return: Packet protocol type
+ */
+static inline uint8_t cdf_nbuf_trace_get_proto_type(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_trace_get_proto_type(buf);
+}
+
+#ifdef QCA_PKT_PROTO_TRACE
+/**
+ * cdf_nbuf_trace_set_proto_type() - this function updates packet proto type
+ * @buf: Network buffer
+ * @proto_type: Protocol type
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_trace_set_proto_type(cdf_nbuf_t buf, uint8_t proto_type)
+{
+	__cdf_nbuf_trace_set_proto_type(buf, proto_type);
+}
+#else
+#define cdf_nbuf_trace_set_proto_type(buf, proto_type) /*NO OP*/
+#endif
+
+/**
+ * cdf_nbuf_reg_trace_cb() - this function registers protocol trace callback
+ * @cb_func_ptr: Callback pointer
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
+{
+	__cdf_nbuf_reg_trace_cb(cb_func_ptr);
+}
+
+/**
+ * cdf_nbuf_trace_update() - this function updates protocol event
+ * @buf: Network buffer
+ * @event_string: Event string pointer
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_trace_update(cdf_nbuf_t buf, char *event_string)
+{
+	__cdf_nbuf_trace_update(buf, event_string);
+}
+
+/**
+ * cdf_nbuf_set_tx_parallel_dnload_frm() - set tx parallel download
+ * @buf: Network buffer
+ * @candi: Candidate of parallel download frame
+ *
+ * This function stores a flag specifying this TX frame is suitable for
+ * downloading though a 2nd TX data pipe that is used for short frames for
+ * protocols that can accept out-of-order delivery.
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_set_tx_parallel_dnload_frm(cdf_nbuf_t buf, uint8_t candi)
+{
+	__cdf_nbuf_set_tx_htt2_frm(buf, candi);
+}
+
+/**
+ * cdf_nbuf_get_tx_parallel_dnload_frm() - get tx parallel download
+ * @buf: Network buffer
+ *
+ * This function return whether this TX frame is allow to download though a 2nd
+ * TX data pipe or not.
+ *
+ * Return: flag indicating whether the frame may use the 2nd TX data pipe
+ *	   (value of the HTT2 frame marker set by the setter above)
+ */
+static inline uint8_t cdf_nbuf_get_tx_parallel_dnload_frm(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_get_tx_htt2_frm(buf);
+}
+
+/**
+ * cdf_invalidate_range() - invalidate the virtual address range specified by
+ *			    start and end addresses.
+ * @start: Start virtual address of the range
+ * @end: End virtual address of the range (exclusive)
+ *
+ * Note: This does not write back the cache entries.
+ * On non-MSM platforms there is currently no implementation; a fatal
+ * trace message is emitted instead.
+ *
+ * Return: none
+ */
+static inline void cdf_invalidate_range(void *start, void *end)
+{
+#ifdef MSM_PLATFORM
+	dmac_inv_range(start, end);
+#else
+	/* TODO figure out how to invalidate cache on x86 and other
+	   non-MSM platform */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		  "Cache invalidate not yet implemented for non-MSM platform");
+#endif
+}
+
+#if defined(FEATURE_TSO)
+/**
+ * cdf_nbuf_dec_num_frags() - decrement the number of fragments
+ * @buf: Network buffer
+ *
+ * Return: Number of fragments
+ */
+static inline int cdf_nbuf_dec_num_frags(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_dec_num_frags(buf);
+}
+
+/**
+ * cdf_nbuf_is_tso() - is the network buffer a jumbo packet?
+ * @nbuf: Network buffer
+ *
+ * Return: 1 - this is a jumbo packet 0 - not a jumbo packet
+ */
+static inline uint8_t cdf_nbuf_is_tso(cdf_nbuf_t nbuf)
+{
+	return __cdf_nbuf_is_tso(nbuf);
+}
+
+/**
+ * cdf_nbuf_get_tso_info() - function to divide a jumbo TSO
+ * network buffer into segments
+ * @osdev:  Platform device handle
+ * @nbuf:   network buffer to be segmented
+ * @tso_info:  This is the output. The information about the
+ *      TSO segments will be populated within this.
+ *
+ * This function fragments a TCP jumbo packet into smaller
+ * segments to be transmitted by the driver. It chains the TSO
+ * segments created into a list.
+ *
+ * Return: number of TSO segments
+ */
+static inline uint32_t cdf_nbuf_get_tso_info(cdf_device_t osdev,
+		 cdf_nbuf_t nbuf, struct cdf_tso_info_t *tso_info)
+{
+	return __cdf_nbuf_get_tso_info(osdev, nbuf, tso_info);
+}
+
+/**
+ * cdf_nbuf_get_tso_num_seg() - function to calculate the number
+ * of TCP segments within the TSO jumbo packet
+ * @nbuf:   TSO jumbo network buffer to be segmented
+ *
+ * This function calculates the number of TCP segments that the
+ * network buffer can be divided into.
+ *
+ * Return: number of TCP segments
+ */
+static inline uint32_t cdf_nbuf_get_tso_num_seg(cdf_nbuf_t nbuf)
+{
+	return __cdf_nbuf_get_tso_num_seg(nbuf);
+}
+
+/**
+ * cdf_nbuf_inc_users() - function to increment the number of
+ * users referencing this network buffer
+ *
+ * @nbuf:   network buffer
+ *
+ * This function increments the number of users referencing this
+ * network buffer
+ *
+ * Return: the network buffer
+ */
+static inline cdf_nbuf_t cdf_nbuf_inc_users(cdf_nbuf_t nbuf)
+{
+	return __cdf_nbuf_inc_users(nbuf);
+}
+#endif /*TSO*/
+
+/**
+ * cdf_nbuf_data_attr_get() - Get data_attr field from cvg_nbuf_cb
+ *
+ * @nbuf: Network buffer (skb on linux)
+ *
+ * This function returns the values of data_attr field
+ * in struct cvg_nbuf_cb{}, to which skb->cb is typecast.
+ * This value is actually the value programmed in CE descriptor.
+ *
+ * Return: Value of data_attr
+ */
+static inline
+uint32_t cdf_nbuf_data_attr_get(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_data_attr_get(buf);
+}
+
+/**
+ * cdf_nbuf_data_attr_set() - Sets data_attr field in cvg_nbuf_cb
+ *
+ * @nbuf: Network buffer (skb on linux)
+ * @data_attr: Value to be stored cvg_nbuf_cb->data_attr
+ *
+ * This function stores the value to be programmed in CE
+ * descriptor as part skb->cb which is typecast to struct cvg_nbuf_cb{}
+ *
+ * Return: void
+ */
+static inline
+void cdf_nbuf_data_attr_set(cdf_nbuf_t buf, uint32_t data_attr)
+{
+	__cdf_nbuf_data_attr_set(buf, data_attr);
+}
+
+/**
+ * cdf_nbuf_tx_info_get() - Parse skb and get Tx metadata
+ *
+ * @nbuf: Network buffer (skb on linux)
+ *
+ * This function parses the payload to figure out relevant
+ * Tx meta-data e.g. whether to enable tx_classify bit
+ * in CE.
+ *
+ * Return:	void
+ */
+#define cdf_nbuf_tx_info_get __cdf_nbuf_tx_info_get
+
+void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state);
+void cdf_nbuf_tx_desc_count_display(void);
+void cdf_nbuf_tx_desc_count_clear(void);
+
+#endif

+ 117 - 0
core/cdf/inc/cdf_net_types.h

@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_net_types
+ * This file defines types used in the networking stack abstraction.
+ */
+
+#ifndef _CDF_NET_TYPES_H
+#define _CDF_NET_TYPES_H
+
+#include <cdf_types.h>          /* uint8_t, etc. */
+
+#define ADF_NET_MAC_ADDR_MAX_LEN 6
+#define ADF_NET_IF_NAME_SIZE    64
+#define ADF_NET_ETH_LEN         ADF_NET_MAC_ADDR_MAX_LEN
+#define ADF_NET_MAX_MCAST_ADDR  64
+
+/* Extended Traffic ID  passed to target if the TID is unknown */
+#define ADF_NBUF_TX_EXT_TID_INVALID     0x1f
+
+/**
+ * cdf_nbuf_exemption_type - CDF net buf exemption types for encryption
+ * @CDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption
+ * @CDF_NBUF_EXEMPT_ALWAYS: Exempt always
+ * @CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt on key mapping
+ */
+enum cdf_nbuf_exemption_type {
+	CDF_NBUF_EXEMPT_NO_EXEMPTION = 0,
+	CDF_NBUF_EXEMPT_ALWAYS,
+	CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE
+};
+
+/**
+ * typedef cdf_nbuf_tx_cksum_t - transmit checksum offload types
+ * @CDF_NBUF_TX_CKSUM_NONE: No checksum offload
+ * @CDF_NBUF_TX_CKSUM_IP: IP header checksum offload
+ * @CDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload
+ * @CDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload
+ */
+
+typedef enum {
+	CDF_NBUF_TX_CKSUM_NONE,
+	CDF_NBUF_TX_CKSUM_IP,
+	CDF_NBUF_TX_CKSUM_TCP_UDP,
+	CDF_NBUF_TX_CKSUM_TCP_UDP_IP,
+
+} cdf_nbuf_tx_cksum_t;
+
+/**
+ * typedef cdf_nbuf_l4_rx_cksum_type_t - receive checksum API types
+ * @CDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP
+ * @CDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP
+ * @CDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6
+ * @CDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6
+ * @CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header
+ * @CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header
+ * @CDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16
+ */
+typedef enum {
+	CDF_NBUF_RX_CKSUM_TCP = 0x0001,
+	CDF_NBUF_RX_CKSUM_UDP = 0x0002,
+	CDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010,
+	CDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020,
+	CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100,
+	CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200,
+	CDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000,
+} cdf_nbuf_l4_rx_cksum_type_t;
+
+/**
+ * typedef cdf_nbuf_l4_rx_cksum_result_t - receive checksum status types
+ * @CDF_NBUF_RX_CKSUM_NONE: Device failed to checksum
+ * @CDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned
+ * @CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value
+ */
+typedef enum {
+	CDF_NBUF_RX_CKSUM_NONE = 0x0000,
+	CDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010,
+	CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020,
+} cdf_nbuf_l4_rx_cksum_result_t;
+
+/**
+ * typedef cdf_nbuf_rx_cksum_t - receive checksum type
+ * @l4_type: L4 type
+ * @l4_result: L4 result
+ */
+typedef struct {
+	cdf_nbuf_l4_rx_cksum_type_t l4_type;
+	cdf_nbuf_l4_rx_cksum_result_t l4_result;
+	uint32_t val;
+} cdf_nbuf_rx_cksum_t;
+
+#endif /*_CDF_NET_TYPES_H*/

+ 118 - 0
core/cdf/inc/cdf_softirq_timer.h

@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_softirq_timer
+ * This file abstracts OS timers running in soft IRQ context.
+ */
+
+#ifndef _CDF_SOFTIRQ_TIMER_H
+#define _CDF_SOFTIRQ_TIMER_H
+
+#include <cdf_types.h>
+#include <i_cdf_softirq_timer.h>
+
+/* Platform timer object */
+typedef __cdf_softirq_timer_t cdf_softirq_timer_t;
+
+/**
+ * cdf_softirq_timer_init() - initialize a softirq timer
+ * @hdl: OS handle
+ * @timer: Timer object pointer
+ * @func: Timer function
+ * @arg: Argument of timer function
+ * @type: deferrable or non deferrable timer type
+ *
+ * Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
+ * not cause CPU wake upon expiry
+ * Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
+ * will cause CPU wake up on expiry
+ *
+ * Return: none
+ */
+static inline void
+cdf_softirq_timer_init(cdf_handle_t hdl,
+			cdf_softirq_timer_t *timer,
+			cdf_softirq_timer_func_t func, void *arg,
+			CDF_TIMER_TYPE type)
+{
+	__cdf_softirq_timer_init(hdl, timer, func, arg, type);
+}
+
+/**
+ * cdf_softirq_timer_start() - start a one-shot softirq timer
+ * @timer: Timer object pointer
+ * @msec: Expiration period in milliseconds
+ *
+ * Return: none
+ */
+static inline void
+cdf_softirq_timer_start(cdf_softirq_timer_t *timer, int msec)
+{
+	__cdf_softirq_timer_start(timer, msec);
+}
+
+/**
+ * cdf_softirq_timer_mod() - modify existing timer to new timeout value
+ * @timer: Timer object pointer
+ * @msec: Expiration period in milliseconds
+ *
+ * Return: none
+ */
+static inline void cdf_softirq_timer_mod(cdf_softirq_timer_t *timer, int msec)
+{
+	__cdf_softirq_timer_mod(timer, msec);
+}
+
+/**
+ * cdf_softirq_timer_cancel() - cancel cdf softirq timer
+ * @timer: Timer object pointer
+ *
+ * The function will return after any running timer completes.
+ *
+ * Return: bool - per the original notes: one value means the timer was
+ *	   cancelled and deactivated, the other that it had already fired;
+ *	   presumably true = cancelled while pending (del_timer semantics)
+ *	   — confirm in __cdf_softirq_timer_cancel().
+ */
+static inline bool cdf_softirq_timer_cancel(cdf_softirq_timer_t *timer)
+{
+	return __cdf_softirq_timer_cancel(timer);
+}
+
+/**
+ * cdf_softirq_timer_free() - free cdf softirq timer
+ * @timer: Timer object pointer
+ *
+ * The function will return after any running timer completes.
+ * Return: none
+ */
+static inline void cdf_softirq_timer_free(cdf_softirq_timer_t *timer)
+{
+	__cdf_softirq_timer_free(timer);
+}
+
+#endif

+ 111 - 0
core/cdf/inc/cdf_status.h

@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_STATUS_H)
+#define __CDF_STATUS_H
+
+/**
+ * DOC:  cdf_status
+ *
+ * Connectivity driver framework (CDF) status codes
+ *
+ * Basic status codes/definitions used by CDF
+ */
+
+/**
+ * typedef CDF_STATUS - CDF error codes
+ * @CDF_STATUS_SUCCESS: success
+ * @CDF_STATUS_E_RESOURCES: system resource(other than memory) not available
+ * @CDF_STATUS_E_NOMEM: not enough memory
+ * @CDF_STATUS_E_AGAIN: try again
+ * @CDF_STATUS_E_INVAL: invalid request
+ * @CDF_STATUS_E_FAULT: system fault
+ * @CDF_STATUS_E_ALREADY: another request already in progress
+ * @CDF_STATUS_E_BADMSG: bad message
+ * @CDF_STATUS_E_BUSY: device or resource busy
+ * @CDF_STATUS_E_CANCELED: request cancelled
+ * @CDF_STATUS_E_ABORTED: request aborted
+ * @CDF_STATUS_E_NOSUPPORT: request not supported
+ * @CDF_STATUS_E_PERM: operation not permitted
+ * @CDF_STATUS_E_EMPTY: empty condition
+ * @CDF_STATUS_E_EXISTS: existence failure
+ * @CDF_STATUS_E_TIMEOUT: operation timeout
+ * @CDF_STATUS_E_FAILURE: unknown reason do not use unless nothing else applies
+ * @CDF_STATUS_NOT_INITIALIZED: resource not initialized
+ * @CDF_STATUS_E_NULL_VALUE: request is null
+ * @CDF_STATUS_PMC_PENDING: request pending in pmc
+ * @CDF_STATUS_PMC_DISABLED: pmc is disabled
+ * @CDF_STATUS_PMC_NOT_NOW: pmc not ready now
+ * @CDF_STATUS_PMC_AC_POWER: pmc ac power
+ * @CDF_STATUS_PMC_SYS_ERROR: pmc system error
+ * @CDF_STATUS_HEARTBEAT_TMOUT: heartbeat timeout error
+ * @CDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery
+ * @CDF_STATUS_CSR_WRONG_STATE: csr in wrong state
+ * @CDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success
+ * @CDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed
+ * @CDF_STATUS_CMD_NOT_QUEUED: command not queued
+ * @CDF_STATUS_FW_MSG_TIMEDOUT: target message timeout
+ * @CDF_STATUS_MAX: not a real value, just a placeholder for max
+ */
+typedef enum {
+	CDF_STATUS_SUCCESS,
+	CDF_STATUS_E_RESOURCES,
+	CDF_STATUS_E_NOMEM,
+	CDF_STATUS_E_AGAIN,
+	CDF_STATUS_E_INVAL,
+	CDF_STATUS_E_FAULT,
+	CDF_STATUS_E_ALREADY,
+	CDF_STATUS_E_BADMSG,
+	CDF_STATUS_E_BUSY,
+	CDF_STATUS_E_CANCELED,
+	CDF_STATUS_E_ABORTED,
+	CDF_STATUS_E_NOSUPPORT,
+	CDF_STATUS_E_PERM,
+	CDF_STATUS_E_EMPTY,
+	CDF_STATUS_E_EXISTS,
+	CDF_STATUS_E_TIMEOUT,
+	CDF_STATUS_E_FAILURE,
+	CDF_STATUS_NOT_INITIALIZED,
+	CDF_STATUS_E_NULL_VALUE,
+	CDF_STATUS_PMC_PENDING,
+	CDF_STATUS_PMC_DISABLED,
+	CDF_STATUS_PMC_NOT_NOW,
+	CDF_STATUS_PMC_AC_POWER,
+	CDF_STATUS_PMC_SYS_ERROR,
+	CDF_STATUS_HEARTBEAT_TMOUT,
+	CDF_STATUS_NTH_BEACON_DELIVERY,
+	CDF_STATUS_CSR_WRONG_STATE,
+	CDF_STATUS_FT_PREAUTH_KEY_SUCCESS,
+	CDF_STATUS_FT_PREAUTH_KEY_FAILED,
+	CDF_STATUS_CMD_NOT_QUEUED,
+	CDF_STATUS_FW_MSG_TIMEDOUT,
+	CDF_STATUS_MAX
+} CDF_STATUS;
+
+#define CDF_IS_STATUS_SUCCESS(status) (CDF_STATUS_SUCCESS == (status))
+
+#endif /* if !defined __CDF_STATUS_H */

+ 83 - 0
core/cdf/inc/cdf_threads.h

@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_THREADS_H)
+#define __CDF_THREADS_H
+
+/**
+ * DOC:  cdf_threads
+ *
+ * Connectivity driver framework (CDF) thread related APIs
+ *
+ */
+
+/* Include Files */
+#include <cdf_types.h>
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+
+/* Function declarations and documentation */
+
+/**
+ *  cdf_sleep() - sleep
+ *  @msInterval : Number of milliseconds to suspend the current thread.
+ *  A value of 0 may or may not cause the current thread to yield.
+ *
+ *  This function suspends the execution of the current thread
+ *  until the specified time out interval elapses.
+ *
+ *  Return: nothing
+ */
+void cdf_sleep(uint32_t msInterval);
+
+/**
+ *  cdf_sleep_us() - sleep
+ *  @usInterval : Number of microseconds to suspend the current thread.
+ *  A value of 0 may or may not cause the current thread to yield.
+ *
+ *  This function suspends the execution of the current thread
+ *  until the specified time out interval elapses.
+ *
+ *  Return : nothing
+ */
+void cdf_sleep_us(uint32_t usInterval);
+
+/**
+ *  cdf_busy_wait() - busy wait
+ *  @usInterval : Number of microseconds to busy wait.
+ *
+ *  This function places the current thread in busy wait until the specified
+ *  time out interval elapses. If the interval is greater than 50us on WM, the
+ *  behaviour is undefined.
+ *
+ *  Return : nothing
+ */
+void cdf_busy_wait(uint32_t usInterval);
+
+#endif /* __CDF_THREADS_H */

+ 184 - 0
core/cdf/inc/cdf_time.h

@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_time
+ * This file abstracts time related functionality.
+ */
+
+#ifndef _CDF_OS_TIME_H
+#define _CDF_OS_TIME_H
+
+#include <i_cdf_time.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+typedef __cdf_time_t cdf_time_t;
+
+/**
+ * cdf_system_ticks() - Count the number of ticks elapsed from the time when
+ *			the system booted
+ *
+ * Return: ticks
+ */
+static inline unsigned long cdf_system_ticks(void)
+{
+	return __cdf_system_ticks();
+}
+
+/**
+ * cdf_system_ticks_to_msecs() - convert ticks to milliseconds
+ * @clock_ticks: Number of ticks
+ *
+ * Return: Time in milliseconds
+ */
+static inline uint32_t cdf_system_ticks_to_msecs(unsigned long clock_ticks)
+{
+	return __cdf_system_ticks_to_msecs(clock_ticks);
+}
+
+/**
+ * cdf_system_msecs_to_ticks() - convert milliseconds to ticks
+ * @msecs: Time in milliseconds
+ *
+ * Return: number of ticks
+ */
+static inline unsigned long cdf_system_msecs_to_ticks(uint32_t msecs)
+{
+	return __cdf_system_msecs_to_ticks(msecs);
+}
+
+/**
+ * cdf_get_system_uptime() - Return a monotonically increasing time.
+ * This increments once per HZ ticks
+ *
+ * Return: system up time
+ */
+static inline unsigned long cdf_get_system_uptime(void)
+{
+	return __cdf_get_system_uptime();
+}
+
+/**
+ * cdf_get_system_timestamp() - Return the current timestamp
+ *
+ * Return: current system timestamp (value and unit are defined by the
+ *	   platform's __cdf_get_system_timestamp() — confirm per platform)
+ */
+static inline unsigned long cdf_get_system_timestamp(void)
+{
+	return __cdf_get_system_timestamp();
+}
+
+/**
+ * cdf_udelay() - delay in microseconds
+ * @usecs: Number of microseconds to delay
+ *
+ * Return: none
+ */
+static inline void cdf_udelay(int usecs)
+{
+	__cdf_udelay(usecs);
+}
+
+/**
+ * cdf_mdelay() - Delay in milliseconds.
+ * @msecs: Number of milliseconds to delay
+ *
+ * Return: none
+ */
+static inline void cdf_mdelay(int msecs)
+{
+	__cdf_mdelay(msecs);
+}
+
+/* Check if _a is later than _b */
+#define cdf_system_time_after(_a, _b)       __cdf_system_time_after(_a, _b)
+
+/* Check if _a is prior to _b */
+#define cdf_system_time_before(_a, _b)      __cdf_system_time_before(_a, _b)
+
+/* Check if _a is at least as recent as _b, if not later */
+#define cdf_system_time_after_eq(_a, _b)    __cdf_system_time_after_eq(_a, _b)
+
+#ifdef QCA_WIFI_3_0_ADRASTEA
+/**
+ * cdf_get_log_timestamp() - get time stamp for logging
+ *
+ * For adrastea this API returns QTIMER tick which is needed to synchronize
+ * host and fw log timestamps
+ *
+ * For ROME and other discrete solution this API returns system boot time stamp
+ *
+ * Return:
+ *	QTIMER ticks(19.2MHz) for adrastea
+ *	System tick for rome and other future discrete solutions
+ */
+static inline uint64_t cdf_get_log_timestamp(void)
+{
+	return __cdf_get_qtimer_ticks();
+}
+#else
+/**
+ * cdf_get_log_timestamp() - get time stamp for logging
+ *
+ * For adrastea this API returns QTIMER tick which is needed to synchronize
+ * host and fw log timestamps
+ *
+ * For ROME and other discrete solution this API returns system boot time stamp
+ *
+ * Return:
+ *	QTIMER ticks(19.2MHz) for adrastea
+ *	System tick for rome and other future discrete solutions
+ */
+static inline uint64_t cdf_get_log_timestamp(void)
+{
+#ifdef CONFIG_CNSS
+	struct timespec ts;
+
+	cnss_get_boottime(&ts);
+
+	/* boot time expressed in microseconds */
+	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+#else
+	/* tick count converted to microseconds (millisecond resolution only) */
+	return cdf_system_ticks_to_msecs(cdf_system_ticks()) * 1000;
+#endif /* CONFIG_CNSS */
+}
+#endif /* QCA_WIFI_3_0_ADRASTEA */
+
+/**
+ * cdf_get_monotonic_boottime() - get monotonic kernel boot time
+ * This API is similar to cdf_get_system_boottime but it includes
+ * time spent in suspend.
+ *
+ * Return: Time in microseconds
+ */
+static inline uint64_t cdf_get_monotonic_boottime(void)
+{
+	return __cdf_get_monotonic_boottime();
+}
+
+#endif

+ 283 - 0
core/cdf/inc/cdf_trace.h

@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_TRACE_H)
+#define __CDF_TRACE_H
+
+/**
+ *  DOC:  cdf_trace
+ *
+ *  Connectivity driver framework trace APIs
+ *
+ *  Trace, logging, and debugging definitions and APIs
+ *
+ */
+
+/* Include Files */
+#include  <cdf_types.h>         /* For CDF_MODULE_ID... */
+#include  <stdarg.h>            /* For va_list... */
+#include  <cdf_status.h>
+#include  <cdf_nbuf.h>
+#include  <cds_packet.h>
+#include  <i_cdf_types.h>
+
+/* Type declarations */
+
+typedef enum {
+	/* NONE means NO traces will be logged.  This value is in place
+	 * for the cdf_trace_setlevel() to allow the user to turn off
+	 * all traces
+	 */
+	CDF_TRACE_LEVEL_NONE = 0,
+
+	/* Following trace levels are the ones that 'callers' of CDF_TRACE()
+	 * can specify in for the CDF_TRACE_LEVEL parameter.  Traces are
+	 * classified by severity. FATAL being more serious than INFO for
+	 * example
+	 */
+	CDF_TRACE_LEVEL_FATAL,
+	CDF_TRACE_LEVEL_ERROR,
+	CDF_TRACE_LEVEL_WARN,
+	CDF_TRACE_LEVEL_INFO,
+	CDF_TRACE_LEVEL_INFO_HIGH,
+	CDF_TRACE_LEVEL_INFO_MED,
+	CDF_TRACE_LEVEL_INFO_LOW,
+	CDF_TRACE_LEVEL_DEBUG,
+
+	/* All means all trace levels will be active.  This value is in place
+	 * for the cdf_trace_setlevel() to allow the user to turn ON all traces
+	 */
+	CDF_TRACE_LEVEL_ALL,
+
+	/* Not a real level.  Used to identify the maximum number of
+	 * CDF_TRACE_LEVELs defined
+	 */
+	CDF_TRACE_LEVEL_MAX
+} CDF_TRACE_LEVEL;
+
+/* By default Data Path module will have all log levels enabled, except debug
+ * log level. Debug level will be left up to the framework or user space modules
+ * to be enabled when issue is detected
+ */
+#define CDF_DATA_PATH_TRACE_LEVEL \
+	((1 << CDF_TRACE_LEVEL_FATAL) | (1 << CDF_TRACE_LEVEL_ERROR) | \
+	(1 << CDF_TRACE_LEVEL_WARN) | (1 << CDF_TRACE_LEVEL_INFO) | \
+	(1 << CDF_TRACE_LEVEL_INFO_HIGH) | (1 << CDF_TRACE_LEVEL_INFO_MED) | \
+	(1 << CDF_TRACE_LEVEL_INFO_LOW))
+
+/* Preprocessor definitions and constants */
+#define ASSERT_BUFFER_SIZE (512)
+
+#define CDF_ENABLE_TRACING
+#define MAX_CDF_TRACE_RECORDS 4000
+#define INVALID_CDF_TRACE_ADDR 0xffffffff
+#define DEFAULT_CDF_TRACE_DUMP_COUNT 0
+
+#include  <i_cdf_trace.h>
+
+#ifdef TRACE_RECORD
+
+#define MTRACE(p) p
+#define NO_SESSION 0xFF
+
+#else
+#define MTRACE(p) {  }
+
+#endif
+
+/* Structure definition */
+/**
+ * struct cdf_trace_record_s - one entry of the MTRACE ring buffer
+ * @time:    timestamp at which the record was stored
+ * @module:  ID of the module that generated the trace (see cdf_trace())
+ * @code:    event code (module-specific)
+ * @session: session the event belongs to (NO_SESSION when not applicable)
+ * @data:    opaque per-event data word
+ * @pid:     process id which stored this record
+ */
+typedef struct cdf_trace_record_s {
+	uint64_t time;
+	uint8_t module;
+	uint8_t code;
+	uint16_t session;
+	uint32_t data;
+	uint32_t pid;
+} cdf_trace_record_t, *tp_cdf_trace_record;
+
+typedef struct s_cdf_trace_data {
+	/* MTRACE logs are stored in ring buffer where head represents the
+	 * position of first record, tail represents the position of last record
+	 * added till now and num is the count of total record added
+	 */
+	uint32_t head;
+	uint32_t tail;
+	uint32_t num;
+	uint16_t numSinceLastDump;
+
+	/* config for controlling the trace */
+	uint8_t enable;
+	/* Dump after number of records reach this number */
+	uint16_t dumpCount;
+} t_cdf_trace_data;
+
+#define CASE_RETURN_STRING(str) case ((str)): return (uint8_t *)(# str);
+
+/* DP Trace Implementation */
+#define DPTRACE(p) p
+
+#define MAX_CDF_DP_TRACE_RECORDS       4000
+#define CDF_DP_TRACE_RECORD_SIZE       16
+#define INVALID_CDF_DP_TRACE_ADDR      0xffffffff
+#define CDF_DP_TRACE_VERBOSITY_HIGH    3
+#define CDF_DP_TRACE_VERBOSITY_MEDIUM  2
+#define CDF_DP_TRACE_VERBOSITY_LOW     1
+#define CDF_DP_TRACE_VERBOSITY_DEFAULT 0
+
+/**
+ * enum CDF_DP_TRACE_ID - Generic ID to identify various events in data path
+ * @CDF_DP_TRACE_INVALID: Invalid ID
+ * @CDF_DP_TRACE_DROP_PACKET_RECORD: Dropped packet stored with this id
+ * @CDF_DP_TRACE_HDD_PACKET_PTR_RECORD: nbuf->data ptr of HDD
+ * @CDF_DP_TRACE_HDD_PACKET_RECORD: nbuf->data stored with this id
+ * @CDF_DP_TRACE_CE_PACKET_PTR_RECORD: nbuf->data ptr of CE
+ * @CDF_DP_TRACE_CE_PACKET_RECORD: nbuf->data stored with this id
+ * @CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: nbuf->data ptr of txrx queue
+ * @CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: nbuf->data ptr of txrx
+ * @CDF_DP_TRACE_HTT_PACKET_PTR_RECORD: nbuf->data ptr of htt
+ * @CDF_DP_TRACE_HTC_PACKET_PTR_RECORD: nbuf->data ptr of htc
+ * @CDF_DP_TRACE_HIF_PACKET_PTR_RECORD: nbuf->data ptr of hif
+ * @CDF_DP_TRACE_HDD_TX_TIMEOUT: hdd tx timeout event
+ * @CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: hdd tx softap timeout event
+ * @CDF_DP_TRACE_VDEV_PAUSE: vdev pause event
+ * @CDF_DP_TRACE_VDEV_UNPAUSE: vdev unpause event
+ *
+ */
+enum  CDF_DP_TRACE_ID {
+	CDF_DP_TRACE_INVALID                           = 0,
+	CDF_DP_TRACE_DROP_PACKET_RECORD                = 1,
+	CDF_DP_TRACE_HDD_PACKET_PTR_RECORD             = 2,
+	CDF_DP_TRACE_HDD_PACKET_RECORD                 = 3,
+	CDF_DP_TRACE_CE_PACKET_PTR_RECORD              = 4,
+	CDF_DP_TRACE_CE_PACKET_RECORD                  = 5,
+	CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD      = 6,
+	CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD            = 7,
+	CDF_DP_TRACE_HTT_PACKET_PTR_RECORD             = 8,
+	CDF_DP_TRACE_HTC_PACKET_PTR_RECORD             = 9,
+	CDF_DP_TRACE_HIF_PACKET_PTR_RECORD             = 10,
+	CDF_DP_TRACE_HDD_TX_TIMEOUT                    = 11,
+	CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT             = 12,
+	CDF_DP_TRACE_VDEV_PAUSE                        = 13,
+	CDF_DP_TRACE_VDEV_UNPAUSE                      = 14,
+	CDF_DP_TRACE_MAX
+
+};
+
+/**
+ * struct cdf_dp_trace_record_s - Describes a record in DP trace
+ * @time: time when it got stored
+ * @code: Describes the particular event
+ * @data: buffer to store data
+ * @size: Length of the valid data stored in this record
+ * @pid : process id which stored the data in this record
+ */
+struct cdf_dp_trace_record_s {
+	uint64_t time;
+	uint8_t code;
+	uint8_t data[CDF_DP_TRACE_RECORD_SIZE];
+	uint8_t size;
+	uint32_t pid;
+};
+
+/**
+ * struct s_cdf_dp_trace_data - Parameters to configure/control DP trace
+ * @head: Position of first record
+ * @tail: Position of last record
+ * @num:  Current index
+ * @proto_bitmap: defines which protocol to be traced
+ * @no_of_record: defines every nth packet to be traced
+ * @verbosity: defines verbosity level
+ * @enable: enable/disable DP trace
+ * @count: current packet number
+ */
+struct s_cdf_dp_trace_data {
+	uint32_t head;
+	uint32_t tail;
+	uint32_t num;
+
+	/* config for controlling the trace */
+	uint8_t proto_bitmap;
+	uint8_t no_of_record;
+	uint8_t verbosity;
+	bool enable;
+	uint32_t count;
+};
+/* Function declarations and documentation */
+
+/**
+ * cdf_trace_set_level() - Set the trace level for a particular module
+ * @level : trace level
+ *
+ * Trace level is a member of the CDF_TRACE_LEVEL enumeration indicating
+ * the severity of the condition causing the trace message to be issued.
+ * More severe conditions are more likely to be logged.
+ *
+ * This is an external API that allows trace levels to be set for each module.
+ *
+ * Return:  nothing
+ */
+void cdf_trace_set_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);
+
+/**
+ * cdf_trace_get_level() - get the trace level
+ * @level : trace level
+ *
+ * This is an external API that returns a bool value to signify if a
+ * particular trace level is set for the specified module.
+ * A member of the CDF_TRACE_LEVEL enumeration indicating the severity
+ * of the condition causing the trace message to be issued.
+ *
+ * Note that individual trace levels are the only valid values
+ * for this API.  CDF_TRACE_LEVEL_NONE and CDF_TRACE_LEVEL_ALL
+ * are not valid input and will return false
+ *
+ * Return:
+ *  false - the specified trace level for the specified module is OFF
+ *  true - the specified trace level for the specified module is ON
+ */
+bool cdf_trace_get_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);
+
+typedef void (*tp_cdf_trace_cb)(void *pMac, tp_cdf_trace_record, uint16_t);
+void cdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data);
+void cdf_trace_register(CDF_MODULE_ID, tp_cdf_trace_cb);
+CDF_STATUS cdf_trace_spin_lock_init(void);
+void cdf_trace_init(void);
+void cdf_trace_enable(uint32_t, uint8_t enable);
+void cdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t);
+
+void cdf_dp_trace_spin_lock_init(void);
+void cdf_dp_trace_init(void);
+void cdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
+			 uint8_t verbosity);
+void cdf_dp_trace_set_track(cdf_nbuf_t nbuf);
+void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
+			uint8_t *data, uint8_t size);
+void cdf_dp_trace_dump_all(uint32_t count);
+typedef void (*tp_cdf_dp_trace_cb)(struct cdf_dp_trace_record_s* , uint16_t);
+void cdf_dp_display_record(struct cdf_dp_trace_record_s *record,
+							uint16_t index);
+#endif

+ 492 - 0
core/cdf/inc/cdf_types.h

@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__CDF_TYPES_H)
+#define __CDF_TYPES_H
+/**
+ * DOC: cdf_types.h
+ *
+ * Connectivity driver framework (CDF) basic type definitions
+ */
+
+/* Include Files */
+#include "i_cdf_types.h"
+#include <string.h>
+
+/* Preprocessor definitions and constants */
+
+/**
+ * CDF_MAX - get maximum of two values
+ * @_x: 1st argument
+ * @_y: 2nd argument
+ */
+#define CDF_MAX(_x, _y) (((_x) > (_y)) ? (_x) : (_y))
+
+/**
+ * CDF_MIN - get minimum of two values
+ * @_x: 1st arguement
+ * @_y: 2nd arguement
+ */
+#define CDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y))
+
+/**
+ * CDF_SWAP_U16 - swap input u16 value
+ * @_x: variable to swap
+ */
+#define CDF_SWAP_U16(_x) \
+	((((_x) << 8) & 0xFF00) | (((_x) >> 8) & 0x00FF))
+
+/**
+ * CDF_SWAP_U32 - swap input u32 value
+ * @_x: variable to swap
+ */
+#define CDF_SWAP_U32(_x) \
+	(((((_x) << 24) & 0xFF000000) | (((_x) >> 24) & 0x000000FF)) | \
+	 ((((_x) << 8) & 0x00FF0000) | (((_x) >> 8) & 0x0000FF00)))
+
+#define CDF_TICKS_PER_SECOND        (1000)
+
+/**
+ * CDF_ARRAY_SIZE - get array size
+ * @_arr: array variable name
+ */
+#define CDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0]))
+
+/* endian operations for Big Endian and Small Endian modes */
+#ifdef ANI_LITTLE_BYTE_ENDIAN
+
+#define cdf_be16_to_cpu(_x) CDF_SWAP_U16(_x)
+
+#endif
+
+#ifdef ANI_BIG_BYTE_ENDIAN
+
+#define cdf_be16_to_cpu(_x) (_x)
+
+#endif
+
+#ifndef __ahdecl
+#ifdef __i386__
+#define __ahdecl   __attribute__((regparm(0)))
+#else
+#define __ahdecl
+#endif
+#endif
+
+#define CDF_OS_MAX_SCATTER  __CDF_OS_MAX_SCATTER
+
+/**
+ * @brief denotes structure is packed.
+ */
+#define cdf_packed __cdf_packed
+
+/**
+ * typedef cdf_handle_t - handles opaque to each other
+ */
+typedef void *cdf_handle_t;
+
+/**
+ * typedef cdf_device_t - Platform/bus generic handle.
+ *			  Used for bus specific functions.
+ */
+typedef __cdf_device_t cdf_device_t;
+
+/**
+ * typedef cdf_size_t - size of an object
+ */
+typedef __cdf_size_t cdf_size_t;
+
+/**
+ * typedef cdf_dma_map_t - DMA mapping object.
+ */
+typedef __cdf_dma_map_t cdf_dma_map_t;
+
+/**
+ * typedef cdf_dma_addr_t - DMA address.
+ */
+typedef __cdf_dma_addr_t cdf_dma_addr_t;
+
+/**
+ * typedef cdf_dma_context_t - DMA context.
+ */
+typedef __cdf_dma_context_t cdf_dma_context_t;
+
+
+#define cdf_iomem_t   __cdf_iomem_t	/* no trailing ';' — a type-alias macro must not expand to a semicolon */
+/**
+ * typedef enum CDF_TIMER_TYPE - CDF timer type
+ * @CDF_TIMER_TYPE_SW: Deferrable SW timer it will not cause CPU to wake up
+ *			on expiry
+ * @CDF_TIMER_TYPE_WAKE_APPS:  Non deferrable timer which will cause CPU to
+ *				wake up on expiry
+ */
+typedef enum {
+	CDF_TIMER_TYPE_SW,
+	CDF_TIMER_TYPE_WAKE_APPS
+} CDF_TIMER_TYPE;
+
+/**
+ * typedef cdf_resource_type_t - hw resources
+ *
+ * @CDF_RESOURCE_TYPE_MEM: memory resource
+ * @CDF_RESOURCE_TYPE_IO: io resource
+ *
+ * Define the hw resources the OS has allocated for the device
+ * Note that start defines a mapped area.
+ */
+typedef enum {
+	CDF_RESOURCE_TYPE_MEM,
+	CDF_RESOURCE_TYPE_IO,
+} cdf_resource_type_t;
+
+/**
+ * typedef cdf_resource_t - representation of a h/w resource.
+ *
+ * @start: start
+ * @end: end
+ * @type: resource type
+ */
+typedef struct {
+	uint64_t start;
+	uint64_t end;
+	cdf_resource_type_t type;
+} cdf_resource_t;
+
+/**
+ * typedef cdf_dma_dir_t - DMA directions
+ *
+ * @CDF_DMA_BIDIRECTIONAL: bidirectional data
+ * @CDF_DMA_TO_DEVICE: data going from memory to the device
+ * @CDF_DMA_FROM_DEVICE: data going from the device to memory
+ */
+typedef enum {
+	CDF_DMA_BIDIRECTIONAL = __CDF_DMA_BIDIRECTIONAL,
+	CDF_DMA_TO_DEVICE = __CDF_DMA_TO_DEVICE,
+	CDF_DMA_FROM_DEVICE = __CDF_DMA_FROM_DEVICE,
+} cdf_dma_dir_t;
+
+/* work queue(kernel thread)/DPC function callback */
+typedef void (*cdf_defer_fn_t)(void *);
+
+/* Prototype of the critical region function that is to be
+ * executed with spinlock held and interrupt disalbed
+ */
+typedef bool (*cdf_irqlocked_func_t)(void *);
+
+/* Prototype of timer function */
+typedef void (*cdf_softirq_timer_func_t)(void *);
+
+#define cdf_print         __cdf_print
+#define cdf_vprint        __cdf_vprint
+#define cdf_snprint       __cdf_snprint
+
+#define cdf_offsetof(type, field) offsetof(type, field)
+
+/**
+ * typedef CDF_MODULE_ID - CDF Module IDs
+ *
+ * @CDF_MODULE_ID_TLSHIM: TLSHIM module ID
+ * @CDF_MODULE_ID_WMI: WMI module ID
+ * @CDF_MODULE_ID_HTT: HTT module ID
+ * @CDF_MODULE_ID_RSV4: Reserved
+ * @CDF_MODULE_ID_HDD: HDD module ID
+ * @CDF_MODULE_ID_SME: SME module ID
+ * @CDF_MODULE_ID_PE: PE module ID
+ * @CDF_MODULE_ID_WMA: WMA module ID
+ * @CDF_MODULE_ID_SYS: SYS module ID
+ * @CDF_MODULE_ID_CDF: CDF module ID
+ * @CDF_MODULE_ID_SAP: SAP module ID
+ * @CDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID
+ * @CDF_MODULE_ID_HDD_DATA: HDD DATA module ID
+ * @CDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID
+ * @CDF_MODULE_ID_HIF: HIF module ID
+ * @CDF_MODULE_ID_HTC: HTC module ID
+ * @CDF_MODULE_ID_TXRX: TXRX module ID
+ * @CDF_MODULE_ID_CDF_DEVICE: CDF DEVICE module ID
+ * @CDF_MODULE_ID_CFG: CFG module ID
+ * @CDF_MODULE_ID_BMI: BMI module ID
+ * @CDF_MODULE_ID_EPPING: EPPING module ID
+ * @CDF_MODULE_ID_MAX: Max place holder module ID
+ *
+ * These are generic IDs that identify the various modules in the software
+ * system
+ * 0 is unused for historical purposes
+ * 3 & 4 are unused for historical purposes
+ */
+typedef enum {
+	CDF_MODULE_ID_TLSHIM = 1,
+	CDF_MODULE_ID_WMI = 2,
+	CDF_MODULE_ID_HTT = 3,
+	CDF_MODULE_ID_RSV4 = 4,
+	CDF_MODULE_ID_HDD = 5,
+	CDF_MODULE_ID_SME = 6,
+	CDF_MODULE_ID_PE = 7,
+	CDF_MODULE_ID_WMA = 8,
+	CDF_MODULE_ID_SYS = 9,
+	CDF_MODULE_ID_CDF = 10,
+	CDF_MODULE_ID_SAP = 11,
+	CDF_MODULE_ID_HDD_SOFTAP = 12,
+	CDF_MODULE_ID_HDD_DATA = 14,
+	CDF_MODULE_ID_HDD_SAP_DATA = 15,
+
+	CDF_MODULE_ID_HIF = 16,
+	CDF_MODULE_ID_HTC = 17,
+	CDF_MODULE_ID_TXRX = 18,
+	CDF_MODULE_ID_CDF_DEVICE = 19,
+	CDF_MODULE_ID_CFG = 20,
+	CDF_MODULE_ID_BMI = 21,
+	CDF_MODULE_ID_EPPING = 22,
+
+	CDF_MODULE_ID_MAX
+} CDF_MODULE_ID;
+
+/**
+ * typedef enum tCDF_CON_MODE - Concurrency role.
+ *
+ * @CDF_STA_MODE: STA mode
+ * @CDF_SAP_MODE: SAP mode
+ * @CDF_P2P_CLIENT_MODE: P2P client mode
+ * @CDF_P2P_GO_MODE: P2P GO mode
+ * @CDF_FTM_MODE: FTM mode
+ * @CDF_IBSS_MODE: IBSS mode
+ * @CDF_P2P_DEVICE_MODE: P2P device mode
+ * @CDF_EPPING_MODE: EPPING device mode
+ * @CDF_OCB_MODE: OCB device mode
+ * @CDF_MAX_NO_OF_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+typedef enum {
+	CDF_STA_MODE = 0,
+	CDF_SAP_MODE = 1,
+	CDF_P2P_CLIENT_MODE,
+	CDF_P2P_GO_MODE,
+	CDF_FTM_MODE = 5,
+	CDF_IBSS_MODE,
+	CDF_P2P_DEVICE_MODE,
+	CDF_EPPING_MODE,
+	CDF_OCB_MODE,
+	CDF_MAX_NO_OF_MODE
+} tCDF_CON_MODE;
+
+#ifdef WLAN_OPEN_P2P_INTERFACE
+/* This should match with WLAN_MAX_INTERFACES */
+#define CDF_MAX_CONCURRENCY_PERSONA    (4)
+#else
+#define CDF_MAX_CONCURRENCY_PERSONA    (3)
+#endif
+
+#define CDF_STA_MASK (1 << CDF_STA_MODE)
+#define CDF_SAP_MASK (1 << CDF_SAP_MODE)
+#define CDF_P2P_CLIENT_MASK (1 << CDF_P2P_CLIENT_MODE)
+#define CDF_P2P_GO_MASK (1 << CDF_P2P_GO_MODE)
+
+#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
+typedef enum {
+	CDF_MCC_TO_SCC_SWITCH_DISABLE = 0,
+	CDF_MCC_TO_SCC_SWITCH_ENABLE,
+	CDF_MCC_TO_SCC_SWITCH_FORCE,
+	CDF_MCC_TO_SCC_SWITCH_MAX
+} tCDF_MCC_TO_SCC_SWITCH_MODE;
+#endif
+
+#if !defined(NULL)
+#ifdef __cplusplus
+#define NULL    0
+#else
+#define NULL    ((void *)0)
+#endif
+#endif
+
+/* 'Time' type */
+typedef unsigned long v_TIME_t;
+
+/* typedef for CDF Context... */
+typedef void *v_CONTEXT_t;
+
+#define CDF_MAC_ADDR_SIZE (6)
+
+/**
+ * struct cdf_mac_addr - mac address array
+ * @bytes: MAC address bytes
+ */
+struct cdf_mac_addr {
+	uint8_t bytes[CDF_MAC_ADDR_SIZE];
+};
+
+/* This macro is used to initialize a CDF MacAddress to the broadcast
+ * MacAddress.  It is used like this...
+ * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_BROADCAST_INITIALIZER
+ */
+#define CDF_MAC_ADDR_BROADCAST_INITIALIZER { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }
+
+/* This macro is used to initialize a CDF MacAddress to zero
+ * It is used like this...
+ * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_ZERO_INITIALIZER
+ */
+#define CDF_MAC_ADDR_ZERO_INITIALIZER { { 0, 0, 0, 0, 0, 0 } }
+
+#define CDF_IPV4_ADDR_SIZE (4)
+
+/**
+ * struct cdf_tso_frag_t - fragments of a single TCP segment
+ * @paddr_low_32:	Lower 32 bits of the buffer pointer
+ * @paddr_upper_16:	upper 16 bits of the buffer pointer
+ * @length:	length of the buffer
+ * @vaddr:	virtual address
+ *
+ * This structure holds the fragments of a single TCP segment of a
+ * given jumbo TSO network buffer
+ */
+struct cdf_tso_frag_t {
+	uint32_t paddr_low_32;
+	uint32_t paddr_upper_16:16,
+		     length:16;
+	unsigned char *vaddr;
+};
+
+#define FRAG_NUM_MAX 6
+
+/**
+ * struct cdf_tso_flags_t - TSO specific flags
+ * @tso_enable: Enable transmit segmentation offload
+ * @tcp_flags_mask: Tcp_flag is inserted into the header based
+ * on the mask
+ * @l2_len: L2 length for the msdu
+ * @ip_len: IP length for the msdu
+ * @tcp_seq_num: TCP sequence number
+ * @ip_id: IP identification number
+ *
+ * This structure holds the TSO specific flags extracted from the TSO network
+ * buffer for a given TCP segment
+ */
+struct cdf_tso_flags_t {
+	u_int32_t tso_enable:1,
+			reserved_0a:6,
+			fin:1,
+			syn:1,
+			rst:1,
+			psh:1,
+			ack:1,
+			urg:1,
+			ece:1,
+			cwr:1,
+			ns:1,
+			tcp_flags_mask:9,
+			reserved_0b:7;
+/* ------------------------------------------------------------------- */
+
+	u_int32_t l2_len:16,
+			ip_len:16;
+/* ------------------------------------------------------------------- */
+
+	u_int32_t tcp_seq_num;
+/* ------------------------------------------------------------------- */
+
+	u_int32_t ip_id:16,
+			ipv4_checksum_en:1,
+			udp_ipv4_checksum_en:1,
+			udp_ipv6_checksum_en:1,
+			tcp_ipv4_checksum_en:1,
+			tcp_ipv6_checksum_en:1,
+			partial_checksum_en:1,
+			reserved_3a:10;
+/* ------------------------------------------------------------------- */
+
+	u_int32_t checksum_offset:14,
+			reserved_4a:2,
+			payload_start_offset:14,
+			reserved_4b:2;
+/* ------------------------------------------------------------------- */
+
+	u_int32_t payload_end_offset:14,
+			reserved_5:18;
+};
+
+/**
+ * struct cdf_tso_seg_t - single TSO segment
+ * @tso_flags:	TSO flags
+ * @num_frags:	number of fragments
+ * @tso_frags:	array holding the fragments
+ *
+ * This structure holds the information of a single TSO segment of a jumbo
+ * TSO network buffer
+ */
+struct cdf_tso_seg_t {
+	struct cdf_tso_flags_t tso_flags;
+/* ------------------------------------------------------------------- */
+	uint32_t num_frags;
+	struct cdf_tso_frag_t tso_frags[FRAG_NUM_MAX];
+};
+
+struct cdf_tso_seg_elem_t {
+	struct cdf_tso_seg_t seg;
+	struct cdf_tso_seg_elem_t *next;
+};
+
+/**
+ * struct cdf_tso_info_t - TSO information extracted
+ * @is_tso: is this is a TSO frame
+ * @num_segs: number of segments
+ * @total_len: total length of the packet
+ * @tso_seg_list: list of TSO segments for this jumbo packet
+ * @curr_seg: segment that is currently being processed
+ *
+ * This structure holds the TSO information extracted after parsing the TSO
+ * jumbo network buffer. It contains a chain of the TSO segments belonging to
+ * the jumbo packet
+ */
+struct cdf_tso_info_t {
+	uint8_t is_tso;
+	uint32_t num_segs;
+	uint32_t total_len;
+	struct cdf_tso_seg_elem_t *tso_seg_list;
+	struct cdf_tso_seg_elem_t *curr_seg;
+};
+
+/**
+ * Used to set classify bit in CE desc.
+ */
+#define CDF_CE_TX_CLASSIFY_BIT_S	5
+
+/**
+ * 2 bits starting at bit 6 in CE desc.
+ */
+#define CDF_CE_TX_PKT_TYPE_BIT_S	6
+
+/**
+ * 12 bits --> 16-27, in the CE desciptor, the length of HTT/HTC descriptor
+ */
+#define CDF_CE_TX_PKT_OFFSET_BIT_S	16
+
+/**
+ * Mask for packet offset in the CE descriptor.
+ */
+#define CDF_CE_TX_PKT_OFFSET_BIT_M	0x0fff0000
+
+#endif /* if !defined __CDF_TYPES_H */

+ 325 - 0
core/cdf/inc/cdf_util.h

@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_util.h
+ *
+ * This file defines utility functions.
+ */
+
+#ifndef _CDF_UTIL_H
+#define _CDF_UTIL_H
+
+#include <i_cdf_util.h>
+
+/**
+ * cdf_unlikely - Compiler-dependent branch-prediction hint marking @_expr
+ *                as unlikely to be true (cold path)
+ * @_expr: expression to be checked
+ */
+#define cdf_unlikely(_expr)     __cdf_unlikely(_expr)
+
+/**
+ * cdf_likely - Compiler-dependent branch-prediction hint marking @_expr
+ *              as likely to be true (hot path)
+ * @_expr: expression to be checked
+ */
+#define cdf_likely(_expr)       __cdf_likely(_expr)
+
+/**
+ * cdf_status_to_os_return() - map a CDF_STATUS code to the OS-native
+ *			       return value
+ * @status: CDF status code to convert
+ *
+ * Thin wrapper around the OS-specific __cdf_status_to_os_return().
+ *
+ * Return: the OS-native return code corresponding to @status
+ */
+CDF_INLINE_FN int cdf_status_to_os_return(CDF_STATUS status)
+{
+	return __cdf_status_to_os_return(status);
+}
+
+/**
+ * cdf_assert - assert "expr" evaluates to false
+ * @expr: assert expression
+ */
+#ifdef CDF_OS_DEBUG
+#define cdf_assert(expr)         __cdf_assert(expr)
+#else
+#define cdf_assert(expr)
+#endif /* CDF_OS_DEBUG */
+
+/**
+ * cdf_assert_always - assert that is compiled in even when CDF_OS_DEBUG
+ *		       is not defined; triggers when @expr evaluates false
+ * @expr: assert expression
+ */
+#define cdf_assert_always(expr)  __cdf_assert(expr)
+
+/**
+ * cdf_os_cpu_to_le64 - Convert a 64-bit value from CPU byte order to
+ *			little-endian byte order
+ * @x: value to be converted
+ */
+#define cdf_os_cpu_to_le64(x)                   __cdf_os_cpu_to_le64(x)
+
+/**
+ * cdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order
+ *			to CPU byte order
+ * @x: value to be converted
+ */
+#define cdf_le16_to_cpu(x)                   __cdf_le16_to_cpu(x)
+
+/**
+ * cdf_le32_to_cpu - Convert a 32-bit value from little-endian byte order to
+ *			CPU byte order
+ * @x: value to be converted
+ */
+#define cdf_le32_to_cpu(x)                   __cdf_le32_to_cpu(x)
+
+/**
+ * cdf_in_interrupt - returns true if in interrupt context
+ */
+#define cdf_in_interrupt          in_interrupt
+
+/**
+ * cdf_container_of - cast a member of a structure out to the containing
+ *                    structure
+ * @ptr:        the pointer to the member.
+ * @type:       the type of the container struct this is embedded in.
+ * @member:     the name of the member within the struct.
+ *
+ */
+#define cdf_container_of(ptr, type, member) \
+	 __cdf_container_of(ptr, type, member)
+
+/**
+ * cdf_is_macaddr_equal() - compare two CDF MacAddress
+ * @pMacAddr1: Pointer to one cdf MacAddress to compare
+ * @pMacAddr2: Pointer to the other cdf MacAddress to compare
+ *
+ * This function returns a bool that tells if a two CDF MacAddress'
+ * are equivalent.
+ *
+ * Return: true if the MacAddress's are equal
+ *	not true if the MacAddress's are not equal
+ */
+CDF_INLINE_FN bool cdf_is_macaddr_equal(struct cdf_mac_addr *pMacAddr1,
+					struct cdf_mac_addr *pMacAddr2)
+{
+	return 0 == memcmp(pMacAddr1, pMacAddr2, CDF_MAC_ADDR_SIZE);
+}
+
+/**
+ * cdf_is_macaddr_zero() - check for a MacAddress of all zeros.
+ * @pMacAddr: pointer to the struct cdf_mac_addr to check.
+ *
+ * This function returns a bool that tells if a MacAddress is made up of
+ * all zeros.
+ *
+ * Return: true if the MacAddress is all Zeros
+ *	false if the MacAddress is not all Zeros.
+ */
+CDF_INLINE_FN bool cdf_is_macaddr_zero(struct cdf_mac_addr *pMacAddr)
+{
+	struct cdf_mac_addr zeroMacAddr = CDF_MAC_ADDR_ZERO_INITIALIZER;
+
+	return cdf_is_macaddr_equal(pMacAddr, &zeroMacAddr);
+}
+
+/**
+ * cdf_zero_macaddr() - zero out a MacAddress
+ * @pMacAddr: pointer to the struct cdf_mac_addr to zero.
+ *
+ * This function zeros out a CDF MacAddress type.
+ *
+ * Return: nothing
+ */
+CDF_INLINE_FN void cdf_zero_macaddr(struct cdf_mac_addr *pMacAddr)
+{
+	memset(pMacAddr, 0, CDF_MAC_ADDR_SIZE);
+}
+
+/**
+ * cdf_is_macaddr_group() - check if a MacAddress is a 'group' address
+ * @pMacAddr: pointer to the cdf MacAddress to check
+ *
+ * This function returns a bool that tells if the input CDF MacAddress
+ * is a "group" address.  Group addresses have the 'group address bit'
+ * (the least significant bit of the first byte) turned on in the
+ * MacAddress.  Group addresses are made up of Broadcast and Multicast
+ * addresses.
+ *
+ * Return: true if the input MacAddress is a Group address
+ *	false if the input MacAddress is not a Group address
+ */
+CDF_INLINE_FN bool cdf_is_macaddr_group(struct cdf_mac_addr *pMacAddr)
+{
+	return pMacAddr->bytes[0] & 0x01;
+}
+
+/**
+ * cdf_is_macaddr_broadcast() - check if a MacAddress is a broadcast address
+ * @pMacAddr: Pointer to the cdf MacAddress to check
+ *
+ * This function returns a bool that tells if the input CDF MacAddress
+ * is a "broadcast" address (compared against
+ * CDF_MAC_ADDR_BROADCAST_INITIALIZER).
+ *
+ * Return: true if the input MacAddress is a broadcast address
+ *	false if the input MacAddress is not a broadcast address
+ */
+CDF_INLINE_FN bool cdf_is_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
+{
+	struct cdf_mac_addr broadcastMacAddr =
+					CDF_MAC_ADDR_BROADCAST_INITIALIZER;
+
+	return cdf_is_macaddr_equal(pMacAddr, &broadcastMacAddr);
+}
+
+/**
+ * cdf_copy_macaddr() - copy a CDF MacAddress
+ * @pDst: pointer to the cdf MacAddress to copy TO (the destination)
+ * @pSrc: pointer to the cdf MacAddress to copy FROM (the source)
+ *
+ * This function copies a CDF MacAddress into another CDF MacAddress via
+ * structure assignment.
+ *
+ * Return: nothing
+ */
+CDF_INLINE_FN void cdf_copy_macaddr(struct cdf_mac_addr *pDst,
+				    struct cdf_mac_addr *pSrc)
+{
+	*pDst = *pSrc;
+}
+
+/**
+ * cdf_set_macaddr_broadcast() - set a CDF MacAddress to the 'broadcast'
+ * @pMacAddr: pointer to the cdf MacAddress to set to broadcast
+ *
+ * This function sets a CDF MacAddress to the 'broadcast' MacAddress. Broadcast
+ * MacAddress contains all 0xFF bytes.
+ *
+ * Return: nothing
+ */
+CDF_INLINE_FN void cdf_set_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
+{
+	memset(pMacAddr, 0xff, CDF_MAC_ADDR_SIZE);
+}
+
+#if defined(ANI_LITTLE_BYTE_ENDIAN)
+
+/**
+ * i_cdf_htonl() - convert from host byte order to network byte order
+ * @ul: input to be converted
+ *
+ * Return: converted network byte order
+ */
+CDF_INLINE_FN unsigned long i_cdf_htonl(unsigned long ul)
+{
+	return ((ul & 0x000000ff) << 24) |
+		((ul & 0x0000ff00) << 8) |
+		((ul & 0x00ff0000) >> 8) | ((ul & 0xff000000) >> 24);
+}
+
+/**
+ * i_cdf_ntohl() - convert network byte order to host byte order
+ * @ul: input to be converted
+ *
+ * Return: converted host byte order
+ */
+CDF_INLINE_FN unsigned long i_cdf_ntohl(unsigned long ul)
+{
+	return i_cdf_htonl(ul);
+}
+
+#endif
+
+/**
+ * cdf_set_u16() - Assign 16-bit unsigned value to a byte array based on
+ *			CPU's endianness.
+ * @ptr: Starting address of a byte array
+ * @value: The value to assign to the byte array
+ *
+ * Caller must validate the byte array has enough space to hold the value.
+ *
+ * Return: The address to the byte after the assignment. This may or may not
+ *	be valid. Caller to verify.
+ */
+CDF_INLINE_FN uint8_t *cdf_set_u16(uint8_t *ptr, uint16_t value)
+{
+#if defined(ANI_BIG_BYTE_ENDIAN)
+	/* big-endian host: most significant byte stored first */
+	*(ptr) = (uint8_t) (value >> 8);
+	*(ptr + 1) = (uint8_t) (value);
+#else
+	/* little-endian host: least significant byte stored first */
+	*(ptr + 1) = (uint8_t) (value >> 8);
+	*(ptr) = (uint8_t) (value);
+#endif
+
+	return ptr + 2;
+}
+
+/**
+ * cdf_get_u16() - Retrieve a 16-bit unsigned value from a byte array based
+ *			on CPU's endianness.
+ * @ptr: Starting address of a byte array
+ * @pValue: Pointer to a caller allocated buffer for 16 bit value. Value is
+ *		assigned to this location.
+ *
+ * Caller must validate the byte array has enough space to hold the value.
+ *
+ * Return: The address to the byte after the assignment. This may or may not
+ *	be valid. Caller to verify.
+ */
+CDF_INLINE_FN uint8_t *cdf_get_u16(uint8_t *ptr, uint16_t *pValue)
+{
+#if defined(ANI_BIG_BYTE_ENDIAN)
+	*pValue = (((uint16_t) (*ptr << 8)) | ((uint16_t) (*(ptr + 1))));
+#else
+	*pValue = (((uint16_t) (*(ptr + 1) << 8)) | ((uint16_t) (*ptr)));
+#endif
+
+	return ptr + 2;
+}
+
+/**
+ * cdf_get_u32() - retrieve a 32-bit unsigned value from a byte array based
+ *			on CPU's endianness.
+ * @ptr: Starting address of a byte array
+ * @pValue: Pointer to a caller allocated buffer for 32 bit value. Value is
+ *		assigned to this location.
+ *
+ * Caller must validate the byte array has enough space to hold the value.
+ *
+ * Return: The address to the byte after the assignment. This may or may not
+ *		be valid. Caller to verify.
+ */
+CDF_INLINE_FN uint8_t *cdf_get_u32(uint8_t *ptr, uint32_t *pValue)
+{
+#if defined(ANI_BIG_BYTE_ENDIAN)
+	*pValue = ((uint32_t) (*(ptr) << 24) |
+		   (uint32_t) (*(ptr + 1) << 16) |
+		   (uint32_t) (*(ptr + 2) << 8) | (uint32_t) (*(ptr + 3)));
+#else
+	*pValue = ((uint32_t) (*(ptr + 3) << 24) |
+		   (uint32_t) (*(ptr + 2) << 16) |
+		   (uint32_t) (*(ptr + 1) << 8) | (uint32_t) (*(ptr)));
+#endif
+	return ptr + 4;
+}
+
+#endif /*_CDF_UTIL_H*/

+ 300 - 0
core/cdf/inc/osdep.h

@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OSDEP_H
+#define _OSDEP_H
+
+#include <cdf_types.h>
+#include <cdf_memory.h>
+#include <cdf_lock.h>
+#include <cdf_time.h>
+#include <cdf_softirq_timer.h>
+#include <cdf_defer.h>
+#include <cdf_nbuf.h>
+#include <cds_if_upperproto.h>
+
+#include <cds_queue.h>
+
+/**
+ * enum ath_hal_bus_type - Supported Bus types
+ * @HAL_BUS_TYPE_PCI: PCI Bus
+ * @HAL_BUS_TYPE_AHB: AHB Bus
+ * @HAL_BUS_TYPE_SNOC: SNOC Bus
+ * @HAL_BUS_TYPE_SIM: Simulator
+ */
+enum ath_hal_bus_type {
+	HAL_BUS_TYPE_PCI,
+	HAL_BUS_TYPE_AHB,
+	HAL_BUS_TYPE_SNOC,
+	HAL_BUS_TYPE_SIM
+};
+
+/**
+ * struct hal_bus_context - Bus to hal context handoff
+ * @bc_tag:     bus context tag
+ * @bc_handle:  bus context handle
+ * @bc_bustype: bus type
+ */
+typedef struct hal_bus_context {
+        int bc_tag;
+        char *bc_handle;
+        enum ath_hal_bus_type bc_bustype;
+} HAL_BUS_CONTEXT;
+
+#define INLINE   inline
+
+/* ATH_DEBUG -
+ * Control whether debug features (printouts, assertions) are compiled
+ * into the driver.
+ */
+#ifndef ATH_DEBUG
+#define ATH_DEBUG 1             /* default: include debug code */
+#endif
+
+#if ATH_DEBUG
+#ifndef ASSERT
+#define ASSERT(expr)  cdf_assert(expr)
+#endif
+#else
+#define ASSERT(expr)
+#endif /* ATH_DEBUG */
+
+/*
+ * Need to define byte order based on the CPU configuration.
+ */
+#ifndef _LITTLE_ENDIAN
+#define _LITTLE_ENDIAN  1234
+#endif
+#ifndef _BIG_ENDIAN
+#define _BIG_ENDIAN 4321
+#endif
+#ifdef __BIG_ENDIAN
+#define _BYTE_ORDER    _BIG_ENDIAN
+#else
+#define _BYTE_ORDER    _LITTLE_ENDIAN
+#endif
+
+/*
+ * Deduce if tasklets are available.  If not then
+ * fall back to using the immediate work queue.
+ */
+#define ath_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \
+	f(struct ctl_table *ctl, int write, void *buffer,		     \
+	  size_t *lenp, loff_t *ppos)
+#define ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dointvec(ctl, write, buffer, lenp, ppos)
+#define ATH_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dostring(ctl, write, filp, buffer, lenp, ppos)
+
+/*
+ * Byte Order stuff
+ */
+#define    le16toh(_x)    le16_to_cpu(_x)
+#define    htole16(_x)    cpu_to_le16(_x)
+#define    htobe16(_x)    cpu_to_be16(_x)
+#define    le32toh(_x)    le32_to_cpu(_x)
+#define    htole32(_x)    cpu_to_le32(_x)
+#define    be16toh(_x)    be16_to_cpu(_x)
+#define    be32toh(_x)    be32_to_cpu(_x)
+#define    htobe32(_x)    cpu_to_be32(_x)
+
+#define EOK    (0)
+
+#ifndef false
+#define false 0
+#endif
+#ifndef true
+#define true  1
+#endif
+
+#ifndef ARPHRD_IEEE80211
+#define ARPHRD_IEEE80211 801    /* IEEE 802.11.  */
+#endif
+
+/*
+ * Normal Delay functions. Time specified in microseconds.
+ */
+#define OS_DELAY(_us)                     cdf_udelay(_us)
+
+/*
+ * memory data manipulation functions.
+ */
+#define OS_MEMCPY(_dst, _src, _len)       cdf_mem_copy(_dst, _src, _len)
+#define OS_MEMMOVE(_dst, _src, _len)      cdf_mem_move(_dst, _src, _len)
+#define OS_MEMZERO(_buf, _len)            cdf_mem_zero(_buf, _len)
+#define OS_MEMSET(_buf, _ch, _len)        cdf_mem_set(_buf, _len, _ch)
+#define OS_MEMCMP(_mem1, _mem2, _len)     cdf_mem_compare(_mem1, _mem2, _len)
+
+#ifdef CONFIG_SMP
+/* Undo the one provided by the kernel to debug spin locks */
+#undef spin_lock
+#undef spin_unlock
+#undef spin_trylock
+
+#define spin_lock(x) \
+	do { \
+		spin_lock_bh(x); \
+	} while (0)
+
+#define spin_unlock(x) \
+	do { \
+		if (!spin_is_locked(x)) { \
+			WARN_ON(1); \
+			printk(KERN_EMERG " %s:%d unlock addr=%p, %s \n", __func__, __LINE__, x, \
+			       !spin_is_locked(x) ? "Not locked" : "");	\
+		} \
+		spin_unlock_bh(x); \
+	} while (0)
+
+#define spin_trylock(x) spin_trylock_bh(x)
+
+#define OS_SUPPORT_ASYNC_Q 1    /* support for handling asyn function calls */
+
+#else
+#define OS_SUPPORT_ASYNC_Q 0
+#endif /* ifdef CONFIG_SMP */
+
+
+/*
+ * System time interface
+ */
+typedef cdf_time_t systime_t;
+typedef cdf_time_t systick_t;
+
+static INLINE cdf_time_t os_get_timestamp(void)
+{
+	return cdf_system_ticks();      /* Fix double conversion from jiffies to ms */
+}
+
+struct _NIC_DEV;
+
+typedef struct _NIC_DEV *osdev_t;
+
+typedef struct timer_list os_timer_t;
+
+typedef struct _os_mesg_t {
+	STAILQ_ENTRY(_os_mesg_t) mesg_next;
+	uint16_t mesg_type;
+	uint16_t mesg_len;
+	/* followed by mesg_len bytes */
+} os_mesg_t;
+
+typedef void (*os_mesg_handler_t)(void *ctx,
+				  uint16_t mesg_type,
+				  uint16_t mesg_len, void *mesg);
+
+typedef struct {
+	osdev_t dev_handle;
+	int32_t num_queued;
+	int32_t mesg_len;
+	uint8_t *mesg_queue_buf;
+	STAILQ_HEAD(, _os_mesg_t) mesg_head;    /* queued mesg buffers */
+	STAILQ_HEAD(, _os_mesg_t) mesg_free_head;       /* free mesg buffers  */
+	spinlock_t lock;
+	spinlock_t ev_handler_lock;
+#ifdef USE_SOFTINTR
+	void *_task;
+#else
+	os_timer_t _timer;
+#endif
+	os_mesg_handler_t handler;
+	void *ctx;
+	uint8_t is_synchronous : 1;
+} os_mesg_queue_t;
+
+/*
+ * Definition of the OS-dependent device structure.
+ * It'll be opaque to the actual ATH layer.
+ */
+struct _NIC_DEV {
+	void *bdev;             /* bus device handle */
+	struct net_device *netdev;      /* net device handle (wifi%d) */
+	cdf_bh_t intr_tq;       /* tasklet */
+	struct net_device_stats devstats;       /* net device statistics */
+	HAL_BUS_CONTEXT bc;     /* bus-to-HAL context handoff */
+#ifdef ATH_PERF_PWR_OFFLOAD
+	struct device *device;  /* generic device */
+	wait_queue_head_t event_queue;
+#endif /* ATH_PERF_PWR_OFFLOAD */
+#if OS_SUPPORT_ASYNC_Q
+	os_mesg_queue_t async_q;        /* mesgq to handle async calls */
+#endif
+#ifdef ATH_BUS_PM
+	uint8_t isDeviceAsleep; /* NOTE(review): presumably bus PM sleep state — confirm */
+#endif /* ATH_BUS_PM */
+};
+
+static INLINE unsigned char *os_malloc(osdev_t pNicDev,
+				       unsigned long ulSizeInBytes, int gfp)
+{
+	return cdf_mem_malloc(ulSizeInBytes);
+}
+
+#define OS_FREE(_p)                     cdf_mem_free(_p)
+
+#define OS_DMA_MEM_CONTEXT(context)	    \
+	dma_addr_t context;
+
+#define OS_GET_DMA_MEM_CONTEXT(var, field)  \
+	&(var->field)
+
+#define OS_COPY_DMA_MEM_CONTEXT(dst, src)   \
+	*dst = *src
+
+#define OS_ZERO_DMA_MEM_CONTEXT(context)   \
+	*context = 0
+
+/*
+ * Timer Interfaces. Use these macros to declare timer
+ * and retrieve timer argument. This is mainly for resolving
+ * different argument types for timer function in different OS.
+ */
+#define OS_DECLARE_TIMER(_fn)                  void _fn(void *)
+
+#define os_timer_func(_fn)		       \
+	void _fn(void *timer_arg)
+
+#define OS_GET_TIMER_ARG(_arg, _type)	       \
+	(_arg) = (_type)(timer_arg)
+
+#define OS_INIT_TIMER(_osdev, _timer, _fn, _ctx, type)  \
+		cdf_softirq_timer_init(_osdev, _timer, _fn, _ctx, type)
+
+#define OS_SET_TIMER(_timer, _ms)      cdf_softirq_timer_mod(_timer, _ms)
+
+#define OS_CANCEL_TIMER(_timer)        cdf_softirq_timer_cancel(_timer)
+
+#define OS_FREE_TIMER(_timer)          cdf_softirq_timer_cancel(_timer)
+
+/*
+ * These are required for network manager support
+ */
+#ifndef SET_NETDEV_DEV
+#define    SET_NETDEV_DEV(ndev, pdev)
+#endif
+
+#endif /* end of _OSDEP_H */

+ 50 - 0
core/cdf/src/cdf_defer.c

@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+
+#include "i_cdf_defer.h"
+
+/**
+ * __cdf_defer_func() - workqueue trampoline for CDF deferred work
+ * @work: work_struct embedded in the enclosing __cdf_work_t
+ *
+ * Recovers the CDF work context that embeds @work and invokes the
+ * registered callback with its stored argument.  Logs an error and
+ * returns if no callback was registered.
+ *
+ * Return: none
+ */
+void __cdf_defer_func(struct work_struct *work)
+{
+	/* work is embedded inside __cdf_work_t; recover the wrapper */
+	__cdf_work_t *ctx = container_of(work, __cdf_work_t, work);
+	if (ctx->fn == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			"No callback registered !!");
+		return;
+	}
+	ctx->fn(ctx->arg);
+}

+ 270 - 0
core/cdf/src/cdf_event.c

@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_event.c
+ *
+ * This source file contains linux specific definitions for CDF event APIs
+ * The APIs mentioned in this file are used for initializing, setting,
+ * resetting, destroying an event and waiting on an occurrence of an event
+ * among multiple events.
+ */
+
+/* Include Files */
+#include "cdf_event.h"
+#include "cdf_trace.h"
+
+/* Preprocessor Definitions and Constants */
+
+/* Type Declarations */
+
+/* Global Data Definitions */
+
+/* Static Variable Definitions */
+
+/* Function Definitions and Documentation */
+
+/**
+ * cdf_event_init() - initializes a CDF event
+ * @event: Pointer to the opaque event object to initialize
+ *
+ * The cdf_event_init() function initializes the specified event. Upon
+ * successful initialization, the state of the event becomes initialized
+ * and not signaled.
+ *
+ * An event must be initialized before it may be used in any other event
+ * functions.
+ *
+ * Attempting to initialize an already initialized event results in
+ * a failure.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_event_init(cdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "NULL event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check for 'already initialized' event */
+	if (LINUX_EVENT_COOKIE == event->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Initialized event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_BUSY;
+	}
+
+	/* initialize new event */
+	init_completion(&event->complete);
+	event->cookie = LINUX_EVENT_COOKIE;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_event_set() - sets a CDF event
+ * @event: The event to set to the signalled state
+ *
+ * The state of the specified event is set to signalled by calling
+ * cdf_event_set().
+ *
+ * Any threads waiting on the event as a result of a cdf_event_wait() will
+ * be unblocked and available to be scheduled for execution when the event
+ * is signaled by a call to cdf_event_set().
+ *
+ *
+ * Return: CDF status
+ */
+
+CDF_STATUS cdf_event_set(cdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "NULL event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check if event refers to an initialized object */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Uninitialized event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	complete(&event->complete);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_event_reset() - resets a CDF event
+ * @event: The event to set to the NOT signalled state
+ *
+ * This function isn't required for Linux. Therefore, it doesn't do much.
+ *
+ * The state of the specified event is set to 'NOT signalled' by calling
+ * cdf_event_reset().  The state of the event remains NOT signalled until an
+ * explicit call to cdf_event_set().
+ *
+ * This function sets the event to a NOT signalled state even if the event was
+ * signalled multiple times before being signaled.
+ *
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_event_reset(cdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "NULL event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check to make sure it is an 'already initialized' event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Uninitialized event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* (re)initialize event */
+	INIT_COMPLETION(event->complete);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_event_destroy() - Destroys a CDF event
+ * @event: The event object to be destroyed.
+ *
+ * This function doesn't do much in Linux. There is no need for the caller
+ * to explicitly destroy an event after use.
+ *
+ * The os_event_destroy() function shall destroy the event object
+ * referenced by event.  After a successful return from cdf_event_destroy()
+ * the event object becomes, in effect, uninitialized.
+ *
+ * A destroyed event object can be reinitialized using cdf_event_init();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined.  Calls to CDF event functions to manipulate the lock such
+ * as cdf_event_set() will fail if the event is destroyed.  Therefore,
+ * don't use the event after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return: CDF status
+ */
+
+CDF_STATUS cdf_event_destroy(cdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "NULL event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check to make sure it is an 'already initialized' event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Uninitialized event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* make sure nobody is waiting on the event */
+	complete_all(&event->complete);
+
+	/* destroy the event */
+	memset(event, 0, sizeof(cdf_event_t));
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wait_single_event() - wait for a single event to be set
+ * @event: Pointer to an initialized event to wait on.
+ * @timeout: Timeout value (in milliseconds); must be non-zero.  This
+ *	function returns CDF_STATUS_E_TIMEOUT if the event is not set
+ *	within this interval.  A value of 0 is rejected with
+ *	CDF_STATUS_E_FAULT (it is NOT treated as an infinite wait).
+ *
+ * Blocks the caller until the event is signalled via cdf_event_set() or
+ * the timeout expires.  Must not be called from interrupt context.
+ *
+ * Return: CDF_STATUS_SUCCESS if the event was set in time,
+ *	CDF_STATUS_E_TIMEOUT on timeout, CDF_STATUS_E_FAULT for a NULL
+ *	event, a zero timeout, or interrupt context, and
+ *	CDF_STATUS_E_INVAL for an uninitialized event.
+ */
+CDF_STATUS cdf_wait_single_event(cdf_event_t *event, uint32_t timeout)
+{
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check for null pointer */
+	if (NULL == event) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "NULL event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check if cookie is same as that of initialized event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Uninitialized event passed into %s", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	if (timeout) {
+		long ret;
+		/* wait_for_completion_timeout() returns 0 on timeout */
+		ret = wait_for_completion_timeout(&event->complete,
+						  msecs_to_jiffies(timeout));
+		if (0 >= ret)
+			return CDF_STATUS_E_TIMEOUT;
+	} else {
+		/* a zero timeout is a caller bug, not an infinite wait */
+		CDF_ASSERT(0);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Zero timeout value passed into %s", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}

+ 225 - 0
core/cdf/src/cdf_list.c

@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_list.c
+ *
+ * Connectivity driver framework list manipulation APIs. CDF linked list
+ * APIs are NOT thread safe so make sure to use appropriate locking mechanisms
+ * to assure operations on the list are thread safe.
+ */
+
+/* Include files */
+#include <cdf_list.h>
+#include <cdf_trace.h>
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+
+/* Function declarations and documentation */
+
+/**
+ * cdf_list_insert_front() - insert input node at front of the list
+ * @pList: Pointer to list
+ * @pNode: Pointer to input node
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_list_insert_front(cdf_list_t *pList, cdf_list_node_t *pNode)
+{
+	list_add(pNode, &pList->anchor);
+	pList->count++;
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_insert_back() - insert input node at back of the list
+ * @pList: Pointer to list
+ * @pNode: Pointer to input node
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_list_insert_back(cdf_list_t *pList, cdf_list_node_t *pNode)
+{
+	list_add_tail(pNode, &pList->anchor);
+	pList->count++;
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_insert_back_size() - insert input node at back of list and save
+ *				 list size
+ * @pList: Pointer to list
+ * @pNode: Pointer to input node
+ * @pSize: Pointer to store list size
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_list_insert_back_size(cdf_list_t *pList,
+				     cdf_list_node_t *pNode, uint32_t *pSize)
+{
+	list_add_tail(pNode, &pList->anchor);
+	pList->count++;
+	*pSize = pList->count;
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_remove_front() - remove node from front of the list
+ * @pList: Pointer to list
+ * @ppNode: Double pointer to store the node which is removed from list
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_list_remove_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
+{
+	struct list_head *listptr;
+
+	if (list_empty(&pList->anchor))
+		return CDF_STATUS_E_EMPTY;
+
+	listptr = pList->anchor.next;
+	*ppNode = listptr;
+	list_del(pList->anchor.next);
+	pList->count--;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_remove_back() - remove node from end of the list
+ * @pList: Pointer to list
+ * @ppNode: Double pointer to store node which is removed from list
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cdf_list_remove_back(cdf_list_t *pList, cdf_list_node_t **ppNode)
+{
+	struct list_head *listptr;
+
+	if (list_empty(&pList->anchor))
+		return CDF_STATUS_E_EMPTY;
+
+	listptr = pList->anchor.prev;
+	*ppNode = listptr;
+	list_del(pList->anchor.prev);
+	pList->count--;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_remove_node() - remove input node from list
+ * @pList: Pointer to list
+ * @pNodeToRemove: Pointer to node which needs to be removed
+ *
+ * The list is scanned (O(n)) to confirm @pNodeToRemove is actually a
+ * member of @pList before it is unlinked.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_EMPTY if the list
+ *	is empty, CDF_STATUS_E_INVAL if the node is not on the list
+ */
+CDF_STATUS cdf_list_remove_node(cdf_list_t *pList,
+				cdf_list_node_t *pNodeToRemove)
+{
+	cdf_list_node_t *tmp;
+	int found = 0;
+
+	if (list_empty(&pList->anchor))
+		return CDF_STATUS_E_EMPTY;
+
+	/* verify that pNodeToRemove is indeed part of list pList */
+	list_for_each(tmp, &pList->anchor) {
+		if (tmp == pNodeToRemove) {
+			found = 1;
+			break;
+		}
+	}
+	if (found == 0)
+		return CDF_STATUS_E_INVAL;
+
+	list_del(pNodeToRemove);
+	pList->count--;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_peek_front() - return the first node of the list without
+ *			   removing it
+ * @pList: Pointer to list
+ * @ppNode: Double pointer to store peeked node pointer
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_EMPTY if the list
+ *	has no nodes
+ */
+CDF_STATUS cdf_list_peek_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
+{
+	if (list_empty(&pList->anchor))
+		return CDF_STATUS_E_EMPTY;
+
+	*ppNode = pList->anchor.next;
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_list_peek_next() - peek the node that follows @pNode in the list
+ * @pList: Pointer to list
+ * @pNode: Pointer to input node
+ * @ppNode: Double pointer to store peeked node pointer
+ *
+ * @pNode's membership of @pList is verified with an O(n) scan before its
+ * successor is returned.  The list itself is not modified.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAULT on NULL
+ *	arguments, CDF_STATUS_E_EMPTY if the list is empty or @pNode is
+ *	the last node, CDF_STATUS_E_INVAL if @pNode is not on the list
+ */
+CDF_STATUS cdf_list_peek_next(cdf_list_t *pList, cdf_list_node_t *pNode,
+			      cdf_list_node_t **ppNode)
+{
+	struct list_head *listptr;
+	int found = 0;
+	cdf_list_node_t *tmp;
+
+	if ((pList == NULL) || (pNode == NULL) || (ppNode == NULL))
+		return CDF_STATUS_E_FAULT;
+
+	if (list_empty(&pList->anchor))
+		return CDF_STATUS_E_EMPTY;
+
+	/* verify that pNode is indeed part of list pList */
+	list_for_each(tmp, &pList->anchor) {
+		if (tmp == pNode) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0)
+		return CDF_STATUS_E_INVAL;
+
+	/* the anchor marks the end of the list, not a real node */
+	listptr = pNode->next;
+	if (listptr == &pList->anchor)
+		return CDF_STATUS_E_EMPTY;
+
+	*ppNode = listptr;
+
+	return CDF_STATUS_SUCCESS;
+}

+ 491 - 0
core/cdf/src/cdf_lock.c

@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_lock.c
+ *
+ * OVERVIEW: This source file contains definitions for CDF lock APIs
+ *	     The four APIs mentioned in this file are used for
+ *	     initializing, acquiring, releasing and destroying a lock.
+ *	     The locks are implemented using critical sections.
+ */
+
+/* Include Files */
+
+#include "cdf_lock.h"
+#include "cdf_memory.h"
+#include "cdf_trace.h"
+#include <cdf_types.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#include "i_host_diag_core_event.h"
+#include "cds_api.h"
+#include "ani_global.h"
+
+/* Preprocessor Definitions and Constants */
+#define LINUX_LOCK_COOKIE 0x12345678
+
+#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
+#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
+#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
+
+/* Type Declarations */
+
+/* Lifecycle states stored in cdf_mutex_t::state.  The large magic base
+ * value makes an uninitialized or corrupted state field unlikely to
+ * alias a valid state.
+ */
+enum {
+	LOCK_RELEASED = 0x11223344,
+	LOCK_ACQUIRED,
+	LOCK_DESTROYED
+};
+
+/* Global Data Definitions */
+
+/* Function Definitions and Documentation */
+
+/**
+ * cdf_mutex_init() - initialize a CDF lock
+ * @lock:        Pointer to the opaque lock object to initialize
+ *
+ * cdf_mutex_init() function initializes the specified lock. Upon
+ * successful initialization, the state of the lock becomes initialized
+ * and unlocked.
+ *
+ * A lock must be initialized by calling cdf_mutex_init() before it
+ * may be used in any other lock functions.
+ *
+ * Attempting to initialize an already initialized lock results in
+ * a failure.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS:     lock was successfully initialized
+ *      CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+	/* check for 'already initialized' lock.
+	 * NOTE(review): on a never-initialized object this reads an
+	 * indeterminate cookie value, so the check is best-effort only.
+	 */
+	if (LINUX_LOCK_COOKIE == lock->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: already initialized lock", __func__);
+		return CDF_STATUS_E_BUSY;
+	}
+
+	/* mutexes may sleep, so this API is process-context only */
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* initialize new lock: unlocked, unowned, zero recursion depth */
+	mutex_init(&lock->m_lock);
+	lock->cookie = LINUX_LOCK_COOKIE;
+	lock->state = LOCK_RELEASED;
+	lock->processID = 0;
+	lock->refcount = 0;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_mutex_acquire() - acquire a CDF lock
+ * @lock:        Pointer to the opaque lock object to acquire
+ *
+ * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
+ * is already locked, the calling thread shall block until the lock becomes
+ * available. This operation shall return with the lock object referenced by
+ * lock in the locked state with the calling thread as its owner.
+ *
+ * Recursive acquisition by the owning thread is supported through a
+ * reference count; each acquire must be balanced by a cdf_mutex_release().
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS:     lock was successfully acquired
+ *      CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock)
+{
+	int rc;
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+	/* check if lock refers to an initialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* mutex_lock() may sleep; disallow interrupt context */
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+	/* recursive-acquire fast path: the current owner just bumps the
+	 * refcount instead of locking again.
+	 * NOTE(review): processID/state are read without holding m_lock;
+	 * presumably safe because only the owner can match current->pid
+	 * while state is LOCK_ACQUIRED -- confirm.
+	 */
+	if ((lock->processID == current->pid) &&
+		(lock->state == LOCK_ACQUIRED)) {
+		lock->refcount++;
+#ifdef CDF_NESTED_LOCK_DEBUG
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+			  "%s: %x %d %d", __func__, lock, current->pid,
+			  lock->refcount);
+#endif
+		return CDF_STATUS_SUCCESS;
+	}
+	/* acquire a Lock */
+	mutex_lock(&lock->m_lock);
+	/* sanity check only: mutex_lock() returns with the mutex held,
+	 * so mutex_is_locked() is expected to be true here
+	 */
+	rc = mutex_is_locked(&lock->m_lock);
+	if (rc == 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+#ifdef CDF_NESTED_LOCK_DEBUG
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: %x %d", __func__, lock, current->pid);
+#endif
+	if (LOCK_DESTROYED != lock->state) {
+		/* record ownership for the recursive fast path above */
+		lock->processID = current->pid;
+		lock->refcount++;
+		lock->state = LOCK_ACQUIRED;
+		return CDF_STATUS_SUCCESS;
+	} else {
+		/* lock was destroyed while we were blocked on it;
+		 * drop the mutex and fail
+		 */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Lock is already destroyed", __func__);
+		mutex_unlock(&lock->m_lock);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+}
+
+/**
+ * cdf_mutex_release() - release a CDF lock
+ * @lock:        Pointer to the opaque lock object to be released
+ *
+ * cdf_mutex_release() function shall release the lock object
+ * referenced by 'lock'.
+ *
+ * If a thread attempts to release a lock that it does not own or that is
+ * not initialized, an error is returned.  For recursively acquired locks
+ * the underlying mutex is only dropped when the refcount reaches zero.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS:     lock was successfully released
+ *      CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock refers to an uninitialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* mutex_unlock() must run in process context */
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* only the thread that acquired the lock may release it */
+	if (lock->processID != current->pid) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: current task pid does not match original task pid!!",
+			  __func__);
+#ifdef CDF_NESTED_LOCK_DEBUG
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+			  "%s: Lock held by=%d being released by=%d",
+			  __func__, lock->processID, current->pid);
+#endif
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_PERM;
+	}
+	/* unwind one level of recursive acquisition */
+	if ((lock->processID == current->pid) &&
+		(lock->state == LOCK_ACQUIRED)) {
+		if (lock->refcount > 0)
+			lock->refcount--;
+	}
+#ifdef CDF_NESTED_LOCK_DEBUG
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: %x %d %d", __func__, lock, lock->processID,
+		  lock->refcount);
+#endif
+	/* still held recursively; keep the underlying mutex locked */
+	if (lock->refcount)
+		return CDF_STATUS_SUCCESS;
+
+	lock->processID = 0;
+	lock->refcount = 0;
+	lock->state = LOCK_RELEASED;
+	/* release a Lock */
+	mutex_unlock(&lock->m_lock);
+#ifdef CDF_NESTED_LOCK_DEBUG
+	/* bug fix: __func__ was missing, so 'lock' was consumed by the %s
+	 * specifier (undefined behavior).  Note processID/refcount were
+	 * just cleared, so they log as zero here.
+	 */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: Freeing lock %x %d %d", __func__, lock,
+		  lock->processID, lock->refcount);
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_mutex_destroy() - destroy a CDF lock
+ * @lock:        Pointer to the opaque lock object to be destroyed
+ *
+ * cdf_mutex_destroy() function shall destroy the lock object
+ * referenced by lock.  After a successful return from cdf_mutex_destroy()
+ * the lock object becomes, in effect, uninitialized.
+ *
+ * A destroyed lock object can be reinitialized using cdf_mutex_init();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined.  Calls to CDF lock functions to manipulate the lock such
+ * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
+ * don't use the lock after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS:     lock was successfully destroyed
+ *      CDF failure reason codes: lock is not initialized and can't be used
+ */
+CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (NULL == lock) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock is released: trylock succeeds only if no one
+	 * currently holds the mutex, so a busy lock is refused
+	 */
+	if (!mutex_trylock(&lock->m_lock)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: lock is not released", __func__);
+		return CDF_STATUS_E_BUSY;
+	}
+	/* invalidate the cookie so later API calls detect the destroy */
+	lock->cookie = 0;
+	lock->state = LOCK_DESTROYED;
+	lock->processID = 0;
+	lock->refcount = 0;
+
+	/* drop the reference taken by mutex_trylock() above */
+	mutex_unlock(&lock->m_lock);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_spinlock_acquire() - acquires a spin lock
+ * @pLock:       Spin lock to acquire
+ *
+ * Thin wrapper over the kernel's spin_lock(); spins (does not sleep)
+ * until the lock is obtained.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (spin_lock() cannot fail)
+ */
+CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock)
+{
+	spin_lock(&pLock->spinlock);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_spinlock_release() - release a spin lock
+ * @pLock:       Spin lock to release
+ *
+ * Thin wrapper over the kernel's spin_unlock(); must be balanced with
+ * a prior cdf_spinlock_acquire() on the same lock.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (spin_unlock() cannot fail)
+ */
+CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock)
+{
+	spin_unlock(&pLock->spinlock);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wake_lock_name() - This function returns the name of the wakelock
+ * @pLock: Pointer to the wakelock
+ *
+ * The name field read depends on the build: the CNSS wakelock object
+ * exposes ->name directly, while the open-source wakeup-source based
+ * object nests it in ->ws.name.
+ *
+ * Return: Pointer to the name if it is valid or a default string
+ *
+ */
+static const char *cdf_wake_lock_name(cdf_wake_lock_t *pLock)
+{
+#if defined CONFIG_CNSS
+	if (pLock->name)
+		return pLock->name;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	if (pLock->ws.name)
+		return pLock->ws.name;
+#endif
+	/* also reached when neither wakelock backend is compiled in */
+	return "UNNAMED_WAKELOCK";
+}
+
+/**
+ * cdf_wake_lock_init() - initializes a CDF wake lock
+ * @pLock: The wake lock to initialize
+ * @name: Name of wake lock
+ *
+ * Dispatches to the CNSS or Android wakelock backend at compile time;
+ * a no-op when neither backend is configured.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (backend init calls return no status)
+ */
+CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *pLock, const char *name)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_init(pLock, name);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_init(pLock, WAKE_LOCK_SUSPEND, name);
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wake_lock_acquire() - acquires a wake lock
+ * @pLock:       The wake lock to acquire
+ * @reason:      Reason for wakelock
+ *
+ * Records a "taken" event in the power diag log, then acquires the
+ * lock through whichever backend is compiled in.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (backend acquire calls return no status)
+ */
+CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason)
+{
+	/* diag logging happens unconditionally, before the actual take */
+	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
+			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock(pLock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock(pLock);
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
+ * @pLock:       The wake lock to acquire
+ * @msec:        Timeout in milliseconds after which the lock auto-releases
+ * @reason:      Reason for wakelock
+ *
+ * Return:
+ *   CDF status success : if wake lock is acquired
+ *   CDF status failure : if wake lock was not acquired
+ */
+CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock, uint32_t msec,
+					 uint32_t reason)
+{
+	/* Rx wakelocks fire frequently, so they are only reported to the
+	 * power diag ring when active-level debug logging is enabled;
+	 * every other reason is always reported.
+	 */
+	if ((cds_get_ring_log_level(RING_ID_WAKELOCK) >=
+				WLAN_LOG_LEVEL_ACTIVE) ||
+			(WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
+		host_diag_log_wlock(reason, cdf_wake_lock_name(pLock), msec,
+				WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+	}
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_timeout(pLock, msec);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_timeout(pLock, msecs_to_jiffies(msec));
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wake_lock_release() - releases a wake lock
+ * @pLock:       the wake lock to release
+ * @reason:      Reason for wakelock
+ *
+ * Records a "released" event in the power diag log, then releases the
+ * lock through whichever backend is compiled in.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (backend release calls return no status)
+ */
+CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason)
+{
+	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
+			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			WIFI_POWER_EVENT_WAKELOCK_RELEASED);
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_release(pLock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_unlock(pLock);
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_wake_lock_destroy() - destroys a wake lock
+ * @pLock:       The wake lock to destroy
+ *
+ * The lock must not be used again until re-initialized with
+ * cdf_wake_lock_init().
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS: always (backend destroy calls return no status)
+ */
+CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_destroy(pLock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_destroy(pLock);
+#endif
+	return CDF_STATUS_SUCCESS;
+}

+ 800 - 0
core/cdf/src/cdf_mc_timer.c

@@ -0,0 +1,800 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ *  DOC: cdf_mc_timer
+ *
+ *  Connectivity driver framework timer APIs serialized to MC thread
+ */
+
+/* Include Files */
+#include <cdf_mc_timer.h>
+#include <cdf_lock.h>
+#include <cds_api.h>
+#include "wlan_qct_sys.h"
+#include "cds_sched.h"
+
+/* Preprocessor definitions and constants */
+
+#define LINUX_TIMER_COOKIE 0x12341234
+#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
+#define TMR_INVALID_ID (0)
+
+/* Type declarations */
+
+/* Static Variable Definitions */
+/* number of outstanding CDF_TIMER_TYPE_WAKE_APPS timers */
+static unsigned int persistent_timer_count;
+/* intended to guard persistent_timer_count; acquire/release calls in
+ * try_allowing_sleep() are currently commented out
+ */
+static cdf_mutex_t persistent_timer_count_lock;
+
+/* Function declarations and documentation */
+
+/**
+ * try_allowing_sleep() - clean up timer states after it has been deactivated
+ * @type: Timer type
+ *
+ * Clean up timer states after it has been deactivated check and try to allow
+ * sleep after a timer has been stopped or expired.
+ *
+ * Only CDF_TIMER_TYPE_WAKE_APPS timers are counted; other types are a
+ * no-op here.
+ *
+ * Return: none
+ */
+static void try_allowing_sleep(CDF_TIMER_TYPE type)
+{
+	if (CDF_TIMER_TYPE_WAKE_APPS == type) {
+		/* NOTE(review): the decrement below is unprotected -- the
+		 * lock calls are commented out; presumably serialized by
+		 * the caller.  TODO confirm.
+		 */
+		/* cdf_mutex_acquire(&persistent_timer_count_lock); */
+		persistent_timer_count--;
+		if (0 == persistent_timer_count) {
+			/* since the number of persistent timers has
+			   decreased from 1 to 0, the timer should allow
+			   sleep: sleep_assert_okts( sleepClientHandle ); */
+		}
+		/* cdf_mutex_release(&persistent_timer_count_lock); */
+	}
+}
+
+/**
+ * cdf_linux_timer_callback() - internal cdf entry point which is
+ *				called when the timer interval expires
+ * @data: pointer to the timer control block which describes the
+ *	timer that expired
+ *
+ * This function in turn calls the CDF client callback and changes the
+ * state of the timer from running (ACTIVE) to expired (INIT).
+ *
+ * The client callback is not invoked directly: it is packed into a
+ * SYS_MSG_ID_MC_TIMER message and posted to the MC thread queue.
+ *
+ * Note: function signature is defined by the Linux kernel.  The fact
+ * that the argument is "unsigned long" instead of "void *" is
+ * unfortunately imposed upon us.  But we can safely pass a pointer via
+ * this parameter for LP32 and LP64 architectures.
+ *
+ *  Return: nothing
+ */
+
+static void cdf_linux_timer_callback(unsigned long data)
+{
+	cdf_mc_timer_t *timer = (cdf_mc_timer_t *) data;
+	cds_msg_t msg;
+	CDF_STATUS vStatus;
+	unsigned long flags;
+
+	cdf_mc_timer_callback_t callback = NULL;
+	void *userData = NULL;
+	int threadId;
+	CDF_TIMER_TYPE type = CDF_TIMER_TYPE_SW;
+
+	CDF_ASSERT(timer);
+
+	if (timer == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s Null pointer passed in!", __func__);
+		return;
+	}
+
+	threadId = timer->platformInfo.threadID;
+	/* the spinlock guards the timer state machine against concurrent
+	 * start/stop/destroy calls
+	 */
+	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
+
+	switch (timer->state) {
+	case CDF_TIMER_STATE_STARTING:
+		/* we are in this state because someone just started the timer,
+		 * MC timer got started and expired, but the time content have
+		 * not been updated this is a rare race condition!
+		 */
+		timer->state = CDF_TIMER_STATE_STOPPED;
+		vStatus = CDF_STATUS_E_ALREADY;
+		break;
+
+	case CDF_TIMER_STATE_STOPPED:
+		vStatus = CDF_STATUS_E_ALREADY;
+		break;
+
+	case CDF_TIMER_STATE_UNUSED:
+		vStatus = CDF_STATUS_E_EXISTS;
+		break;
+
+	case CDF_TIMER_STATE_RUNNING:
+		/* need to go to stop state here because the call-back function
+		 * may restart timer (to emulate periodic timer)
+		 */
+		timer->state = CDF_TIMER_STATE_STOPPED;
+		/* copy the relevant timer information to local variables;
+		 * once we exist from this critical section, the timer content
+		 * may be modified by other tasks
+		 */
+		callback = timer->callback;
+		userData = timer->userData;
+		threadId = timer->platformInfo.threadID;
+		type = timer->type;
+		vStatus = CDF_STATUS_SUCCESS;
+		break;
+
+	default:
+		CDF_ASSERT(0);
+		vStatus = CDF_STATUS_E_FAULT;
+		break;
+	}
+
+	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+
+	if (CDF_STATUS_SUCCESS != vStatus) {
+		/* NOTE(review): timer->state is re-read here after the
+		 * unlock, so the logged value may differ from the state
+		 * that was actually seen inside the critical section
+		 */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "TIMER callback called in a wrong state=%d",
+			  timer->state);
+		return;
+	}
+
+	/* a WAKE_APPS timer just expired; update the sleep bookkeeping */
+	try_allowing_sleep(type);
+
+	if (callback == NULL) {
+		CDF_ASSERT(0);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: No TIMER callback, Could not enqueue timer to any queue",
+			  __func__);
+		return;
+	}
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "TIMER callback: running on MC thread");
+
+	/* serialize to the MC thread */
+	sys_build_message_header(SYS_MSG_ID_MC_TIMER, &msg);
+	msg.callback = callback;
+	msg.bodyptr = userData;
+	msg.bodyval = 0;
+
+	if (cds_mq_post_message(CDS_MQ_ID_SYS, &msg) == CDF_STATUS_SUCCESS)
+		return;
+
+	/* posting failed: the client callback is silently dropped */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "%s: Could not enqueue timer to any queue", __func__);
+	CDF_ASSERT(0);
+}
+
+/**
+ * cdf_mc_timer_get_current_state() - get the current state of the timer
+ * @pTimer: Pointer to timer object
+ *
+ * A NULL pointer or a state value outside the known set (corruption)
+ * asserts and reports CDF_TIMER_STATE_UNUSED.
+ *
+ * Return:
+ *	CDF_TIMER_STATE - cdf timer state
+ */
+CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer)
+{
+	CDF_TIMER_STATE state;
+
+	if (NULL == pTimer) {
+		CDF_ASSERT(0);
+		return CDF_TIMER_STATE_UNUSED;
+	}
+
+	state = pTimer->state;
+	/* pass through only recognized states */
+	if ((CDF_TIMER_STATE_STOPPED == state) ||
+	    (CDF_TIMER_STATE_STARTING == state) ||
+	    (CDF_TIMER_STATE_RUNNING == state) ||
+	    (CDF_TIMER_STATE_UNUSED == state))
+		return state;
+
+	/* unknown value: the timer object is corrupt or uninitialized */
+	CDF_ASSERT(0);
+	return CDF_TIMER_STATE_UNUSED;
+}
+
+/**
+ * cdf_timer_module_init() - initializes a CDF timer module.
+ *
+ * This API initializes the CDF timer module. This needs to be called
+ * exactly once prior to using any CDF timers.
+ *
+ * Only initializes the persistent-timer bookkeeping lock; per-timer
+ * setup happens in cdf_mc_timer_init().
+ *
+ * Return: none
+ */
+void cdf_timer_module_init(void)
+{
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "Initializing the CDF timer module");
+	cdf_mutex_init(&persistent_timer_count_lock);
+}
+
+#ifdef TIMER_MANAGER
+
+/* global registry of live timers, used to report leaks on unload */
+cdf_list_t cdf_timer_list;
+/* protects cdf_timer_list against concurrent insert/remove */
+cdf_spinlock_t cdf_timer_list_lock;
+
+static void cdf_timer_clean(void);
+
+/**
+ * cdf_mc_timer_manager_init() - initialize CDF debug timer manager
+ *
+ * This API initializes CDF timer debug functionality.
+ *
+ * Return: none
+ */
+void cdf_mc_timer_manager_init(void)
+{
+	/* Initializing the timer-leak tracking list with a maximum
+	 * size of 1000 entries
+	 */
+	cdf_list_init(&cdf_timer_list, 1000);
+	cdf_spinlock_init(&cdf_timer_list_lock);
+	return;
+}
+
+/**
+ * cdf_timer_clean() - clean up CDF timer debug functionality
+ *
+ * This API cleans up CDF timer debug functionality and prints which CDF timers
+ * are leaked. This is called during driver unload.
+ *
+ * Every node still on cdf_timer_list at this point belongs to a timer
+ * that was initialized but never destroyed; each one is reported and
+ * its tracking node freed.
+ *
+ * Return: none
+ */
+static void cdf_timer_clean(void)
+{
+	uint32_t listSize;
+
+	cdf_list_size(&cdf_timer_list, &listSize);
+
+	if (listSize) {
+		cdf_list_node_t *pNode;
+		CDF_STATUS cdf_status;
+
+		cdf_mc_timer_node_t *ptimerNode;
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: List is not Empty. listSize %d ",
+			  __func__, (int)listSize);
+
+		/* drain the list one node at a time, holding the lock
+		 * only around the removal so tracing happens unlocked
+		 */
+		do {
+			cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+			cdf_status =
+				cdf_list_remove_front(&cdf_timer_list, &pNode);
+			cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+			if (CDF_STATUS_SUCCESS == cdf_status) {
+				ptimerNode = (cdf_mc_timer_node_t *) pNode;
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_FATAL,
+					  "Timer Leak@ File %s, @Line %d",
+					  ptimerNode->fileName,
+					  (int)ptimerNode->lineNum);
+				cdf_mem_free(ptimerNode);
+			}
+		} while (cdf_status == CDF_STATUS_SUCCESS);
+	}
+}
+
+/**
+ * cdf_mc_timer_exit() - exit CDF timer debug functionality
+ *
+ * This API exits CDF timer debug functionality: it reports any leaked
+ * timers and then destroys the tracking list.
+ *
+ * Return: none
+ */
+void cdf_mc_timer_exit(void)
+{
+	cdf_timer_clean();
+	cdf_list_destroy(&cdf_timer_list);
+}
+#endif
+
+/**
+ * cdf_mc_timer_init() - initialize a CDF timer
+ * @pTimer:     Pointer to timer object
+ * @timerType:  Type of timer
+ * @callback:   Callback to be called after timer expiry
+ * @userData:   User data which will be passed to callback function
+ *
+ * This API initializes a CDF Timer object.
+ *
+ * cdf_mc_timer_init() initializes a CDF Timer object.  A timer must be
+ * initialized by calling cdf_mc_timer_initialize() before it may be used in
+ * any other timer functions.
+ *
+ * Attempting to initialize timer that is already initialized results in
+ * a failure. A destroyed timer object can be re-initialized with a call to
+ * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ *  Calls to CDF timer functions to manipulate the timer such
+ *  as cdf_mc_timer_set() will fail if the timer is not initialized or has
+ *  been destroyed.  Therefore, don't use the timer after it has been
+ *  destroyed until it has been re-initialized.
+ *
+ *  All callback will be executed within the CDS main thread unless it is
+ *  initialized from the Tx thread flow, in which case it will be executed
+ *  within the tx thread flow.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is initialized successfully
+ *	CDF failure status - Timer initialization failed
+ */
+#ifdef TIMER_MANAGER
+CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
+				   CDF_TIMER_TYPE timerType,
+				   cdf_mc_timer_callback_t callback,
+				   void *userData, char *fileName,
+				   uint32_t lineNum)
+{
+	CDF_STATUS cdf_status;
+
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* allocate a leak-tracking node recording who created this timer */
+	timer->ptimerNode = cdf_mem_malloc(sizeof(cdf_mc_timer_node_t));
+
+	if (timer->ptimerNode == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Not able to allocate memory for timeNode",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_NOMEM;
+	}
+
+	cdf_mem_set(timer->ptimerNode, sizeof(cdf_mc_timer_node_t), 0);
+
+	timer->ptimerNode->fileName = fileName;
+	timer->ptimerNode->lineNum = lineNum;
+	timer->ptimerNode->cdf_timer = timer;
+
+	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+	cdf_status = cdf_list_insert_front(&cdf_timer_list,
+					   &timer->ptimerNode->pNode);
+	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+	/* insert failure (e.g. list full) is only logged; the node is
+	 * kept so cdf_mc_timer_destroy() can still free it
+	 */
+	if (CDF_STATUS_SUCCESS != cdf_status) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Unable to insert node into List cdf_status %d",
+			  __func__, cdf_status);
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platformInfo.spinlock);
+	/* SW timers are deferrable: they don't wake an idle CPU */
+	if (CDF_TIMER_TYPE_SW == timerType)
+		init_timer_deferrable(&(timer->platformInfo.Timer));
+	else
+		init_timer(&(timer->platformInfo.Timer));
+	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
+	timer->platformInfo.Timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->userData = userData;
+	timer->type = timerType;
+	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
+	timer->platformInfo.threadID = 0;
+	timer->state = CDF_TIMER_STATE_STOPPED;
+
+	return CDF_STATUS_SUCCESS;
+}
+#else
+/* non-debug variant: identical setup without the leak-tracking list */
+CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
+			     cdf_mc_timer_callback_t callback,
+			     void *userData)
+{
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platformInfo.spinlock);
+	/* SW timers are deferrable: they don't wake an idle CPU */
+	if (CDF_TIMER_TYPE_SW == timerType)
+		init_timer_deferrable(&(timer->platformInfo.Timer));
+	else
+		init_timer(&(timer->platformInfo.Timer));
+	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
+	timer->platformInfo.Timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->userData = userData;
+	timer->type = timerType;
+	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
+	timer->platformInfo.threadID = 0;
+	timer->state = CDF_TIMER_STATE_STOPPED;
+
+	return CDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * cdf_mc_timer_destroy() - destroy CDF timer
+ * @timer: Pointer to timer object
+ *
+ * cdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a cdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to CDF timer functions to manipulate the timer, such
+ * as cdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Debug variant: also removes and frees the leak-tracking node.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - Timer is destroyed successfully
+ *	CDF failure status - Timer destroy failed
+ */
+#ifdef TIMER_MANAGER
+CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
+{
+	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* Check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* drop the leak-tracking node first; failure means the node was
+	 * never registered, which indicates a mismatched init/destroy
+	 */
+	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+	vStatus = cdf_list_remove_node(&cdf_timer_list,
+				       &timer->ptimerNode->pNode);
+	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+	if (vStatus != CDF_STATUS_SUCCESS) {
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+	cdf_mem_free(timer->ptimerNode);
+
+	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
+
+	switch (timer->state) {
+
+	case CDF_TIMER_STATE_STARTING:
+		vStatus = CDF_STATUS_E_BUSY;
+		break;
+
+	case CDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platformInfo.Timer));
+		vStatus = CDF_STATUS_SUCCESS;
+		break;
+	case CDF_TIMER_STATE_STOPPED:
+		vStatus = CDF_STATUS_SUCCESS;
+		break;
+
+	case CDF_TIMER_STATE_UNUSED:
+		vStatus = CDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		vStatus = CDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (CDF_STATUS_SUCCESS == vStatus) {
+		/* invalidate the cookie so later API calls reject the timer */
+		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = CDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+		return vStatus;
+	}
+
+	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	CDF_ASSERT(0);
+
+	return vStatus;
+}
+
+#else
+
+/**
+ * cdf_mc_timer_destroy() - destroy CDF timer
+ * @timer: Pointer to timer object
+ *
+ * cdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a cdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to CDF timer functions to manipulate the timer, such
+ * as cdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Non-debug variant: no leak-tracking bookkeeping.
+ *
+ * Return:
+ *      CDF_STATUS_SUCCESS - Timer is destroyed successfully
+ *      CDF failure status - Timer destroy failed
+ */
+CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
+{
+	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
+
+	switch (timer->state) {
+
+	case CDF_TIMER_STATE_STARTING:
+		vStatus = CDF_STATUS_E_BUSY;
+		break;
+
+	case CDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platformInfo.Timer));
+		vStatus = CDF_STATUS_SUCCESS;
+		break;
+
+	case CDF_TIMER_STATE_STOPPED:
+		vStatus = CDF_STATUS_SUCCESS;
+		break;
+
+	case CDF_TIMER_STATE_UNUSED:
+		vStatus = CDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		vStatus = CDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (CDF_STATUS_SUCCESS == vStatus) {
+		/* invalidate the cookie so later API calls reject the timer */
+		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = CDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+		return vStatus;
+	}
+
+	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	CDF_ASSERT(0);
+
+	return vStatus;
+}
+#endif
+
+/**
+ * cdf_mc_timer_start() - start a CDF Timer object
+ * @timer: Pointer to timer object
+ * @expirationTime: Time to expire, in milliseconds (must be >= 10 ms)
+ *
+ * cdf_mc_timer_start() function starts a timer to expire after the
+ * specified interval, thus running the timer callback function when
+ * the interval expires.
+ *
+ * A timer only runs once (a one-shot timer).  To re-start the
+ * timer, cdf_mc_timer_start() has to be called after the timer runs
+ * or has been cancelled.
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - timer was started
+ *	CDF_STATUS_E_INVAL - NULL timer, uninitialized timer, or an
+ *	                     expiration time below the 10 ms minimum
+ *	CDF_STATUS_E_ALREADY - timer is not currently in the STOPPED state
+ */
+CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime)
+{
+	unsigned long flags;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "Timer Addr inside cds_enable : 0x%p ", timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start uninitialized timer", __func__);
+		/* only assert for cookies that are neither valid nor
+		 * explicitly invalidated (i.e. garbage memory)
+		 */
+		if (LINUX_INVALID_TIMER_COOKIE != timer->platformInfo.cookie)
+			CDF_ASSERT(0);
+
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer has expiration time less than 10 ms */
+	if (expirationTime < 10) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start a timer with expiration less than 10 ms",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* make sure the remainder of the logic isn't interrupted */
+	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
+
+	/* ensure if the timer can be started */
+	if (CDF_TIMER_STATE_STOPPED != timer->state) {
+		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Cannot start timer in state = %d ", __func__,
+			  timer->state);
+		return CDF_STATUS_E_ALREADY;
+	}
+
+	/* start the timer */
+	mod_timer(&(timer->platformInfo.Timer),
+		  jiffies + msecs_to_jiffies(expirationTime));
+
+	timer->state = CDF_TIMER_STATE_RUNNING;
+
+	/* get the thread ID on which the timer is being started */
+	timer->platformInfo.threadID = current->pid;
+
+	if (CDF_TIMER_TYPE_WAKE_APPS == timer->type) {
+		/* presumably a file-scope count of wake-source timers;
+		 * defined outside this view — TODO confirm
+		 */
+		persistent_timer_count++;
+		if (1 == persistent_timer_count) {
+			/* since we now have one persistent timer,
+			 * we need to disallow sleep
+			 * sleep_negate_okts(sleepClientHandle);
+			 */
+		}
+	}
+
+	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_mc_timer_stop() - stop a CDF Timer
+ * @timer: Pointer to timer object
+ *
+ * cdf_mc_timer_stop() function stops a timer that has been started but
+ * has not expired, essentially cancelling the 'start' request.
+ *
+ * After a timer is stopped, it goes back to the state it was in after it
+ * was created and can be started again via a call to cdf_mc_timer_start().
+ *
+ * Return:
+ *	CDF_STATUS_SUCCESS - timer was stopped, or was not running
+ *	                     (stopping a non-running timer is not an error)
+ *	CDF_STATUS_E_INVAL - NULL or uninitialized timer pointer
+ */
+CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer)
+{
+	unsigned long flags;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Timer Addr inside cds_disable : 0x%p", __func__, timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot stop uninitialized timer", __func__);
+		/* only assert for cookies that are neither valid nor
+		 * explicitly invalidated
+		 */
+		if (LINUX_INVALID_TIMER_COOKIE != timer->platformInfo.cookie)
+			CDF_ASSERT(0);
+
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* ensure the timer state is correct */
+	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
+
+	if (CDF_TIMER_STATE_RUNNING != timer->state) {
+		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Cannot stop timer in state = %d",
+			  __func__, timer->state);
+		return CDF_STATUS_SUCCESS;
+	}
+
+	timer->state = CDF_TIMER_STATE_STOPPED;
+
+	del_timer(&(timer->platformInfo.Timer));
+
+	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
+
+	/* re-enable sleep if this was the last wake-apps timer;
+	 * called outside the spinlock
+	 */
+	try_allowing_sleep(timer->type);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
+ *
+ * Returns the current timer tick count expressed in 10 msec units.
+ * Suitable for timestamping and for computing intervals by subtracting
+ * two returned values.
+ *
+ * Return: the current system tick count (in 10 msec intervals); this
+ *         function cannot fail
+ */
+v_TIME_t cdf_mc_timer_get_system_ticks(void)
+{
+	v_TIME_t msecs_now = jiffies_to_msecs(jiffies);
+
+	return msecs_now / 10;
+}
+
+/**
+ * cdf_mc_timer_get_system_time() - Get the system time in milliseconds
+ *
+ * Reads the wall-clock time via do_gettimeofday() and converts it to
+ * milliseconds.
+ *
+ * Return: the current system time in milliseconds
+ */
+v_TIME_t cdf_mc_timer_get_system_time(void)
+{
+	struct timeval now;
+
+	do_gettimeofday(&now);
+	return (now.tv_sec * 1000) + (now.tv_usec / 1000);
+}

+ 631 - 0
core/cdf/src/cdf_memory.c

@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC:  cdf_memory
+ *
+ * Connectivity driver framework (CDF) memory management APIs
+ */
+
+/* Include Files */
+#include "cdf_memory.h"
+#include "cdf_nbuf.h"
+#include "cdf_trace.h"
+#include "cdf_lock.h"
+
+#if defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+
+#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
+#include <net/cnss_prealloc.h>
+#endif
+
+#ifdef MEMORY_DEBUG
+#include <cdf_list.h>
+
+/* list of all outstanding (not yet freed) tracked allocations */
+cdf_list_t cdf_mem_list;
+/* protects cdf_mem_list */
+cdf_spinlock_t cdf_mem_list_lock;
+
+/* guard patterns written immediately before and after each tracked
+ * allocation; cdf_mem_free() compares against them to detect
+ * under-runs and over-runs
+ */
+static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+					0x67, 0x68 };
+static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+					0x86, 0x87 };
+
+/* book-keeping header prepended to every tracked allocation;
+ * the user-visible buffer begins right after this struct
+ */
+struct s_cdf_mem_struct {
+	cdf_list_node_t pNode;	/* linkage within cdf_mem_list */
+	char *fileName;		/* allocation call site: file */
+	unsigned int lineNum;	/* allocation call site: line */
+	unsigned int size;	/* user-requested size in bytes */
+	uint8_t header[8];	/* holds WLAN_MEM_HEADER guard bytes */
+};
+#endif
+
+/* Preprocessor Definitions and Constants */
+
+/* Type Declarations */
+
+/* Data definitions */
+
+/* External Function implementation */
+#ifdef MEMORY_DEBUG
+
+/**
+ * cdf_mem_init() - initialize cdf memory debug functionality
+ *
+ * Sets up the allocation-tracking list and its spinlock used by the
+ * debug alloc/free wrappers, and initializes nbuf debug tracking.
+ *
+ * Return: none
+ */
+void cdf_mem_init(void)
+{
+	/* Initializing the list with a maximum of 60000 tracked allocations */
+	cdf_list_init(&cdf_mem_list, 60000);
+	cdf_spinlock_init(&cdf_mem_list_lock);
+	cdf_net_buf_debug_init();
+	return;
+}
+
+/**
+ * cdf_mem_clean() - display memory leak debug info and free leaked pointers
+ *
+ * Every node still on the tracking list at this point is a leak.  Runs of
+ * identical leaks (same file, line and size; the list is LIFO-ordered by
+ * allocation) are reported once with a count, and every leaked block is
+ * freed.
+ *
+ * Return: none
+ */
+void cdf_mem_clean(void)
+{
+	uint32_t listSize;
+	cdf_list_size(&cdf_mem_list, &listSize);
+
+	cdf_net_buf_debug_clean();
+
+	if (listSize) {
+		cdf_list_node_t *pNode;
+		CDF_STATUS cdf_status;
+
+		struct s_cdf_mem_struct *memStruct;
+		char *prev_mleak_file = "";
+		unsigned int prev_mleak_lineNum = 0;
+		unsigned int prev_mleak_sz = 0;
+		unsigned int mleak_cnt = 0;
+
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: List is not Empty. listSize %d ",
+			  __func__, (int)listSize);
+
+		do {
+			/* pop entries one at a time; hold the lock only
+			 * around the list operation itself
+			 */
+			cdf_spin_lock(&cdf_mem_list_lock);
+			cdf_status =
+				cdf_list_remove_front(&cdf_mem_list, &pNode);
+			cdf_spin_unlock(&cdf_mem_list_lock);
+			if (CDF_STATUS_SUCCESS == cdf_status) {
+				memStruct = (struct s_cdf_mem_struct *)pNode;
+				/* Take care to log only once multiple memory
+				   leaks from the same place */
+				if (strcmp(prev_mleak_file, memStruct->fileName)
+				    || (prev_mleak_lineNum !=
+					memStruct->lineNum)
+				    || (prev_mleak_sz != memStruct->size)) {
+					if (mleak_cnt != 0) {
+						CDF_TRACE(CDF_MODULE_ID_CDF,
+							  CDF_TRACE_LEVEL_FATAL,
+							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
+							  mleak_cnt,
+							  prev_mleak_file,
+							  prev_mleak_lineNum,
+							  prev_mleak_sz);
+					}
+					prev_mleak_file = memStruct->fileName;
+					prev_mleak_lineNum = memStruct->lineNum;
+					prev_mleak_sz = memStruct->size;
+					mleak_cnt = 0;
+				}
+				mleak_cnt++;
+				kfree((void *)memStruct);
+			}
+		} while (cdf_status == CDF_STATUS_SUCCESS);
+
+		/* Print last memory leak from the module */
+		if (mleak_cnt) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+				  "%d Time memory Leak@ File %s, @Line %d, size %d",
+				  mleak_cnt, prev_mleak_file,
+				  prev_mleak_lineNum, prev_mleak_sz);
+		}
+#ifdef CONFIG_HALT_KMEMLEAK
+		/* NOTE(review): BUG_ON(0) never fires; was BUG() (or
+		 * BUG_ON(listSize)) intended when leaks are found? confirm
+		 */
+		BUG_ON(0);
+#endif
+	}
+}
+
+/**
+ * cdf_mem_exit() - exit cdf memory debug functionality
+ *
+ * Tears down debug tracking in order: nbuf debug first, then reports and
+ * frees any leaked allocations, then destroys the (now-drained) list.
+ *
+ * Return: none
+ */
+void cdf_mem_exit(void)
+{
+	cdf_net_buf_debug_exit();
+	cdf_mem_clean();
+	cdf_list_destroy(&cdf_mem_list);
+}
+
+/**
+ * cdf_mem_malloc_debug() - debug version of CDF memory allocation API
+ * @size: Number of bytes of memory to allocate (requests above 1 MB are
+ *        rejected).
+ * @fileName: File name from which memory allocation is called
+ * @lineNum: Line number from which memory allocation is called
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory and add it to the cdf tracking list to check against memory leaks
+ * and corruptions.  Guard patterns are written immediately before and
+ * after the user area so cdf_mem_free() can detect overwrites.
+ *
+ * Note: buffers served from the CNSS pre-allocated pool are returned
+ * without guard patterns or list tracking; cdf_mem_free() returns such
+ * buffers to the pool before any tracking checks.
+ *
+ * Return:
+ *      Upon successful allocate, returns a non-NULL pointer to the allocated
+ *      memory.  If this function is unable to allocate the amount of memory
+ *      specified (for any reason) it returns %NULL.
+ *
+ */
+void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum)
+{
+	struct s_cdf_mem_struct *memStruct;
+	void *memPtr = NULL;
+	uint32_t new_size;
+	int flags = GFP_KERNEL;
+
+	if (size > (1024 * 1024)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: called with arg > 1024K; passed in %zu !!!",
+			  __func__, size);
+		return NULL;
+	}
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	/* large requests may be served from the pre-allocated pool;
+	 * such buffers bypass leak tracking entirely
+	 */
+	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
+		void *pmem;
+		pmem = wcnss_prealloc_get(size);
+		if (NULL != pmem) {
+			memset(pmem, 0, size);
+			return pmem;
+		}
+	}
+#endif
+
+	/* cannot sleep in interrupt/atomic context */
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	/* user size + tracking header + 8-byte trailer guard */
+	new_size = size + sizeof(struct s_cdf_mem_struct) + 8;
+
+	memStruct = (struct s_cdf_mem_struct *)kzalloc(new_size, flags);
+
+	if (memStruct != NULL) {
+		CDF_STATUS cdf_status;
+
+		memStruct->fileName = fileName;
+		memStruct->lineNum = lineNum;
+		memStruct->size = size;
+
+		/* front guard lives inside the header struct */
+		cdf_mem_copy(&memStruct->header[0],
+			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));
+
+		/* rear guard sits immediately after the user area */
+		cdf_mem_copy((uint8_t *) (memStruct + 1) + size,
+			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
+
+		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
+		cdf_status = cdf_list_insert_front(&cdf_mem_list,
+						   &memStruct->pNode);
+		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
+		if (CDF_STATUS_SUCCESS != cdf_status) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Unable to insert node into List cdf_status %d",
+				  __func__, cdf_status);
+		}
+
+		/* user-visible memory begins just past the tracking header */
+		memPtr = (void *)(memStruct + 1);
+	}
+	return memPtr;
+}
+
+/**
+ *  cdf_mem_free() - debug version of CDF memory free API
+ *  @ptr: Pointer to the starting address of the memory to be free'd.
+ *
+ *  This function will free the memory pointed to by 'ptr' (NULL is a
+ *  no-op).  It also checks whether the memory is corrupted (guard
+ *  patterns overwritten) or getting double freed, and panics via
+ *  CDF_BUG in either case.
+ *
+ *  Return:
+ *       Nothing
+ */
+void cdf_mem_free(void *ptr)
+{
+	if (ptr != NULL) {
+		CDF_STATUS cdf_status;
+		/* tracking header immediately precedes the user area */
+		struct s_cdf_mem_struct *memStruct =
+			((struct s_cdf_mem_struct *)ptr) - 1;
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+		/* pool buffers carry no tracking header; hand them back
+		 * to the pool before any guard/list checks
+		 */
+		if (wcnss_prealloc_put(ptr))
+			return;
+#endif
+
+		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
+		cdf_status =
+			cdf_list_remove_node(&cdf_mem_list, &memStruct->pNode);
+		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
+
+		if (CDF_STATUS_SUCCESS == cdf_status) {
+			/* verify front guard pattern */
+			if (0 == cdf_mem_compare(memStruct->header,
+						 &WLAN_MEM_HEADER[0],
+						 sizeof(WLAN_MEM_HEADER))) {
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_FATAL,
+					  "Memory Header is corrupted. MemInfo: Filename %s, LineNum %d",
+					  memStruct->fileName,
+					  (int)memStruct->lineNum);
+				CDF_BUG(0);
+			}
+			/* verify rear guard pattern */
+			if (0 ==
+			    cdf_mem_compare((uint8_t *) ptr + memStruct->size,
+					    &WLAN_MEM_TAIL[0],
+					    sizeof(WLAN_MEM_TAIL))) {
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_FATAL,
+					  "Memory Trailer is corrupted. MemInfo: Filename %s, LineNum %d",
+					  memStruct->fileName,
+					  (int)memStruct->lineNum);
+				CDF_BUG(0);
+			}
+			kfree((void *)memStruct);
+		} else {
+			/* pointer was not on the tracking list */
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+				  "%s: Unallocated memory (double free?)",
+				  __func__);
+			CDF_BUG(0);
+		}
+	}
+}
+#else
+/**
+ * cdf_mem_malloc() - allocation CDF memory
+ * @size: Number of bytes of memory to allocate.
+ *
+ * This function will dynamicallly allocate the specified number of bytes of
+ * memory.
+ *
+ *
+ * Return:
+ *	Upon successful allocate, returns a non-NULL pointer to the allocated
+ *	memory.  If this function is unable to allocate the amount of memory
+ *	specified (for any reason) it returns %NULL.
+ *
+ */
+void *cdf_mem_malloc(size_t size)
+{
+	int flags = GFP_KERNEL;
+#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
+	void *pmem;
+#endif
+	if (size > (1024 * 1024)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: called with arg > 1024K; passed in %zu !!",
+			  __func__, size);
+		return NULL;
+	}
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
+		pmem = wcnss_prealloc_get(size);
+		if (NULL != pmem) {
+			memset(pmem, 0, size);
+			return pmem;
+		}
+	}
+#endif
+
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	return kzalloc(size, flags);
+}
+
+/**
+ * cdf_mem_free() - free CDF memory
+ * @ptr: Pointer to the starting address of the memory to be free'd.
+ *
+ * Releases the memory at @ptr (NULL is a no-op), returning pool buffers
+ * to the CNSS pre-allocated pool when that support is compiled in.
+ *
+ * Return:
+ *	Nothing
+ *
+ */
+void cdf_mem_free(void *ptr)
+{
+	if (ptr != NULL) {
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+		/* buffers from the pre-allocated pool go back to the pool */
+		if (wcnss_prealloc_put(ptr))
+			return;
+#endif
+		kfree(ptr);
+	}
+}
+#endif
+
+/**
+ * cdf_mem_set() - set (fill) memory with a specified byte value.
+ * @ptr:        Pointer to memory that will be set
+ * @numBytes:   Number of bytes to be set
+ * @value:      Byte set in memory
+ *
+ * A NULL pointer is logged and ignored.
+ *
+ * Return:
+ *    Nothing
+ *
+ */
+void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value)
+{
+	if (NULL != ptr) {
+		memset(ptr, value, numBytes);
+		return;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "%s called with NULL parameter ptr", __func__);
+}
+
+/**
+ * cdf_mem_zero() - zero out memory
+ * @ptr:        pointer to memory that will be set to zero
+ * @numBytes:   number of bytes to zero
+ *
+ * Sets the memory location to all zeros, essentially clearing the memory.
+ * A zero-length request is a no-op (and @ptr may then be NULL); otherwise
+ * a NULL pointer is logged and ignored.
+ *
+ * Return:
+ *      Nothing
+ *
+ */
+void cdf_mem_zero(void *ptr, uint32_t numBytes)
+{
+	/* special case where ptr can be NULL */
+	if (numBytes == 0)
+		return;
+
+	if (NULL == ptr) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter ptr", __func__);
+		return;
+	}
+
+	memset(ptr, 0, numBytes);
+}
+
+/**
+ * cdf_mem_copy() - copy memory
+ * @pDst:       Pointer to destination memory location (to copy to)
+ * @pSrc:       Pointer to source memory location (to copy from)
+ * @numBytes:   Number of bytes to copy.
+ *
+ * Copy host memory from one location to another, similar to memcpy in
+ * standard C.  Note this function does not specifically handle overlapping
+ * source and destination memory locations.  Calling this function with
+ * overlapping source and destination memory locations will result in
+ * unpredictable results.  Use cdf_mem_move() if the memory locations
+ * for the source and destination are overlapping (or could be overlapping!)
+ *
+ * Return:
+ *    Nothing
+ *
+ */
+void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes)
+{
+	/* special case where pDst or pSrc can be NULL */
+	if (numBytes == 0)
+		return;
+
+	if ((NULL == pDst) || (NULL == pSrc)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, source:%p destination:%p",
+			  __func__, pSrc, pDst);
+		CDF_ASSERT(0);
+		return;
+	}
+
+	memcpy(pDst, pSrc, numBytes);
+}
+
+/**
+ * cdf_mem_move() - move memory
+ * @pDst:       pointer to destination memory location (to move to)
+ * @pSrc:       pointer to source memory location (to move from)
+ * @numBytes:   number of bytes to move.
+ *
+ * Move host memory from one location to another, similar to memmove in
+ * standard C.  Note this function *does* handle overlapping
+ * source and destination memory locations.
+ *
+ * Return:
+ *      Nothing
+ */
+void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes)
+{
+	/* special case where pDst or pSrc can be NULL */
+	if (numBytes == 0)
+		return;
+
+	if ((NULL == pDst) || (NULL == pSrc)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, source:%p destination:%p",
+			  __func__, pSrc, pDst);
+		CDF_ASSERT(0);
+		return;
+	}
+
+	memmove(pDst, pSrc, numBytes);
+}
+
+/**
+ * cdf_mem_compare() - memory compare
+ * @pMemory1:   pointer to one location in memory to compare.
+ * @pMemory2:   pointer to second location in memory to compare.
+ * @numBytes:   the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C, but reduced to an equal/not-equal answer.  A zero-byte
+ * compare is trivially equal; NULL pointers are logged and reported as
+ * not equal.
+ *
+ * Return:
+ *      bool - true if the memory regions are equal, false otherwise
+ *
+ */
+bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
+		     uint32_t numBytes)
+{
+	/* special case where pMemory1 or pMemory2 can be NULL */
+	if (numBytes == 0)
+		return true;
+
+	if ((NULL == pMemory1) || (NULL == pMemory2)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, p1:%p p2:%p",
+			  __func__, pMemory1, pMemory2);
+		CDF_ASSERT(0);
+		return false;
+	}
+
+	return (memcmp(pMemory1, pMemory2, numBytes) == 0);
+}
+
+/**
+ * cdf_mem_compare2() - memory compare (memcmp semantics)
+ * @pMemory1: pointer to one location in memory to compare.
+ * @pMemory2:   pointer to second location in memory to compare.
+ * @numBytes:   the number of bytes to compare.
+ *
+ * Thin wrapper around memcmp().  Unlike cdf_mem_compare(), this variant
+ * performs no NULL-pointer checking and returns the ordering rather
+ * than a boolean.
+ *
+ * Return:
+ *       int32_t - the memcmp-style ordering of the two regions:
+ *       0 -- equal
+ *       < 0 -- *pMemory1 is less than *pMemory2
+ *       > 0 -- *pMemory1 is bigger than *pMemory2
+ */
+int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
+			 uint32_t numBytes)
+{
+	return (int32_t) memcmp(pMemory1, pMemory2, numBytes);
+}
+
+/**
+ * cdf_os_mem_alloc_consistent() - allocates consistent cdf memory
+ * @osdev: OS device handle (supplies the struct device for the DMA API)
+ * @size: Size to be allocated
+ * @paddr: [out] bus/physical address of the allocation
+ * @memctx: DMA context (unused here; part of the CDF API signature)
+ *
+ * On simulation hosts (A_SIMOS_DEVHOST) this bypasses the DMA API,
+ * allocating from the heap and reporting the virtual address as the
+ * "physical" address.
+ *
+ * Return: pointer of allocated memory or null if memory alloc fails
+ */
+inline void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
+					 cdf_dma_addr_t *paddr,
+					 cdf_dma_context_t memctx)
+{
+#if defined(A_SIMOS_DEVHOST)
+	static int first = 1;
+	void *vaddr;
+
+	/* warn only once that the DMA API is being bypassed */
+	if (first) {
+		first = 0;
+		pr_err("Warning: bypassing %s\n", __func__);
+	}
+	vaddr = cdf_mem_malloc(size);
+	*paddr = ((cdf_dma_addr_t) vaddr);
+	return vaddr;
+#else
+	int flags = GFP_KERNEL;
+	void *alloc_mem = NULL;
+
+	/* cannot sleep in interrupt/atomic context */
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	alloc_mem = dma_alloc_coherent(osdev->dev, size, paddr, flags);
+	if (alloc_mem == NULL)
+		pr_err("%s Warning: unable to alloc consistent memory of size %zu!\n",
+			__func__, size);
+	return alloc_mem;
+#endif
+}
+
+/**
+ * cdf_os_mem_free_consistent() - free consistent cdf memory
+ * @osdev: OS device handle
+ * @size: Size of the original allocation
+ * @vaddr: virtual address returned by cdf_os_mem_alloc_consistent()
+ * @paddr: bus/physical address returned by cdf_os_mem_alloc_consistent()
+ * @memctx: DMA context (unused here; part of the CDF API signature)
+ *
+ * Return: none
+ */
+inline void
+cdf_os_mem_free_consistent(cdf_device_t osdev,
+			   cdf_size_t size,
+			   void *vaddr,
+			   cdf_dma_addr_t paddr, cdf_dma_context_t memctx)
+{
+#if defined(A_SIMOS_DEVHOST)
+	static int first = 1;
+
+	/* warn only once that the DMA API is being bypassed */
+	if (first) {
+		first = 0;
+		pr_err("Warning: bypassing %s\n", __func__);
+	}
+	cdf_mem_free(vaddr);
+	return;
+#else
+	dma_free_coherent(osdev->dev, size, vaddr, paddr);
+#endif
+}
+
+
+/**
+ * cdf_os_mem_dma_sync_single_for_device() - assign memory to device
+ * @osdev: OS device handle
+ * @bus_addr: dma address to give to the device
+ * @size: Size of the memory block
+ * @direction: direction data will be dma'ed
+ *
+ * Assign memory to the remote device.
+ * The cache lines are flushed to ram or invalidated as needed.
+ *
+ * Return: none
+ */
+
+inline void
+cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
+				      cdf_dma_addr_t bus_addr,
+				      cdf_size_t size,
+				      enum dma_data_direction direction)
+{
+	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
+}

+ 1017 - 0
core/cdf/src/cdf_nbuf.c

@@ -0,0 +1,1017 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cdf_nbuf.c
+ *
+ * Connectivity driver framework(CDF) network buffer management APIs
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <cdf_types.h>
+#include <cdf_nbuf.h>
+#include <cdf_memory.h>
+#include <cdf_trace.h>
+#include <cdf_status.h>
+#include <cdf_lock.h>
+
+#if defined(FEATURE_TSO)
+#include <net/ipv6.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#endif /* FEATURE_TSO */
+
+/* Packet Counter: per-layer TX packet counts, indexed by NBUF_TX_PKT_*
+ * state — one array for management frames, one for data frames; updated
+ * via cdf_nbuf_tx_desc_count_update()
+ */
+static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];
+static uint32_t nbuf_tx_data[NBUF_TX_PKT_STATE_MAX];
+
+/**
+ * cdf_nbuf_tx_desc_count_display() - Displays the packet counter
+ *
+ * For data and management frames, prints how many packets are currently
+ * held at each layer; each printed value is the difference between the
+ * count that entered a layer and the count that left it.
+ *
+ * Return: none
+ */
+void cdf_nbuf_tx_desc_count_display(void)
+{
+	cdf_print("Current Snapshot of the Driver:\n");
+	cdf_print("Data Packets:\n");
+	cdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
+		nbuf_tx_data[NBUF_TX_PKT_HDD] -
+		(nbuf_tx_data[NBUF_TX_PKT_TXRX] +
+		nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE]),
+		nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE],
+		nbuf_tx_data[NBUF_TX_PKT_TXRX] - nbuf_tx_data[NBUF_TX_PKT_HTT],
+		nbuf_tx_data[NBUF_TX_PKT_HTT]  - nbuf_tx_data[NBUF_TX_PKT_HTC]);
+	cdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
+		nbuf_tx_data[NBUF_TX_PKT_HTC]  - nbuf_tx_data[NBUF_TX_PKT_HIF],
+		nbuf_tx_data[NBUF_TX_PKT_HIF]  - nbuf_tx_data[NBUF_TX_PKT_CE],
+		nbuf_tx_data[NBUF_TX_PKT_CE]   - nbuf_tx_data[NBUF_TX_PKT_FREE],
+		nbuf_tx_data[NBUF_TX_PKT_FREE]);
+	cdf_print("Mgmt Packets:\n");
+	cdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
+		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_DEQUEUE],
+		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX] - nbuf_tx_mgmt[NBUF_TX_PKT_HTT],
+		nbuf_tx_mgmt[NBUF_TX_PKT_HTT]  - nbuf_tx_mgmt[NBUF_TX_PKT_HTC],
+		nbuf_tx_mgmt[NBUF_TX_PKT_HTC]  - nbuf_tx_mgmt[NBUF_TX_PKT_HIF],
+		nbuf_tx_mgmt[NBUF_TX_PKT_HIF]  - nbuf_tx_mgmt[NBUF_TX_PKT_CE],
+		nbuf_tx_mgmt[NBUF_TX_PKT_CE]   - nbuf_tx_mgmt[NBUF_TX_PKT_FREE],
+		nbuf_tx_mgmt[NBUF_TX_PKT_FREE]);
+}
+
+/**
+ * cdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
+ * @packet_type: NBUF_TX_PKT_MGMT_TRACK or NBUF_TX_PKT_DATA_TRACK
+ * @current_state: layer at which the packet currently present
+ *
+ * Increments the counter of the matching frame class at @current_state;
+ * any other packet type is ignored.
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_tx_desc_count_update(uint8_t packet_type,
+							uint8_t current_state)
+{
+	if (packet_type == NBUF_TX_PKT_MGMT_TRACK)
+		nbuf_tx_mgmt[current_state]++;
+	else if (packet_type == NBUF_TX_PKT_DATA_TRACK)
+		nbuf_tx_data[current_state]++;
+}
+
+/**
+ * cdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
+ *
+ * Resets every per-layer counter in both tracking arrays to zero.
+ *
+ * Return: none
+ */
+void cdf_nbuf_tx_desc_count_clear(void)
+{
+	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
+	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
+}
+
+/**
+ * cdf_nbuf_set_state() - Updates the packet state
+ * @nbuf: network buffer
+ * @current_state: layer at which the packet currently is
+ *
+ * Records the layer the packet has reached and bumps the matching
+ * per-layer counter.  Only management and data frames are tracked;
+ * other traffic (e.g. WMI messages such as scan commands) is ignored.
+ *
+ * Return: none
+ */
+void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
+{
+	uint8_t packet_type = NBUF_GET_PACKET_TRACK(nbuf);
+
+	/* untracked traffic: neither data nor management */
+	if (packet_type != NBUF_TX_PKT_DATA_TRACK &&
+	    packet_type != NBUF_TX_PKT_MGMT_TRACK)
+		return;
+
+	NBUF_SET_PACKET_STATE(nbuf, current_state);
+	cdf_nbuf_tx_desc_count_update(packet_type, current_state);
+}
+
+cdf_nbuf_trace_update_t trace_update_cb = NULL;
+
+/**
+ * __cdf_nbuf_alloc() - Allocate nbuf
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @reserve: headroom to reserve (applied after alignment)
+ * @align: required alignment of skb->data; 0 means no constraint
+ * @prio: Priority (unused in this implementation)
+ *
+ * This allocates an nbuf, aligns if needed and reserves some space in the
+ * front, since the reserve is done after alignment the reserve value if
+ * being unaligned will result in an unaligned address.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
+				 int align, int prio)
+{
+	struct sk_buff *skb;
+	unsigned long offset;
+
+	/* over-allocate so the alignment reserve below cannot shrink the
+	 * usable area under the requested size
+	 */
+	if (align)
+		size += (align - 1);
+
+	skb = dev_alloc_skb(size);
+
+	if (!skb) {
+		pr_err("ERROR:NBUF alloc failed\n");
+		return NULL;
+	}
+	/* start with a clean control block */
+	memset(skb->cb, 0x0, sizeof(skb->cb));
+
+	/*
+	 * The default is for netbuf fragments to be interpreted
+	 * as wordstreams rather than bytestreams.
+	 * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
+	 * to provide this default.
+	 */
+	NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
+		(1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;
+
+	/*
+	 * XXX:how about we reserve first then align
+	 * Align & make sure that the tail & data are adjusted properly
+	 */
+
+	if (align) {
+		offset = ((unsigned long)skb->data) % align;
+		if (offset)
+			skb_reserve(skb, align - offset);
+	}
+
+	/*
+	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
+	 * pointer
+	 */
+	skb_reserve(skb, reserve);
+
+	return skb;
+}
+
+/**
+ * __cdf_nbuf_free() - free the nbuf its interrupt safe
+ * @skb: Pointer to network buffer
+ *
+ * IPA-owned buffers with a registered callback are returned through that
+ * callback; everything else is freed via dev_kfree_skb_any().
+ *
+ * Return: none
+ */
+void __cdf_nbuf_free(struct sk_buff *skb)
+{
+	bool ipa_owned = (NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID);
+
+	if (ipa_owned && NBUF_CALLBACK_FN(skb))
+		NBUF_CALLBACK_FN_EXEC(skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+/**
+ * __cdf_nbuf_map() - get the dma map of the nbuf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction (CDF_DMA_TO_DEVICE or CDF_DMA_FROM_DEVICE)
+ *
+ * Only single-fragment skbs are supported; the fragment count is asserted
+ * in debug builds.  The actual mapping is delegated to
+ * __cdf_nbuf_map_single().
+ *
+ * Return: CDF_STATUS_SUCCESS on success, or the failure status from
+ *         __cdf_nbuf_map_single()
+ */
+CDF_STATUS
+__cdf_nbuf_map(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
+{
+#ifdef CDF_OS_DEBUG
+	struct skb_shared_info *sh = skb_shinfo(skb);
+#endif
+	cdf_assert((dir == CDF_DMA_TO_DEVICE)
+		   || (dir == CDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's only a single fragment.
+	 * To support multiple fragments, it would be necessary to change
+	 * cdf_nbuf_t to be a separate object that stores meta-info
+	 * (including the bus address for each fragment) and a pointer
+	 * to the underlying sk_buff.
+	 */
+	cdf_assert(sh->nr_frags == 0);
+
+	/* fix: the original had an unreachable
+	 * "return CDF_STATUS_SUCCESS;" after this return — removed
+	 */
+	return __cdf_nbuf_map_single(osdev, skb, dir);
+}
+
+/**
+ * __cdf_nbuf_unmap() - to unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction (CDF_DMA_TO_DEVICE or CDF_DMA_FROM_DEVICE)
+ *
+ * Return: none
+ */
+void
+__cdf_nbuf_unmap(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
+{
+	/* fix: the original asserted this exact condition twice
+	 * back-to-back; the redundant duplicate has been removed
+	 */
+	cdf_assert((dir == CDF_DMA_TO_DEVICE)
+		   || (dir == CDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __cdf_nbuf_map will catch it.
+	 */
+	__cdf_nbuf_unmap_single(osdev, skb, dir);
+}
+
/**
 * __cdf_nbuf_map_single() - dma map of a single-fragment nbuf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: DMA direction
 *
 * Maps the skb's linear data area (data..end) for DMA and stores the
 * resulting bus address in the nbuf control block via
 * NBUF_MAPPED_PADDR_LO().
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE when the
 *	DMA mapping fails
 */
CDF_STATUS
__cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
{
	uint32_t paddr_lo;

/* temporary hack for simulation */
#ifdef A_SIMOS_DEVHOST
	/* no real DMA on the simulation host: reuse the virtual address */
	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo = (uint32_t) buf->data;
	return CDF_STATUS_SUCCESS;
#else
	/* assume that the OS only provides a single fragment */
	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo =
					dma_map_single(osdev->dev, buf->data,
					skb_end_pointer(buf) - buf->data, dir);
	return dma_mapping_error(osdev->dev, paddr_lo) ?
	       CDF_STATUS_E_FAILURE : CDF_STATUS_SUCCESS;
#endif /* #ifdef A_SIMOS_DEVHOST */
}
+
+/**
+ * __cdf_nbuf_unmap_single() - dma unmap nbuf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Return: none
+ */
+void
+__cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
+{
+#if !defined(A_SIMOS_DEVHOST)
+	dma_unmap_single(osdev->dev, NBUF_MAPPED_PADDR_LO(buf),
+			 skb_end_pointer(buf) - buf->data, dir);
+#endif /* #if !defined(A_SIMOS_DEVHOST) */
+}
+
+/**
+ * __cdf_nbuf_set_rx_cksum() - set rx checksum
+ * @skb: Pointer to network buffer
+ * @cksum: Pointer to checksum value
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS
+__cdf_nbuf_set_rx_cksum(struct sk_buff *skb, cdf_nbuf_rx_cksum_t *cksum)
+{
+	switch (cksum->l4_result) {
+	case CDF_NBUF_RX_CKSUM_NONE:
+		skb->ip_summed = CHECKSUM_NONE;
+		break;
+	case CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	case CDF_NBUF_RX_CKSUM_TCP_UDP_HW:
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum = cksum->val;
+		break;
+	default:
+		pr_err("ADF_NET:Unknown checksum type\n");
+		cdf_assert(0);
+		return CDF_STATUS_E_NOSUPPORT;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_nbuf_get_tx_cksum() - get tx checksum
+ * @skb: Pointer to network buffer
+ *
+ * Return: TX checksum value
+ */
+cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb)
+{
+	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		return CDF_NBUF_TX_CKSUM_NONE;
+	case CHECKSUM_PARTIAL:
+		/* XXX ADF and Linux checksum don't map with 1-to-1. This is
+		 * not 100% correct */
+		return CDF_NBUF_TX_CKSUM_TCP_UDP;
+	case CHECKSUM_COMPLETE:
+		return CDF_NBUF_TX_CKSUM_TCP_UDP_IP;
+	default:
+		return CDF_NBUF_TX_CKSUM_NONE;
+	}
+}
+
+/**
+ * __cdf_nbuf_get_tid() - get tid
+ * @skb: Pointer to network buffer
+ *
+ * Return: tid
+ */
+uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb)
+{
+	return skb->priority;
+}
+
+/**
+ * __cdf_nbuf_set_tid() - set tid
+ * @skb: Pointer to network buffer
+ *
+ * Return: none
+ */
+void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
+{
+	skb->priority = tid;
+}
+
/**
 * __cdf_nbuf_get_exemption_type() - get the 802.1X exemption type
 * @skb: Pointer to network buffer (currently unused)
 *
 * NOTE: the original header comment was a stale copy of the set_tid()
 * one; this stub reports no exemption for every frame.
 *
 * Return: CDF_NBUF_EXEMPT_NO_EXEMPTION always
 */
uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return CDF_NBUF_EXEMPT_NO_EXEMPTION;
}
+
+/**
+ * __cdf_nbuf_reg_trace_cb() - register trace callback
+ * @cb_func_ptr: Pointer to trace callback function
+ *
+ * Return: none
+ */
+void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
+{
+	trace_update_cb = cb_func_ptr;
+	return;
+}
+
+#ifdef QCA_PKT_PROTO_TRACE
+/**
+ * __cdf_nbuf_trace_update() - update trace event
+ * @skb: Pointer to network buffer
+ * @event_string: Pointer to trace callback function
+ *
+ * Return: none
+ */
+void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
+{
+	char string_buf[NBUF_PKT_TRAC_MAX_STRING];
+
+	if ((!trace_update_cb) || (!event_string))
+		return;
+
+	if (!cdf_nbuf_trace_get_proto_type(buf))
+		return;
+
+	/* Buffer over flow */
+	if (NBUF_PKT_TRAC_MAX_STRING <=
+	    (cdf_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING)) {
+		return;
+	}
+
+	cdf_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING);
+	cdf_mem_copy(string_buf, event_string, cdf_str_len(event_string));
+	if (NBUF_PKT_TRAC_TYPE_EAPOL & cdf_nbuf_trace_get_proto_type(buf)) {
+		cdf_mem_copy(string_buf + cdf_str_len(event_string),
+			     "EPL", NBUF_PKT_TRAC_PROTO_STRING);
+	} else if (NBUF_PKT_TRAC_TYPE_DHCP & cdf_nbuf_trace_get_proto_type(buf)) {
+		cdf_mem_copy(string_buf + cdf_str_len(event_string),
+			     "DHC", NBUF_PKT_TRAC_PROTO_STRING);
+	} else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
+		   cdf_nbuf_trace_get_proto_type(buf)) {
+		cdf_mem_copy(string_buf + cdf_str_len(event_string),
+			     "MACT", NBUF_PKT_TRAC_PROTO_STRING);
+	}
+
+	trace_update_cb(string_buf);
+	return;
+}
+#endif /* QCA_PKT_PROTO_TRACE */
+
+#ifdef MEMORY_DEBUG
/* number of hash buckets; must stay a power of two because
 * cdf_net_buf_debug_hash() masks with (size - 1) */
#define CDF_NET_BUF_TRACK_MAX_SIZE    (1024)

/**
 * struct cdf_nbuf_track_t - tracking record for one allocated nbuf
 *
 * @p_next: next record in the same hash-bucket collision chain
 * @net_buf: the tracked network buffer (also the hash key)
 * @file_name: file that allocated the buffer
 * @line_num: line that allocated the buffer
 * @size: allocation size in bytes, as reported to the tracker
 */
struct cdf_nbuf_track_t {
	struct cdf_nbuf_track_t *p_next;
	cdf_nbuf_t net_buf;
	uint8_t *file_name;
	uint32_t line_num;
	size_t size;
};

/* protects gp_cdf_net_buf_track_tbl and every chain hanging off it */
spinlock_t g_cdf_net_buf_track_lock;
typedef struct cdf_nbuf_track_t CDF_NBUF_TRACK;

/* hash table of tracking records, indexed by cdf_net_buf_debug_hash() */
CDF_NBUF_TRACK *gp_cdf_net_buf_track_tbl[CDF_NET_BUF_TRACK_MAX_SIZE];
+
+/**
+ * cdf_net_buf_debug_init() - initialize network buffer debug functionality
+ *
+ * CDF network buffer debug feature tracks all SKBs allocated by WLAN driver
+ * in a hash table and when driver is unloaded it reports about leaked SKBs.
+ * WLAN driver module whose allocated SKB is freed by network stack are
+ * suppose to call cdf_net_buf_debug_release_skb() such that the SKB is not
+ * reported as memory leak.
+ *
+ * Return: none
+ */
+void cdf_net_buf_debug_init(void)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+
+	spin_lock_init(&g_cdf_net_buf_track_lock);
+
+	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
+
+	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++)
+		gp_cdf_net_buf_track_tbl[i] = NULL;
+
+	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+
/**
 * cdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Walks the whole tracking hash table and logs every record that is
 * still present as an SKB memory leak.  Records are reported, not
 * freed — cdf_net_buf_debug_clean() releases the memory.
 * (The original kdoc named this function "cdf_net_buf_debug_init".)
 *
 * Return: none
 */
void cdf_net_buf_debug_exit(void)
{
	uint32_t i;
	unsigned long irq_flag;
	CDF_NBUF_TRACK *p_node;
	CDF_NBUF_TRACK *p_prev;

	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		p_node = gp_cdf_net_buf_track_tbl[i];
		while (p_node) {
			p_prev = p_node;
			p_node = p_node->p_next;
			/* anything still tracked at exit never came back
			 * through the delete/release path: report it */
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
				  "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
				  p_prev->file_name, p_prev->line_num,
				  p_prev->size);
		}
	}

	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);

	return;
}
+
+/**
+ * cdf_net_buf_debug_clean() - clean up network buffer debug functionality
+ *
+ * Return: none
+ */
+void cdf_net_buf_debug_clean(void)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+	CDF_NBUF_TRACK *p_node;
+	CDF_NBUF_TRACK *p_prev;
+
+	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
+
+	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
+		p_node = gp_cdf_net_buf_track_tbl[i];
+		while (p_node) {
+			p_prev = p_node;
+			p_node = p_node->p_next;
+			cdf_mem_free(p_prev);
+		}
+	}
+
+	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+
+/**
+ * cdf_net_buf_debug_hash() - hash network buffer pointer
+ *
+ * Return: hash value
+ */
+uint32_t cdf_net_buf_debug_hash(cdf_nbuf_t net_buf)
+{
+	uint32_t i;
+
+	i = (uint32_t) ((uintptr_t) net_buf & (CDF_NET_BUF_TRACK_MAX_SIZE - 1));
+
+	return i;
+}
+
+/**
+ * cdf_net_buf_debug_look_up() - look up network buffer in debug hash table
+ *
+ * Return: If skb is found in hash table then return pointer to network buffer
+ *	else return %NULL
+ */
+CDF_NBUF_TRACK *cdf_net_buf_debug_look_up(cdf_nbuf_t net_buf)
+{
+	uint32_t i;
+	CDF_NBUF_TRACK *p_node;
+
+	i = cdf_net_buf_debug_hash(net_buf);
+	p_node = gp_cdf_net_buf_track_tbl[i];
+
+	while (p_node) {
+		if (p_node->net_buf == net_buf)
+			return p_node;
+		p_node = p_node->p_next;
+	}
+
+	return NULL;
+
+}
+
+/**
+ * cdf_net_buf_debug_add_node() - store skb in debug hash table
+ *
+ * Return: none
+ */
+void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
+				uint8_t *file_name, uint32_t line_num)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+	CDF_NBUF_TRACK *p_node;
+
+	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
+
+	i = cdf_net_buf_debug_hash(net_buf);
+	p_node = cdf_net_buf_debug_look_up(net_buf);
+
+	if (p_node) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Double allocation of skb ! Already allocated from %s %d",
+			  p_node->file_name, p_node->line_num);
+		CDF_ASSERT(0);
+		goto done;
+	} else {
+		p_node = (CDF_NBUF_TRACK *) cdf_mem_malloc(sizeof(*p_node));
+		if (p_node) {
+			p_node->net_buf = net_buf;
+			p_node->file_name = file_name;
+			p_node->line_num = line_num;
+			p_node->size = size;
+			p_node->p_next = gp_cdf_net_buf_track_tbl[i];
+			gp_cdf_net_buf_track_tbl[i] = p_node;
+		} else {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
+				  file_name, line_num, size);
+			CDF_ASSERT(0);
+		}
+	}
+
+done:
+	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+
/**
 * cdf_net_buf_debug_delete_node() - remove an skb from the debug hash table
 * @net_buf: buffer whose tracking record should be removed
 *
 * Unlinks and frees the record matching @net_buf.  If the buffer was
 * never tracked (or was already deleted), an error is logged and
 * asserted — that indicates a double free or a foreign buffer.
 *
 * Return: none
 */
void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf)
{
	uint32_t i;
	bool found = false;
	CDF_NBUF_TRACK *p_head;
	CDF_NBUF_TRACK *p_node;
	unsigned long irq_flag;
	CDF_NBUF_TRACK *p_prev;

	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

	i = cdf_net_buf_debug_hash(net_buf);
	p_head = gp_cdf_net_buf_track_tbl[i];

	/* Unallocated SKB */
	if (!p_head)
		goto done;

	p_node = p_head;
	/* Found at head of the table */
	if (p_head->net_buf == net_buf) {
		gp_cdf_net_buf_track_tbl[i] = p_node->p_next;
		cdf_mem_free((void *)p_node);
		found = true;
		goto done;
	}

	/* Search in collision list: p_prev trails p_node by one link so
	 * the match can be unlinked */
	while (p_node) {
		p_prev = p_node;
		p_node = p_node->p_next;
		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
			p_prev->p_next = p_node->p_next;
			cdf_mem_free((void *)p_node);
			found = true;
			break;
		}
	}

done:
	if (!found) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "Unallocated buffer ! Double free of net_buf %p ?",
			  net_buf);
		CDF_ASSERT(0);
	}

	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);

	return;
}
+
/**
 * cdf_net_buf_debug_release_skb() - release skb to avoid memory leak
 * @net_buf: SKB that is being handed over to the network stack
 *
 * WLAN driver modules whose allocated SKBs are freed by the network
 * stack are supposed to call this API before returning the SKB to the
 * stack, so the SKB is not reported as a memory leak.  It simply
 * removes the buffer's tracking record.
 *
 * Return: none
 */
void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
{
	cdf_net_buf_debug_delete_node(net_buf);
}
+
+#endif /*MEMORY_DEBUG */
+#if defined(FEATURE_TSO)
+
/**
 * struct cdf_tso_cmn_seg_info_t - TSO state shared by all segments of
 * one jumbo packet
 * @ethproto: ethernet protocol (network byte order)
 * @ip_tcp_hdr_len: combined IP + TCP header length
 * @l2_len: L2 (ethernet) header length
 * @eit_hdr: pointer to the ethernet + IP + TCP header
 * @eit_hdr_len: length of @eit_hdr
 * @tcphdr: pointer to the packet's TCP header
 * @ipv4_csum_en: enable IPv4 header checksum offload
 * @tcp_ipv4_csum_en: enable TCP-over-IPv4 checksum offload
 * @tcp_ipv6_csum_en: enable TCP-over-IPv6 checksum offload
 * @ip_id: IPv4 identification of the first segment (incremented per segment)
 * @tcp_seq_num: TCP sequence number of the first segment
 */
struct cdf_tso_cmn_seg_info_t {
	uint16_t ethproto;
	uint16_t ip_tcp_hdr_len;
	uint16_t l2_len;
	unsigned char *eit_hdr;
	unsigned int eit_hdr_len;
	struct tcphdr *tcphdr;
	uint16_t ipv4_csum_en;
	uint16_t tcp_ipv4_csum_en;
	uint16_t tcp_ipv6_csum_en;
	uint16_t ip_id;
	uint32_t tcp_seq_num;
};
+
+/**
+ * __cdf_nbuf_get_tso_cmn_seg_info() - get TSO common
+ * information
+ *
+ * Get the TSO information that is common across all the TCP
+ * segments of the jumbo packet
+ *
+ * Return: 0 - success 1 - failure
+ */
+uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
+	struct cdf_tso_cmn_seg_info_t *tso_info)
+{
+	/* Get ethernet type and ethernet header length */
+	tso_info->ethproto = vlan_get_protocol(skb);
+
+	/* Determine whether this is an IPv4 or IPv6 packet */
+	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
+		/* for IPv4, get the IP ID and enable TCP and IP csum */
+		struct iphdr *ipv4_hdr = ip_hdr(skb);
+		tso_info->ip_id = ntohs(ipv4_hdr->id);
+		tso_info->ipv4_csum_en = 1;
+		tso_info->tcp_ipv4_csum_en = 1;
+		if (cdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
+			cdf_print("TSO IPV4 proto 0x%x not TCP\n",
+				 ipv4_hdr->protocol);
+			return 1;
+		}
+	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
+		/* for IPv6, enable TCP csum. No IP ID or IP csum */
+		tso_info->tcp_ipv6_csum_en = 1;
+	} else {
+		cdf_print("TSO: ethertype 0x%x is not supported!\n",
+			 tso_info->ethproto);
+		return 1;
+	}
+
+	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
+	tso_info->tcphdr = tcp_hdr(skb);
+	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
+	/* get pointer to the ethernet + IP + TCP header and their length */
+	tso_info->eit_hdr = skb->data;
+	tso_info->eit_hdr_len = (skb_transport_header(skb)
+		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
+	tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
+	return 0;
+}
+
/**
 * __cdf_nbuf_get_tso_info() - divide a TSO nbuf into segments
 * @osdev: OS device, used for DMA mapping of each fragment
 * @skb: network buffer to be segmented
 * @tso_info: output; tso_seg_list must hold at least num_segs
 *	pre-allocated segment elements on entry, and is populated with
 *	the per-segment fragment/flag information
 *
 * Fragments a TCP jumbo packet into smaller segments to be transmitted
 * by the driver, chaining the TSO segments created into a list.  Each
 * segment's fragment 0 is the shared ethernet+IP+TCP header; payload
 * fragments are DMA-mapped from the skb's linear area and paged frags.
 *
 * Return: number of TSO segments produced (0 on failure)
 */
uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
		struct cdf_tso_info_t *tso_info)
{
	/* common across all segments */
	struct cdf_tso_cmn_seg_info_t tso_cmn_info;

	/* segment specific */
	char *tso_frag_vaddr;
	uint32_t tso_frag_paddr_32 = 0;
	uint32_t num_seg = 0;
	struct cdf_tso_seg_elem_t *curr_seg;
	const struct skb_frag_struct *frag = NULL;
	uint32_t tso_frag_len = 0; /* tso segment's fragment length */
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
	uint32_t foffset = 0; /* offset into the skb's fragment */
	uint32_t skb_proc = 0; /* bytes of the skb that have been processed */
	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;

	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));

	if (cdf_unlikely(__cdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
		cdf_print("TSO: error getting common segment info\n");
		return 0;
	}
	curr_seg = tso_info->tso_seg_list;

	/* length of the first chunk of data in the skb */
	skb_proc = skb_frag_len = skb->len - skb->data_len;

	/* the 0th tso segment's 0th fragment always contains the EIT header */
	/* update the remaining skb fragment length and TSO segment length */
	skb_frag_len -= tso_cmn_info.eit_hdr_len;
	skb_proc -= tso_cmn_info.eit_hdr_len;

	/* get the address to the next tso fragment */
	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
	/* get the length of the next tso fragment */
	tso_frag_len = min(skb_frag_len, tso_seg_size);
	tso_frag_paddr_32 = dma_map_single(osdev->dev,
		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);

	/* caller pre-computed num_segs; it is recounted as segments are
	 * actually filled in */
	num_seg = tso_info->num_segs;
	tso_info->num_segs = 0;
	tso_info->is_tso = 1;

	while (num_seg && curr_seg) {
		int i = 1; /* tso fragment index */
		int j = 0; /* skb fragment index */
		uint8_t more_tso_frags = 1;
		uint8_t from_frag_table = 0;

		/* Initialize the flags to 0 */
		memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
		tso_info->num_segs++;

		/* The following fields remain the same across all segments of
		 a jumbo packet */
		curr_seg->seg.tso_flags.tso_enable = 1;
		curr_seg->seg.tso_flags.partial_checksum_en = 0;
		curr_seg->seg.tso_flags.ipv4_checksum_en =
			tso_cmn_info.ipv4_csum_en;
		curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
			tso_cmn_info.tcp_ipv6_csum_en;
		curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
			tso_cmn_info.tcp_ipv4_csum_en;
		curr_seg->seg.tso_flags.l2_len = 0;
		curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
		curr_seg->seg.num_frags = 0;

		/* The following fields change for the segments */
		curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
		tso_cmn_info.ip_id++;

		curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
		curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
		curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
		curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
		curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
		curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
		curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;

		curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;

		/* First fragment for each segment always contains the ethernet,
		IP and TCP header */
		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
		/* NOTE(review): the EIT header is DMA-mapped once per
		 * segment; presumably unmapped on completion — confirm */
		curr_seg->seg.tso_frags[0].paddr_low_32 =
			 dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
		curr_seg->seg.num_frags++;

		while (more_tso_frags) {
			curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
			curr_seg->seg.tso_frags[i].length = tso_frag_len;
			tso_info->total_len +=
				 curr_seg->seg.tso_frags[i].length;
			curr_seg->seg.tso_flags.ip_len +=
				 curr_seg->seg.tso_frags[i].length;
			curr_seg->seg.num_frags++;
			skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;

			/* increment the TCP sequence number */
			tso_cmn_info.tcp_seq_num += tso_frag_len;
			curr_seg->seg.tso_frags[i].paddr_upper_16 = 0;
			curr_seg->seg.tso_frags[i].paddr_low_32 =
				 tso_frag_paddr_32;

			/* if there is no more data left in the skb */
			if (!skb_proc)
				return tso_info->num_segs;

			/* get the next payload fragment information */
			/* check if there are more fragments in this segment */
			if ((tso_seg_size - tso_frag_len)) {
				more_tso_frags = 1;
				i++;
			} else {
				more_tso_frags = 0;
				/* reset i and the tso payload size */
				i = 1;
				tso_seg_size = skb_shinfo(skb)->gso_size;
			}

			/* if the next fragment is contiguous */
			if (tso_frag_len < skb_frag_len) {
				skb_frag_len = skb_frag_len - tso_frag_len;
				tso_frag_len = min(skb_frag_len, tso_seg_size);
				/* NOTE(review): the vaddr is advanced by the
				 * NEW tso_frag_len computed on the previous
				 * line, not by the length just consumed —
				 * looks wrong when the two differ; confirm
				 * against later upstream fixes */
				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
				if (from_frag_table) {
					/* NOTE(review): foffset is not
					 * advanced on this path — verify */
					tso_frag_paddr_32 =
						 skb_frag_dma_map(osdev->dev,
							 frag, foffset,
							 tso_frag_len,
							 DMA_TO_DEVICE);
				} else {
					tso_frag_paddr_32 =
						 dma_map_single(osdev->dev,
							 tso_frag_vaddr,
							 tso_frag_len,
							 DMA_TO_DEVICE);
				}
			} else { /* the next fragment is not contiguous */
				tso_frag_len = min(skb_frag_len, tso_seg_size);
				frag = &skb_shinfo(skb)->frags[j];
				skb_frag_len = skb_frag_size(frag);

				tso_frag_vaddr = skb_frag_address(frag);
				tso_frag_paddr_32 = skb_frag_dma_map(osdev->dev,
					 frag, 0, tso_frag_len,
					 DMA_TO_DEVICE);
				foffset += tso_frag_len;
				from_frag_table = 1;
				j++;
			}
		}
		num_seg--;
		/* if TCP FIN flag was set, set it in the last segment */
		if (!num_seg)
			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;

		curr_seg = curr_seg->next;
	}
	return tso_info->num_segs;
}
+
+/**
+ * __cdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
+ * into segments
+ * @nbuf:   network buffer to be segmented
+ * @tso_info:  This is the output. The information about the
+ *      TSO segments will be populated within this.
+ *
+ * This function fragments a TCP jumbo packet into smaller
+ * segments to be transmitted by the driver. It chains the TSO
+ * segments created into a list.
+ *
+ * Return: 0 - success, 1 - failure
+ */
+uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
+{
+	uint32_t gso_size, tmp_len, num_segs = 0;
+
+	gso_size = skb_shinfo(skb)->gso_size;
+	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
+		+ tcp_hdrlen(skb));
+	while (tmp_len) {
+		num_segs++;
+		if (tmp_len > gso_size)
+			tmp_len -= gso_size;
+		else
+			break;
+	}
+	return num_segs;
+}
+
/**
 * __cdf_nbuf_inc_users() - take an additional reference on an skb
 * @skb: Pointer to network buffer
 *
 * Return: @skb, with its user count incremented
 */
struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
+
+#endif /* FEATURE_TSO */

+ 107 - 0
core/cdf/src/cdf_threads.c

@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC:  cdf_threads
+ *
+ * Connectivity driver framework (CDF) thread APIs
+ *
+ */
+
+/* Include Files */
+#include <cdf_threads.h>
+#include <cdf_trace.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+
+/* Function declarations and documenation */
+
/**
 *  cdf_sleep() - sleep
 *  @msInterval : Number of milliseconds to suspend the current thread.
 *  A value of 0 may or may not cause the current thread to yield.
 *
 *  This function suspends the execution of the current thread
 *  until the specified time out interval elapses.  It must not be
 *  called from interrupt context; such calls are logged and ignored.
 *  NOTE(review): msleep_interruptible() may return early on a signal;
 *  the remaining time is not re-slept here — confirm callers tolerate
 *  a shortened sleep.
 *
 *  Return: nothing
 */
void cdf_sleep(uint32_t msInterval)
{
	if (in_interrupt()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return;
	}
	msleep_interruptible(msInterval);
}
+
+/**
+ *  cdf_sleep_us() - sleep
+ *  @usInterval : Number of microseconds to suspend the current thread.
+ *  A value of 0 may or may not cause the current thread to yield.
+ *
+ *  This function suspends the execution of the current thread
+ *  until the specified time out interval elapses.
+ *
+ *  Return : nothing
+ */
+void cdf_sleep_us(uint32_t usInterval)
+{
+	unsigned long timeout = usecs_to_jiffies(usInterval) + 1;
+	if (in_interrupt()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return;
+	}
+
+	while (timeout && !signal_pending(current))
+		timeout = schedule_timeout_interruptible(timeout);
+}
+
/**
 *  cdf_busy_wait() - busy wait
 *  @usInterval : Number of microseconds to busy wait.
 *
 *  This function places the current thread in busy wait until the specified
 *  time out interval elapses. If the interval is greater than 50us on WM, the
 *  behaviour is undefined.  Spins the CPU (udelay); prefer cdf_sleep_us()
 *  for long intervals in sleepable context.
 *
 *  Return : nothing
 */
void cdf_busy_wait(uint32_t usInterval)
{
	udelay(usInterval);
}

+ 1018 - 0
core/cdf/src/cdf_trace.c

@@ -0,0 +1,1018 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ *  DOC:  cdf_trace
+ *
+ *  Connectivity driver framework (CDF) trace APIs
+ *
+ *  Trace, logging, and debugging definitions and APIs
+ *
+ */
+
+/* Include Files */
+#include <cdf_trace.h>
+#include <ani_global.h>
+#include <wlan_logging_sock_svc.h>
+#include "cdf_time.h"
+/* Preprocessor definitions and constants */
+
/* scratch buffer size used when formatting one trace message */
#define CDF_TRACE_BUFFER_SIZE (512)

/* macro to map cdf trace levels into the bitmask:
 * level N occupies bit N of moduleTraceLevel */
#define CDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level)))

typedef struct {
	/* Trace level for a module, as a bitmask.  The bits in this mask
	 * are ordered by CDF_TRACE_LEVEL.  For example, each bit represents
	 * one of the bits in CDF_TRACE_LEVEL that may be turned on to have
	 * traces at that level logged, i.e. if CDF_TRACE_LEVEL_ERROR is
	 * == 2, then if bit 2 (low order) is turned ON, then ERROR traces
	 * will be printed to the trace log.
	 * Note that all bits turned OFF means no traces
	 */
	uint16_t moduleTraceLevel;

	/* 3 character string name for the module */
	unsigned char moduleNameStr[4]; /* 3 chars plus the NULL */
} moduleTraceInfo;

/* modules start with FATAL and ERROR traces enabled */
#define CDF_DEFAULT_TRACE_LEVEL	\
	((1 << CDF_TRACE_LEVEL_FATAL) | (1 << CDF_TRACE_LEVEL_ERROR))

/* Array of static data that contains all of the per module trace
 * information.  This includes the trace level for the module and
 * the 3 character 'name' of the module for marking the trace logs.
 * Modules not listed here have a zeroed entry (no name, no levels).
 */
moduleTraceInfo g_cdf_trace_info[CDF_MODULE_ID_MAX] = {
	[CDF_MODULE_ID_TLSHIM] = {CDF_DEFAULT_TRACE_LEVEL, "DP"},
	[CDF_MODULE_ID_WMI] = {CDF_DEFAULT_TRACE_LEVEL, "WMI"},
	[CDF_MODULE_ID_HDD] = {CDF_DEFAULT_TRACE_LEVEL, "HDD"},
	[CDF_MODULE_ID_SME] = {CDF_DEFAULT_TRACE_LEVEL, "SME"},
	[CDF_MODULE_ID_PE] = {CDF_DEFAULT_TRACE_LEVEL, "PE "},
	[CDF_MODULE_ID_WMA] = {CDF_DEFAULT_TRACE_LEVEL, "WMA"},
	[CDF_MODULE_ID_SYS] = {CDF_DEFAULT_TRACE_LEVEL, "SYS"},
	[CDF_MODULE_ID_CDF] = {CDF_DEFAULT_TRACE_LEVEL, "CDF"},
	[CDF_MODULE_ID_SAP] = {CDF_DEFAULT_TRACE_LEVEL, "SAP"},
	[CDF_MODULE_ID_HDD_SOFTAP] = {CDF_DEFAULT_TRACE_LEVEL, "HSP"},
	[CDF_MODULE_ID_HDD_DATA] = {CDF_DEFAULT_TRACE_LEVEL, "HDP"},
	[CDF_MODULE_ID_HDD_SAP_DATA] = {CDF_DEFAULT_TRACE_LEVEL, "SDP"},
	[CDF_MODULE_ID_BMI] = {CDF_DEFAULT_TRACE_LEVEL, "BMI"},
	[CDF_MODULE_ID_HIF] = {CDF_DEFAULT_TRACE_LEVEL, "HIF"},
	[CDF_MODULE_ID_TXRX] = {CDF_DEFAULT_TRACE_LEVEL, "TRX"},
	[CDF_MODULE_ID_HTT] = {CDF_DEFAULT_TRACE_LEVEL, "HTT"},
};

/* Static and Global variables */
/* protects the MTRACE ring buffer below */
static spinlock_t ltrace_lock;

static cdf_trace_record_t g_cdf_trace_tbl[MAX_CDF_TRACE_RECORDS];
/* global cdf trace data */
static t_cdf_trace_data g_cdf_trace_data;
/*
 * all the call back functions for dumping MTRACE messages from ring buffer
 * are stored in cdf_trace_cb_table,these callbacks are initialized during init
 * only so, we will make a copy of these call back functions and maintain in to
 * cdf_trace_restore_cb_table. Incase if we make modifications to
 * cdf_trace_cb_table, we can certainly retrieve all the call back functions
 * back from Restore Table
 */
static tp_cdf_trace_cb cdf_trace_cb_table[CDF_MODULE_ID_MAX];
static tp_cdf_trace_cb cdf_trace_restore_cb_table[CDF_MODULE_ID_MAX];

/* Static and Global variables */
/* protects the DP trace ring buffer below */
static spinlock_t l_dp_trace_lock;

static struct cdf_dp_trace_record_s
			g_cdf_dp_trace_tbl[MAX_CDF_DP_TRACE_RECORDS];

/*
 * all the options to configure/control DP trace are
 * defined in this structure
 */
static struct s_cdf_dp_trace_data g_cdf_dp_trace_data;
/*
 * all the call back functions for dumping DPTRACE messages from ring buffer
 * are stored in cdf_dp_trace_cb_table, callbacks are initialized during init
 */
static tp_cdf_dp_trace_cb cdf_dp_trace_cb_table[CDF_DP_TRACE_MAX];
+
+/**
+ * cdf_trace_set_level() - Set the trace level for a particular module
+ * @level : trace level
+ *
+ * Trace level is a member of the CDF_TRACE_LEVEL enumeration indicating
+ * the severity of the condition causing the trace message to be issued.
+ * More severe conditions are more likely to be logged.
+ *
+ * This is an external API that allows trace levels to be set for each module.
+ *
+ * Return:  nothing
+ */
+void cdf_trace_set_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level)
+{
+	/* make sure the caller is passing in a valid LEVEL */
+	if (level >= CDF_TRACE_LEVEL_MAX) {
+		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
+		       level);
+		return;
+	}
+
+	/* Treat 'none' differently.  NONE means we have to run off all
+	 * the bits in the bit mask so none of the traces appear. Anything
+	 * other than 'none' means we need to turn ON a bit in the bitmask
+	 */
+	if (CDF_TRACE_LEVEL_NONE == level)
+		g_cdf_trace_info[module].moduleTraceLevel =
+			CDF_TRACE_LEVEL_NONE;
+	else
+		/* set the desired bit in the bit mask for the module trace
+		 * level */
+		g_cdf_trace_info[module].moduleTraceLevel |=
+			CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
+}
+
+/**
+ * cdf_trace_set_module_trace_level() - Set module trace level
+ * @module: Module id
+ * @level: Trace level for a module, as a bitmask as per 'moduleTraceInfo'
+ *
+ * Sets the module trace level where the trace level is given as a bit mask
+ *
+ * Return: None
+ */
+void cdf_trace_set_module_trace_level(CDF_MODULE_ID module, uint32_t level)
+{
+	if (module < 0 || module >= CDF_MODULE_ID_MAX) {
+		pr_err("%s: Invalid module id %d passed\n", __func__, module);
+		return;
+	}
+	g_cdf_trace_info[module].moduleTraceLevel = level;
+}
+
/**
 * cdf_trace_set_value() - turn a single trace level on or off
 * @module: module whose mask is being changed (validated)
 * @level: trace level whose bit is set or cleared; NONE clears the
 *	whole mask, ALL sets every bit
 * @on: non-zero to enable the level, zero to disable it
 *
 * Return: nothing
 */
void cdf_trace_set_value(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
			 uint8_t on)
{
	/* make sure the caller is passing in a valid LEVEL */
	if (level < 0 || level >= CDF_TRACE_LEVEL_MAX) {
		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
		       level);
		return;
	}

	/* make sure the caller is passing in a valid module */
	if (module < 0 || module >= CDF_MODULE_ID_MAX) {
		pr_err("%s: Invalid module id %d passed in!\n", __func__,
		       module);
		return;
	}

	/* Treat 'none' differently.  NONE means we have to turn off all
	   the bits in the bit mask so none of the traces appear */
	if (CDF_TRACE_LEVEL_NONE == level) {
		g_cdf_trace_info[module].moduleTraceLevel =
			CDF_TRACE_LEVEL_NONE;
	}
	/* Treat 'All' differently.  All means we have to turn on all
	   the bits in the bit mask so all of the traces appear */
	else if (CDF_TRACE_LEVEL_ALL == level) {
		g_cdf_trace_info[module].moduleTraceLevel = 0xFFFF;
	} else {
		if (on)
			/* set the desired bit in the bit mask for the module
			   trace level */
			g_cdf_trace_info[module].moduleTraceLevel |=
				CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
		else
			/* clear the desired bit in the bit mask for the module
			   trace level */
			g_cdf_trace_info[module].moduleTraceLevel &=
				~(CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level));
	}
}
+
+/**
+ * cdf_trace_get_level() - get the trace level
+ * @level : trace level
+ *
+ * This is an external API that returns a bool value to signify if a
+ * particular trace level is set for the specified module.
+ * A member of the CDF_TRACE_LEVEL enumeration indicating the severity
+ * of the condition causing the trace message to be issued.
+ *
+ * Note that individual trace levels are the only valid values
+ * for this API.  CDF_TRACE_LEVEL_NONE and CDF_TRACE_LEVEL_ALL
+ * are not valid input and will return false
+ *
+ * Return:
+ *      false - the specified trace level for the specified module is OFF
+ *      true - the specified trace level for the specified module is ON
+ */
+bool cdf_trace_get_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level)
+{
+	bool traceOn = false;
+
+	if ((CDF_TRACE_LEVEL_NONE == level) ||
+	    (CDF_TRACE_LEVEL_ALL == level) || (level >= CDF_TRACE_LEVEL_MAX)) {
+		traceOn = false;
+	} else {
+		traceOn = (level & g_cdf_trace_info[module].moduleTraceLevel)
+			  ? true : false;
+	}
+
+	return traceOn;
+}
+
/**
 * cdf_snprintf() - printf-style formatting into a bounded buffer
 * @strBuffer: destination buffer
 * @size: size of @strBuffer in bytes; output is truncated and
 *        NUL-terminated to fit
 * @strFormat: printf-style format string; its arguments follow in the
 *             variable argument list
 *
 * Return: nothing
 */
void cdf_snprintf(char *strBuffer, unsigned int size, char *strFormat, ...)
{
	va_list val;

	va_start(val, strFormat);
	/* must be vsnprintf(): passing a va_list to snprintf() formats the
	 * va_list object itself instead of the caller's arguments
	 */
	vsnprintf(strBuffer, size, strFormat, val);
	va_end(val);
}
+
+#ifdef CDF_ENABLE_TRACING
+
/**
 * cdf_trace_msg() - externally called trace function
 * @module : Module identifier a member of the CDF_MODULE_ID
 *	enumeration that identifies the module issuing the trace message.
 * @level : Trace level a member of the CDF_TRACE_LEVEL enumeration
 *	indicating the severity of the condition causing the trace message
 *	to be issued.   More severe conditions are more likely to be logged.
 * @strFormat : Format string  in which the message to be logged.  This format
 *	string contains printf-like replacement parameters, which follow
 *	this parameter in the variable argument list.
 *
 * Checks whether @level is enabled in @module's trace mask and, if so,
 * formats the message behind a "wlan: [pid:level:module] " prefix and
 * emits it (to the logging socket service when available, else printk).
 *
 * NOTE(review): @level is used unvalidated as an index into
 * TRACE_LEVEL_STR below; callers are trusted to pass a value smaller
 * than CDF_TRACE_LEVEL_MAX — confirm all call sites.
 *
 *  Return:  nothing
 *
 */
void cdf_trace_msg(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
		   char *strFormat, ...)
{
	char strBuffer[CDF_TRACE_BUFFER_SIZE];
	int n;

	/* Print the trace message when the desired level bit is set in
	   the module trace level mask */
	if (g_cdf_trace_info[module].moduleTraceLevel &
	    CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)) {
		/* the trace level strings in an array.  these are ordered in
		 * the same order as the trace levels are defined in the enum
		 * (see CDF_TRACE_LEVEL) so we can index into this array with
		 * the level and get the right string. The cdf trace levels
		 * are... none, Fatal, Error, Warning, Info, InfoHigh, InfoMed,
		 * InfoLow, Debug
		 */
		static const char *TRACE_LEVEL_STR[] = { "  ", "F ", "E ", "W ",
						"I ", "IH", "IM", "IL", "D" };
		va_list val;
		va_start(val, strFormat);

		/* print the prefix string into the string buffer...
		 * pid 0 marks messages issued from interrupt context */
		n = snprintf(strBuffer, CDF_TRACE_BUFFER_SIZE,
			     "wlan: [%d:%2s:%3s] ",
			     in_interrupt() ? 0 : current->pid,
			     (char *)TRACE_LEVEL_STR[level],
			     (char *)g_cdf_trace_info[module].moduleNameStr);

		/* print the formatted log message after the prefix string,
		 * but only if the prefix itself was not truncated */
		if ((n >= 0) && (n < CDF_TRACE_BUFFER_SIZE)) {
			vsnprintf(strBuffer + n, CDF_TRACE_BUFFER_SIZE - n,
				  strFormat, val);
#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE)
			/* hand the line to the user-space logging service */
			wlan_log_to_user(level, (char *)strBuffer,
					 strlen(strBuffer));
#else
			pr_err("%s\n", strBuffer);
#endif
		}
		va_end(val);
	}
}
+
/**
 * cdf_trace_display() - print the per-module trace-level table
 *
 * Dumps one row per module showing which trace levels (FATAL..DEBUG) are
 * currently enabled in that module's bitmask; an "X" marks an enabled
 * level, a blank marks a disabled one.
 *
 * Return: nothing
 */
void cdf_trace_display(void)
{
	CDF_MODULE_ID moduleId;

	/* column header matching the per-row X/blank layout below */
	pr_err
		("     1)FATAL  2)ERROR  3)WARN  4)INFO  5)INFO_H  6)INFO_M  7)INFO_L 8)DEBUG\n");
	for (moduleId = 0; moduleId < CDF_MODULE_ID_MAX; ++moduleId) {
		pr_err
			("%2d)%s    %s        %s       %s       %s        %s         %s         %s        %s\n",
			(int)moduleId, g_cdf_trace_info[moduleId].moduleNameStr,
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_FATAL)) ? "X" :
			" ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_ERROR)) ? "X" :
			" ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_WARN)) ? "X" :
			" ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO)) ? "X" :
			" ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_HIGH)) ? "X"
			: " ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_MED)) ? "X"
			: " ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_LOW)) ? "X"
			: " ",
			(g_cdf_trace_info[moduleId].
			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_DEBUG)) ? "X" :
			" ");
	}
}
+
+/**
+ * cdf_trace_hex_dump() - externally called hex dump function
+ * @module : Module identifier a member of the CDF_MODULE_ID enumeration that
+ *	     identifies the module issuing the trace message.
+ * @level : Trace level a member of the CDF_TRACE_LEVEL enumeration indicating
+ *	    the severity of the condition causing the trace message to be
+ *	    issued. More severe conditions are more likely to be logged.
+ * @data : The base address of the buffer to be logged.
+ * @buf_len : The size of the buffer to be logged.
+ *
+ *  Checks the level of severity and accordingly prints the trace messages
+ *
+ *  Return :  nothing
+ */
+void cdf_trace_hex_dump(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
+			void *data, int buf_len)
+{
+	char *buf = (char *)data;
+	int i;
+
+	if (!(g_cdf_trace_info[module].moduleTraceLevel &
+	      CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)))
+		return;
+
+	for (i = 0; (i + 15) < buf_len; i += 16) {
+		cdf_trace_msg(module, level,
+			      "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
+			      buf[i],
+			      buf[i + 1],
+			      buf[i + 2],
+			      buf[i + 3],
+			      buf[i + 4],
+			      buf[i + 5],
+			      buf[i + 6],
+			      buf[i + 7],
+			      buf[i + 8],
+			      buf[i + 9],
+			      buf[i + 10],
+			      buf[i + 11],
+			      buf[i + 12],
+			      buf[i + 13], buf[i + 14], buf[i + 15]);
+	}
+
+	/* Dump the bytes in the last line */
+	for (; i < buf_len; i++)
+		cdf_trace_msg(module, level, "%02x ", buf[i]);
+}
+
+#endif
+
+/**
+ * cdf_trace_enable() - Enable MTRACE for specific modules
+ * @bitmask_of_moduleId : Bitmask according to enum of the modules.
+ *  32 [dec]  = 0010 0000 [bin] <enum of HDD is 5>
+ *  64 [dec]  = 0100 0000 [bin] <enum of SME is 6>
+ *  128 [dec] = 1000 0000 [bin] <enum of PE is 7>
+ * @enable : can be true or false true implies enabling MTRACE false implies
+ *		disabling MTRACE.
+ *
+ * Enable MTRACE for specific modules whose bits are set in bitmask and enable
+ * is true. if enable is false it disables MTRACE for that module. set the
+ * bitmask according to enum value of the modules.
+ * This functions will be called when you issue ioctl as mentioned following
+ * [iwpriv wlan0 setdumplog <value> <enable>].
+ * <value> - Decimal number, i.e. 64 decimal value shows only SME module,
+ * 128 decimal value shows only PE module, 192 decimal value shows PE and SME.
+ *
+ *
+ * Return : nothing
+ */
+void cdf_trace_enable(uint32_t bitmask_of_moduleId, uint8_t enable)
+{
+	int i;
+	if (bitmask_of_moduleId) {
+		for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
+			if (((bitmask_of_moduleId >> i) & 1)) {
+				if (enable) {
+					if (NULL !=
+					    cdf_trace_restore_cb_table[i]) {
+						cdf_trace_cb_table[i] =
+						cdf_trace_restore_cb_table[i];
+					}
+				} else {
+					cdf_trace_restore_cb_table[i] =
+						cdf_trace_cb_table[i];
+					cdf_trace_cb_table[i] = NULL;
+				}
+			}
+		}
+	} else {
+		if (enable) {
+			for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
+				if (NULL != cdf_trace_restore_cb_table[i]) {
+					cdf_trace_cb_table[i] =
+						cdf_trace_restore_cb_table[i];
+				}
+			}
+		} else {
+			for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
+				cdf_trace_restore_cb_table[i] =
+					cdf_trace_cb_table[i];
+				cdf_trace_cb_table[i] = NULL;
+			}
+		}
+	}
+}
+
+/**
+ * cdf_trace_init() - initializes cdf trace structures and variables
+ *
+ * Called immediately after cds_preopen, so that we can start recording HDD
+ * events ASAP.
+ *
+ * Return : nothing
+ */
+void cdf_trace_init(void)
+{
+	uint8_t i;
+	g_cdf_trace_data.head = INVALID_CDF_TRACE_ADDR;
+	g_cdf_trace_data.tail = INVALID_CDF_TRACE_ADDR;
+	g_cdf_trace_data.num = 0;
+	g_cdf_trace_data.enable = true;
+	g_cdf_trace_data.dumpCount = DEFAULT_CDF_TRACE_DUMP_COUNT;
+	g_cdf_trace_data.numSinceLastDump = 0;
+
+	for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
+		cdf_trace_cb_table[i] = NULL;
+		cdf_trace_restore_cb_table[i] = NULL;
+	}
+}
+
/**
 * cdf_trace() - puts the messages in to ring-buffer
 * @module : Enum of module, basically module id.
 * @code : Code to be recorded
 * @session : Session ID of the log
 * @data : Actual message contents
 *
 * This function will be called from each module who wants record the messages
 * in circular queue. Before calling this functions make sure you have
 * registered your module with cdf through cdf_trace_register function.
 *
 * NOTE(review): @module is used unvalidated to index cdf_trace_cb_table[];
 * callers are trusted to pass a valid module id — confirm call sites.
 *
 * Return : nothing
 */
void cdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data)
{
	tp_cdf_trace_record rec = NULL;
	unsigned long flags;

	if (!g_cdf_trace_data.enable)
		return;

	/* if module is not registered, don't record for that module */
	if (NULL == cdf_trace_cb_table[module])
		return;

	/* Acquire the lock so that only one thread at a time can fill the ring
	 * buffer; irqsave because records may also be logged from irq context
	 */
	spin_lock_irqsave(&ltrace_lock, flags);

	/* num counts valid records and saturates at the ring capacity */
	g_cdf_trace_data.num++;

	if (g_cdf_trace_data.num > MAX_CDF_TRACE_RECORDS)
		g_cdf_trace_data.num = MAX_CDF_TRACE_RECORDS;

	if (INVALID_CDF_TRACE_ADDR == g_cdf_trace_data.head) {
		/* first record */
		g_cdf_trace_data.head = 0;
		g_cdf_trace_data.tail = 0;
	} else {
		/* queue is not empty */
		uint32_t tail = g_cdf_trace_data.tail + 1;

		if (MAX_CDF_TRACE_RECORDS == tail)
			tail = 0;

		if (g_cdf_trace_data.head == tail) {
			/* full: advance head, overwriting the oldest record */
			if (MAX_CDF_TRACE_RECORDS == ++g_cdf_trace_data.head)
				g_cdf_trace_data.head = 0;
		}
		g_cdf_trace_data.tail = tail;
	}

	/* fill the slot at the (new) tail */
	rec = &g_cdf_trace_tbl[g_cdf_trace_data.tail];
	rec->code = code;
	rec->session = session;
	rec->data = data;
	rec->time = cdf_get_log_timestamp();
	rec->module = module;
	/* pid 0 marks records logged from interrupt context */
	rec->pid = (in_interrupt() ? 0 : current->pid);
	g_cdf_trace_data.numSinceLastDump++;
	spin_unlock_irqrestore(&ltrace_lock, flags);
}
+
/**
 * cdf_trace_spin_lock_init() - initializes the lock variable before use
 *
 * This function will be called from cds_alloc_global_context, we will have lock
 * available to use ASAP
 *
 * Return : CDF_STATUS_SUCCESS (initialization cannot fail)
 */
CDF_STATUS cdf_trace_spin_lock_init(void)
{
	/* ltrace_lock guards the MTRACE ring buffer (see cdf_trace) */
	spin_lock_init(&ltrace_lock);

	return CDF_STATUS_SUCCESS;
}
+
+/**
+ * cdf_trace_register() - registers the call back functions
+ * @moduleID - enum value of module
+ * @cdf_trace_callback - call back functions to display the messages in
+ *  particular format.
+ *
+ * Registers the call back functions to display the messages in particular
+ * format mentioned in these call back functions. This functions should be
+ * called by interested module in their init part as we will be ready to
+ * register as soon as modules are up.
+ *
+ * Return : nothing
+ */
+void cdf_trace_register(CDF_MODULE_ID moduleID,
+			tp_cdf_trace_cb cdf_trace_callback)
+{
+	cdf_trace_cb_table[moduleID] = cdf_trace_callback;
+}
+
/**
 * cdf_trace_dump_all() - Dump data from ring buffer via call back functions
 *			  registered with CDF
 * @pMac : Context of particular module
 * @code : Reason code; 0 matches every record
 * @session : Session id of log
 * @count : Number of lines to dump starting from tail to head
 * @bitmask_of_module : optional module filter; 0 dumps every module
 *
 * This function will be called up on issuing ioctl call as mentioned following
 * [iwpriv wlan0 dumplog 0 0 <n> <bitmask_of_module>]
 *
 *  <n> - number lines to dump starting from tail to head.
 *
 *  <bitmask_of_module> - if anybody wants to know how many messages were
 *  recorded for particular module/s mentioned by setbit in bitmask from last
 *  <n> messages. It is optional, if you don't provide then it will dump
 *  everything from buffer.
 *
 * Each record is copied by value while holding the lock, then displayed
 * with the lock dropped so the callbacks may log freely.
 *
 * NOTE(review): the writer (cdf_trace) takes this lock with
 * spin_lock_irqsave; the plain spin_lock here assumes this function is
 * never reached from interrupt context — confirm.
 *
 * Return : nothing
 */
void cdf_trace_dump_all(void *pMac, uint8_t code, uint8_t session,
			uint32_t count, uint32_t bitmask_of_module)
{
	cdf_trace_record_t pRecord;
	int32_t i, tail;

	if (!g_cdf_trace_data.enable) {
		CDF_TRACE(CDF_MODULE_ID_SYS,
			  CDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
		return;
	}

	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
		  "Total Records: %d, Head: %d, Tail: %d",
		  g_cdf_trace_data.num, g_cdf_trace_data.head,
		  g_cdf_trace_data.tail);

	/* acquire the lock so that only one thread at a time can read
	 * the ring buffer
	 */
	spin_lock(&ltrace_lock);

	if (g_cdf_trace_data.head != INVALID_CDF_TRACE_ADDR) {
		i = g_cdf_trace_data.head;
		tail = g_cdf_trace_data.tail;

		/* when a count is given, start the walk so that exactly
		 * `count` records ending at the tail are visited, wrapping
		 * backwards past index 0 if needed */
		if (count) {
			if (count > g_cdf_trace_data.num)
				count = g_cdf_trace_data.num;
			if (tail >= (count - 1))
				i = tail - count + 1;
			else if (count != MAX_CDF_TRACE_RECORDS)
				i = MAX_CDF_TRACE_RECORDS - ((count - 1) -
							     tail);
		}

		/* copy the first record by value before dropping the lock */
		pRecord = g_cdf_trace_tbl[i];
		/* right now we are not using numSinceLastDump member but
		 * in future we might re-visit and use this member to track
		 * how many latest messages got added while we were dumping
		 * from ring buffer
		 */
		g_cdf_trace_data.numSinceLastDump = 0;
		spin_unlock(&ltrace_lock);
		for (;; ) {
			/* display the record if it matches the reason-code
			 * filter and its module has a registered callback */
			if ((code == 0 || (code == pRecord.code)) &&
			    (cdf_trace_cb_table[pRecord.module] != NULL)) {
				if (0 == bitmask_of_module) {
					cdf_trace_cb_table[pRecord.
							   module] (pMac,
								    &pRecord,
								    (uint16_t)
								    i);
				} else {
					if (bitmask_of_module &
					    (1 << pRecord.module)) {
						cdf_trace_cb_table[pRecord.
								   module]
							(pMac, &pRecord,
							(uint16_t) i);
					}
				}
			}

			if (i == tail)
				break;
			i += 1;

			/* re-take the lock only to snapshot the next record */
			spin_lock(&ltrace_lock);
			if (MAX_CDF_TRACE_RECORDS == i) {
				i = 0;
				pRecord = g_cdf_trace_tbl[0];
			} else {
				pRecord = g_cdf_trace_tbl[i];
			}
			spin_unlock(&ltrace_lock);
		}
	} else {
		/* ring is empty */
		spin_unlock(&ltrace_lock);
	}
}
+
+/**
+ * cdf_dp_trace_init() - enables the DP trace
+ * Called during driver load and it enables DP trace
+ *
+ * Return: None
+ */
+void cdf_dp_trace_init(void)
+{
+	uint8_t i;
+
+	cdf_dp_trace_spin_lock_init();
+	g_cdf_dp_trace_data.head = INVALID_CDF_DP_TRACE_ADDR;
+	g_cdf_dp_trace_data.tail = INVALID_CDF_DP_TRACE_ADDR;
+	g_cdf_dp_trace_data.num = 0;
+	g_cdf_dp_trace_data.proto_bitmap = 0;
+	g_cdf_dp_trace_data.no_of_record = 0;
+	g_cdf_dp_trace_data.verbosity    = CDF_DP_TRACE_VERBOSITY_DEFAULT;
+	g_cdf_dp_trace_data.enable = true;
+
+	for (i = 0; i < CDF_DP_TRACE_MAX; i++)
+		cdf_dp_trace_cb_table[i] = cdf_dp_display_record;
+}
+
/**
 * cdf_dp_trace_set_value() - Configure the value to control DP trace
 * @proto_bitmap  : defines the protocol to be tracked
 * @no_of_record : defines the nth packet which is traced
 * @verbosity     : defines the verbosity level
 *
 * Values are consumed by cdf_dp_trace_set_track()/cdf_dp_trace_enable_track().
 *
 * Return: None
 */
void cdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_record,
			 uint8_t verbosity)
{
	/* plain stores; readers tolerate a transiently mixed config */
	g_cdf_dp_trace_data.proto_bitmap = proto_bitmap;
	g_cdf_dp_trace_data.no_of_record = no_of_record;
	g_cdf_dp_trace_data.verbosity    = verbosity;
	return;
}
+
+/**
+ * cdf_dp_trace_enable_track() - enable the tracing for netbuf
+ * @code : defines the event
+ *
+ * Return: true or false depends on whether tracing enabled
+ */
+static bool cdf_dp_trace_enable_track(enum CDF_DP_TRACE_ID code)
+{
+	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_HIGH)
+		return true;
+	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_MEDIUM
+		&& (code <= CDF_DP_TRACE_HIF_PACKET_PTR_RECORD))
+		return true;
+	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_LOW
+		&& (code <= CDF_DP_TRACE_CE_PACKET_RECORD))
+		return true;
+	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_DEFAULT
+		&& (code == CDF_DP_TRACE_DROP_PACKET_RECORD))
+		return true;
+	return false;
+}
+
/**
 * cdf_dp_trace_set_track() - Marks whether the packet needs to be traced
 * @nbuf  : defines the netbuf
 *
 * Marks @nbuf for DP tracing when either (a) it matches the configured
 * protocol bitmap, or (b) nth-packet sampling is on and this is the nth
 * packet.  The global packet counter is advanced for every call.
 *
 * Return: None
 */
void cdf_dp_trace_set_track(cdf_nbuf_t nbuf)
{
	spin_lock_bh(&l_dp_trace_lock);
	/* every packet bumps the counter, traced or not */
	g_cdf_dp_trace_data.count++;
	if (g_cdf_dp_trace_data.proto_bitmap != 0) {
		/* protocol-based selection */
		if (cds_pkt_get_proto_type(nbuf,
			g_cdf_dp_trace_data.proto_bitmap, 0)) {
			CDF_NBUF_SET_DP_TRACE(nbuf, 1);
		}
	}
	/* nth-packet sampling, independent of the protocol match */
	if ((g_cdf_dp_trace_data.no_of_record != 0) &&
		(g_cdf_dp_trace_data.count %
			g_cdf_dp_trace_data.no_of_record == 0)) {
		CDF_NBUF_SET_DP_TRACE(nbuf, 1);
	}
	spin_unlock_bh(&l_dp_trace_lock);
	return;
}
+
+/**
+ * dump_hex_trace() - Display the data in buffer
+ * @buf:     buffer which contains data to be displayed
+ * @buf_len: defines the size of the data to be displayed
+ *
+ * Return: None
+ */
+static void dump_hex_trace(uint8_t *buf, uint8_t buf_len)
+{
+	uint8_t i = 0;
+	/* Dump the bytes in the last line */
+	cdf_print("DATA: ");
+	for (i = 0; i < buf_len; i++)
+		cdf_print("%02x ", buf[i]);
+	cdf_print("\n");
+}
+
+/**
+ * cdf_dp_display_trace() - Displays a record in DP trace
+ * @pRecord  : pointer to a record in DP trace
+ * @recIndex : record index
+ *
+ * Return: None
+ */
+void cdf_dp_display_record(struct cdf_dp_trace_record_s *pRecord ,
+				uint16_t recIndex)
+{
+	cdf_print("INDEX: %04d TIME: %012llu CODE: %02d\n", recIndex,
+						pRecord->time, pRecord->code);
+	switch (pRecord->code) {
+	case  CDF_DP_TRACE_HDD_TX_TIMEOUT:
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+						"HDD TX Timeout\n");
+		break;
+	case  CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+						"HDD SoftAP TX Timeout\n");
+		break;
+	case  CDF_DP_TRACE_VDEV_PAUSE:
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+						"VDEV Pause\n");
+		break;
+	case  CDF_DP_TRACE_VDEV_UNPAUSE:
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+						"VDEV UnPause\n");
+		break;
+	default:
+		dump_hex_trace(pRecord->data, pRecord->size);
+	}
+	return;
+}
+
/**
 * cdf_dp_trace() - Stores the data in buffer
 * @nbuf  : defines the netbuf
 * @code : defines the event
 * @data : defines the data to be stored
 * @size : defines the size of the data record
 *
 * Records a DP trace event in the ring buffer.  A NULL @nbuf is only
 * accepted for the timeout/pause events; data packets must additionally
 * be marked for tracing (see cdf_dp_trace_set_track) and pass the
 * verbosity filter.
 *
 * Return: None
 */
void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
			uint8_t *data, uint8_t size)
{
	struct cdf_dp_trace_record_s *rec = NULL;

	/* Return when Dp trace is not enabled */
	if (!g_cdf_dp_trace_data.enable)
		return;

	/* If nbuf is NULL, check for VDEV PAUSE, UNPAUSE, TIMEOUT */
	if (!nbuf) {
		switch (code) {
		case CDF_DP_TRACE_HDD_TX_TIMEOUT:
		case CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
		case CDF_DP_TRACE_VDEV_PAUSE:
		case CDF_DP_TRACE_VDEV_UNPAUSE:
			if (cdf_dp_trace_enable_track(code))
				goto  register_record;
			else
				return;

		default:
			return;
		}
	}

	/* Return when the packet is not a data packet */
	if (NBUF_GET_PACKET_TRACK(nbuf) != NBUF_TX_PKT_DATA_TRACK)
		return;

	/* Return when nbuf is not marked for dp tracing or
	 * verbosity does not allow
	 */
	if (cdf_dp_trace_enable_track(code) == false ||
			!CDF_NBUF_GET_DP_TRACE(nbuf))
		return;

	/* Acquire the lock so that only one thread at a time can fill the ring
	 * buffer
	 */

register_record:

	spin_lock_bh(&l_dp_trace_lock);

	/* num counts valid records and saturates at the ring capacity */
	g_cdf_dp_trace_data.num++;

	if (g_cdf_dp_trace_data.num > MAX_CDF_DP_TRACE_RECORDS)
		g_cdf_dp_trace_data.num = MAX_CDF_DP_TRACE_RECORDS;

	if (INVALID_CDF_DP_TRACE_ADDR == g_cdf_dp_trace_data.head) {
		/* first record */
		g_cdf_dp_trace_data.head = 0;
		g_cdf_dp_trace_data.tail = 0;
	} else {
		/* queue is not empty */
		g_cdf_dp_trace_data.tail++;

		if (MAX_CDF_DP_TRACE_RECORDS == g_cdf_dp_trace_data.tail)
			g_cdf_dp_trace_data.tail = 0;

		if (g_cdf_dp_trace_data.head == g_cdf_dp_trace_data.tail) {
			/* full: advance head, overwriting the oldest record */
			if (MAX_CDF_DP_TRACE_RECORDS ==
				++g_cdf_dp_trace_data.head)
				g_cdf_dp_trace_data.head = 0;
		}
	}

	rec = &g_cdf_dp_trace_tbl[g_cdf_dp_trace_data.tail];
	rec->code = code;
	rec->size = 0;
	if (data != NULL && size > 0) {
		/* clamp the payload to the fixed per-record capacity */
		if (size > CDF_DP_TRACE_RECORD_SIZE)
			size = CDF_DP_TRACE_RECORD_SIZE;

		rec->size = size;
		switch (code) {
		case CDF_DP_TRACE_HDD_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_CE_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_HTT_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_HTC_PACKET_PTR_RECORD:
		case CDF_DP_TRACE_HIF_PACKET_PTR_RECORD:
			/* PTR records store the pointer VALUE itself: &data
			 * is the address of the argument, so the bytes copied
			 * are the pointer, not the buffer it points at */
			cdf_mem_copy(rec->data, (uint8_t *)(&data), size);
			break;

		case CDF_DP_TRACE_DROP_PACKET_RECORD:
		case CDF_DP_TRACE_HDD_PACKET_RECORD:
		case CDF_DP_TRACE_CE_PACKET_RECORD:
			/* full-payload records copy the pointed-to bytes */
			cdf_mem_copy(rec->data, data, size);
			break;
		default:
			break;
		}
	}
	rec->time = cdf_get_log_timestamp();
	/* pid 0 marks records logged from interrupt context */
	rec->pid = (in_interrupt() ? 0 : current->pid);
	spin_unlock_bh(&l_dp_trace_lock);
}
+
/**
 * cdf_dp_trace_spin_lock_init() - initializes the lock variable before use
 * This function will be called from cds_alloc_global_context, we will have lock
 * available to use ASAP
 *
 * Return : nothing
 */
void cdf_dp_trace_spin_lock_init(void)
{
	/* l_dp_trace_lock guards the DP trace ring buffer and counters */
	spin_lock_init(&l_dp_trace_lock);

	return;
}
+
/**
 * cdf_dp_trace_dump_all() - Dump data from ring buffer via call back functions
 *			  registered with CDF
 * @count : Number of lines to dump starting from tail to head; 0 dumps
 *	    the whole ring
 *
 * Each record is copied by value under the lock and displayed with the
 * lock dropped.
 *
 * NOTE(review): cdf_dp_trace_cb_table is indexed by the stored record
 * code without a bounds check; assumes every recorded code is below
 * CDF_DP_TRACE_MAX — confirm.
 *
 * Return : nothing
 */
void cdf_dp_trace_dump_all(uint32_t count)
{
	struct cdf_dp_trace_record_s pRecord;
	int32_t i, tail;

	if (!g_cdf_dp_trace_data.enable) {
		CDF_TRACE(CDF_MODULE_ID_SYS,
			  CDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
		return;
	}

	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
		  "Total Records: %d, Head: %d, Tail: %d",
		  g_cdf_dp_trace_data.num, g_cdf_dp_trace_data.head,
		  g_cdf_dp_trace_data.tail);

	/* acquire the lock so that only one thread at a time can read
	 * the ring buffer
	 */
	spin_lock_bh(&l_dp_trace_lock);

	if (g_cdf_dp_trace_data.head != INVALID_CDF_DP_TRACE_ADDR) {
		i = g_cdf_dp_trace_data.head;
		tail = g_cdf_dp_trace_data.tail;

		/* when a count is given, start the walk so that exactly
		 * `count` records ending at the tail are visited, wrapping
		 * backwards past index 0 if needed */
		if (count) {
			if (count > g_cdf_dp_trace_data.num)
				count = g_cdf_dp_trace_data.num;
			if (tail >= (count - 1))
				i = tail - count + 1;
			else if (count != MAX_CDF_DP_TRACE_RECORDS)
				i = MAX_CDF_DP_TRACE_RECORDS - ((count - 1) -
							     tail);
		}

		/* copy the first record by value before dropping the lock */
		pRecord = g_cdf_dp_trace_tbl[i];
		spin_unlock_bh(&l_dp_trace_lock);
		for (;; ) {

			cdf_dp_trace_cb_table[pRecord.
					   code] (&pRecord, (uint16_t)i);
			if (i == tail)
				break;
			i += 1;

			/* re-take the lock only to snapshot the next record */
			spin_lock_bh(&l_dp_trace_lock);
			if (MAX_CDF_DP_TRACE_RECORDS == i)
				i = 0;

			pRecord = g_cdf_dp_trace_tbl[i];
			spin_unlock_bh(&l_dp_trace_lock);
		}
	} else {
		/* ring is empty */
		spin_unlock_bh(&l_dp_trace_lock);
	}
}

+ 78 - 0
core/cdf/src/i_cdf_atomic.h

@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
#ifndef I_CDF_ATOMIC_H
#define I_CDF_ATOMIC_H

/*
 * Linux-specific implementation of the CDF atomic abstraction: each
 * wrapper is a thin pass-through to the kernel's atomic_t API.
 */

#include <cdf_status.h>         /* CDF_STATUS */

#include <linux/atomic.h>

/* CDF atomic counter is the kernel atomic_t */
typedef atomic_t __cdf_atomic_t;

/* initialize the counter to zero; always succeeds */
static inline CDF_STATUS __cdf_atomic_init(__cdf_atomic_t *v)
{
	atomic_set(v, 0);

	return CDF_STATUS_SUCCESS;
}

/* read the current counter value */
static inline uint32_t __cdf_atomic_read(__cdf_atomic_t *v)
{
	return atomic_read(v);
}

/* atomically increment the counter */
static inline void __cdf_atomic_inc(__cdf_atomic_t *v)
{
	atomic_inc(v);
}

/* atomically decrement the counter */
static inline void __cdf_atomic_dec(__cdf_atomic_t *v)
{
	atomic_dec(v);
}

/* atomically add @i to the counter */
static inline void __cdf_atomic_add(int i, __cdf_atomic_t *v)
{
	atomic_add(i, v);
}

/* atomically decrement; returns non-zero iff the result is zero */
static inline uint32_t __cdf_atomic_dec_and_test(__cdf_atomic_t *v)
{
	return atomic_dec_and_test(v);
}

/* atomically store @i into the counter */
static inline void __cdf_atomic_set(__cdf_atomic_t *v, int i)
{
	atomic_set(v, i);
}

/* atomically increment and return the new value */
static inline uint32_t __cdf_atomic_inc_return(__cdf_atomic_t *v)
{
	return atomic_inc_return(v);
}

#endif

+ 106 - 0
core/cdf/src/i_cdf_defer.h

@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
#ifndef _I_CDF_DEFER_H
#define _I_CDF_DEFER_H

/*
 * Linux-specific implementation of the CDF deferred-execution
 * abstraction: "work" maps to the kernel workqueue API (process
 * context), "bh" maps to tasklets (softirq context).
 */

#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
#include <cdf_types.h>
#include <cdf_status.h>
#include <cdf_trace.h>

/* bottom half == kernel tasklet */
typedef struct tasklet_struct __cdf_bh_t;

/* tasklet callback signature */
typedef void (*__cdf_bh_fn_t)(unsigned long arg);

/* wrapper around the real task func: __cdf_defer_func() unpacks fn/arg
 * and invokes them from the workqueue */
typedef struct {
	struct work_struct work;
	cdf_defer_fn_t fn;
	void *arg;
} __cdf_work_t;

extern void __cdf_defer_func(struct work_struct *work);

/* set up @work to call @func(@arg) when scheduled; @hdl is unused.
 * Uses the CNSS work-init wrapper when available. */
static inline CDF_STATUS
__cdf_init_work(cdf_handle_t hdl,
		__cdf_work_t *work, cdf_defer_fn_t func, void *arg)
{
	/* Initialize func and argument in work struct */
	work->fn = func;
	work->arg = arg;
#ifdef CONFIG_CNSS
	cnss_init_work(&work->work, __cdf_defer_func);
#else
	INIT_WORK(&work->work, __cdf_defer_func);
#endif
	return CDF_STATUS_SUCCESS;
}

/* queue @work on the system workqueue */
static inline CDF_STATUS __cdf_sched_work(cdf_handle_t hdl, __cdf_work_t *work)
{
	schedule_work(&work->work);
	return CDF_STATUS_SUCCESS;
}

/* NOTE(review): no-op — pending work is neither cancelled nor flushed;
 * confirm callers do not expect cancel_work_sync() semantics */
static inline CDF_STATUS
__cdf_disable_work(cdf_handle_t hdl, __cdf_work_t *work)
{
	return CDF_STATUS_SUCCESS;
}

/* set up tasklet @bh to call @func(@arg); the cdf_defer_fn_t is cast to
 * the tasklet signature (void * vs unsigned long argument) */
static inline CDF_STATUS __cdf_init_bh(cdf_handle_t hdl,
				       struct tasklet_struct *bh,
				       cdf_defer_fn_t func, void *arg)
{
	tasklet_init(bh, (__cdf_bh_fn_t) func, (unsigned long)arg);

	return CDF_STATUS_SUCCESS;
}

/* schedule the tasklet to run */
static inline CDF_STATUS
__cdf_sched_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
{
	tasklet_schedule(bh);

	return CDF_STATUS_SUCCESS;
}

/* kill the tasklet: waits for a running instance to finish */
static inline CDF_STATUS
__cdf_disable_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
{
	tasklet_kill(bh);

	return CDF_STATUS_SUCCESS;
}

#endif /*_I_CDF_DEFER_H*/

+ 62 - 0
core/cdf/src/i_cdf_event.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
#if !defined(__I_CDF_EVENT_H)
#define __I_CDF_EVENT_H

/**
 * DOC: i_cdf_event.h
 *
 * Linux-specific definitions for CDF Events
 */

/* Include Files */
#include <cdf_types.h>
#include <linux/completion.h>

/* Preprocessor definitions and constants */

/* magic stored in cdf_event_t.cookie; presumably checked by the cdf_event
 * APIs to detect use of an uninitialized event — see cdf_event.c */
#define LINUX_EVENT_COOKIE 0x12341234

/* kernels >= 3.14 removed the INIT_COMPLETION() macro; provide it in
 * terms of its replacement, reinit_completion() */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
#define INIT_COMPLETION(event) reinit_completion(&event)
#endif

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Type declarations */

/* CDF event: a kernel completion plus an initialization cookie */
typedef struct evt {
	struct completion complete;
	uint32_t cookie;
} cdf_event_t;

#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __I_CDF_EVENT_H */

+ 255 - 0
core/cdf/src/i_cdf_lock.h

@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__I_CDF_LOCK_H)
+#define __I_CDF_LOCK_H
+
+/**
+ * DOC: i_cdf_lock.h
+ *
+ * Linux-specific definitions for CDF Locks
+ *
+ */
+
+/* Include Files */
+#include <cdf_types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif
+
+/* Preprocessor definitions and constants */
+
+/* define for flag */
+#define ADF_OS_LINUX_UNLOCK_BH  1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * typedef struct - cdf_mutex_t
+ * @m_lock: Mutex lock
+ * @cookie: Lock cookie
+ * @processID: Process ID to track lock
+ * @state: Lock status
+ * @refcount: Reference count for recursive lock
+ */
+typedef struct cdf_lock_s {
+	struct mutex m_lock;
+	uint32_t cookie;
+	int processID;
+	uint32_t state;
+	uint8_t refcount;
+} cdf_mutex_t;
+
+/**
+ * typedef struct - cdf_spinlock_t
+ * @spinlock: Spin lock
+ * @flags: Lock flag
+ * @_flags: Internal lock flag
+ */
+typedef struct __cdf_spinlock {
+	spinlock_t spinlock;
+	unsigned int flags;
+	unsigned long _flags;
+} cdf_spinlock_t;
+
+typedef cdf_spinlock_t __cdf_spinlock_t;
+typedef struct semaphore __cdf_semaphore_t;
+
+#if defined CONFIG_CNSS
+typedef struct wakeup_source cdf_wake_lock_t;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+typedef struct wake_lock cdf_wake_lock_t;
+#else
+typedef int cdf_wake_lock_t;
+#endif
+
+/* Function declarations and documentation */
+
+/**
+ * __cdf_semaphore_init() - initialize the semaphore
+ * @m: Semaphore object
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+static inline CDF_STATUS __cdf_semaphore_init(struct semaphore *m)
+{
+	sema_init(m, 1);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_semaphore_acquire() - acquire semaphore
+ * @m: Semaphore object
+ *
+ * Return: 0
+ */
+static inline int
+__cdf_semaphore_acquire(cdf_device_t osdev, struct semaphore *m)
+{
+	down(m);
+	return 0;
+}
+
+/**
+ * __cdf_semaphore_release() - release semaphore
+ * @m: Semaphore object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_semaphore_release(cdf_device_t osdev, struct semaphore *m)
+{
+	up(m);
+}
+
+/**
+ * __cdf_spinlock_init() - initialize spin lock
+ * @lock: Spin lock object
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+static inline CDF_STATUS __cdf_spinlock_init(__cdf_spinlock_t *lock)
+{
+	spin_lock_init(&lock->spinlock);
+	lock->flags = 0;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+#define __cdf_spinlock_destroy(lock)
+/**
+ * __cdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_lock(__cdf_spinlock_t *lock)
+{
+	spin_lock(&lock->spinlock);
+}
+
+/**
+ * __cdf_spin_unlock() - Unlock the spinlock and enables the Preemption
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_unlock(__cdf_spinlock_t *lock)
+{
+	spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __cdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
+ *				(Preemptive) and disable IRQs
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_lock_irqsave(__cdf_spinlock_t *lock)
+{
+	spin_lock_irqsave(&lock->spinlock, lock->_flags);
+}
+/**
+ * __cdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
+ *					Preemption and enable IRQ
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_unlock_irqrestore(__cdf_spinlock_t *lock)
+{
+	spin_unlock_irqrestore(&lock->spinlock, lock->_flags);
+}
+
+/*
+ * Synchronous versions - only for OS' that have interrupt disable
+ */
+#define __cdf_spin_lock_irq(_pLock, _flags)    spin_lock_irqsave(_pLock, _flags)
+#define __cdf_spin_unlock_irq(_pLock, _flags)  spin_unlock_irqrestore(_pLock, _flags)
+
+/**
+ * __cdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_lock_bh(__cdf_spinlock_t *lock)
+{
+	if (likely(irqs_disabled() || in_softirq())) {
+		spin_lock(&lock->spinlock);
+	} else {
+		spin_lock_bh(&lock->spinlock);
+		lock->flags |= ADF_OS_LINUX_UNLOCK_BH;
+	}
+
+}
+
+/**
+ * __cdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void
+__cdf_spin_unlock_bh(__cdf_spinlock_t *lock)
+{
+	if (unlikely(lock->flags & ADF_OS_LINUX_UNLOCK_BH)) {
+		lock->flags &= ~ADF_OS_LINUX_UNLOCK_BH;
+		spin_unlock_bh(&lock->spinlock);
+	} else
+		spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __cdf_in_softirq() - in soft irq context
+ *
+ * Return: true if in softirq context, else false
+ */
+static inline bool __cdf_in_softirq(void)
+{
+	return in_softirq();
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __I_CDF_LOCK_H */

+ 61 - 0
core/cdf/src/i_cdf_mc_timer.h

@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__I_CDF_MC_TIMER_H)
+#define __I_CDF_MC_TIMER_H
+
+/**
+ * DOC: i_cdf_mc_timer.h
+ *
+ * Linux-specific definitions for CDF timers serialized to MC thread
+ */
+
+/* Include Files */
+#include <cdf_mc_timer.h>
+#include <cdf_types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+/* Preprocessor definitions and constants */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/* Type declarations */
+
+typedef struct cdf_mc_timer_platform_s {
+	struct timer_list Timer;
+	int threadID;
+	uint32_t cookie;
+	spinlock_t spinlock;
+} cdf_mc_timer_platform_t;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __I_CDF_MC_TIMER_H */

+ 1092 - 0
core/cdf/src/i_cdf_nbuf.h

@@ -0,0 +1,1092 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_cdf_nbuf.h
+ *
+ * Linux implementation of skbuf
+ */
+#ifndef _I_CDF_NET_BUF_H
+#define _I_CDF_NET_BUF_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <cdf_types.h>
+#include <cdf_status.h>
+
+#define __CDF_NBUF_NULL   NULL
+
+
+/*
+ * Use the socket buffer as the underlying implementation of skbuf.
+ * Linux uses sk_buff to represent both packet and data,
+ * so we use sk_buff to represent both as skbuf.
+ */
+typedef struct sk_buff *__cdf_nbuf_t;
+
+typedef void (*__cdf_nbuf_callback_fn)(struct sk_buff *skb);
+#define OSDEP_EAPOL_TID 6       /* send it on VO queue */
+
+/* CVG_NBUF_MAX_OS_FRAGS -
+ * max tx fragments provided by the OS
+ */
+#define CVG_NBUF_MAX_OS_FRAGS 1
+
+/* CVG_NBUF_MAX_EXTRA_FRAGS -
+ * max tx fragments added by the driver
+ * The driver will always add one tx fragment (the tx descriptor) and may
+ * add a second tx fragment (e.g. a TSO segment's modified IP header).
+ */
+#define CVG_NBUF_MAX_EXTRA_FRAGS 2
+
+typedef void (*cdf_nbuf_trace_update_t)(char *);
+
+/**
+ * struct cvg_nbuf_cb - network buffer control block
+ * @data_attr: Value that is programmed in CE descriptor, contains:
+ *		1) CE classification enablement bit
+ *		2) Pkt type (802.3 or Ethernet Type II)
+ *		3) Pkt Offset (Usually the length of HTT/HTC desc.)
+ * @trace: info for DP tracing
+ * @mapped_paddr_lo: DMA mapping info
+ * @extra_frags: Extra tx fragments
+ * @owner_id: Owner id
+ * @cdf_nbuf_callback_fn: Callback function
+ * @priv_data: IPA specific priv data
+ * @proto_type: Protocol type
+ * @vdev_id: vdev id
+ * @tx_htt2_frm: HTT 2 frame
+ * @tx_htt2_reserved: HTT 2 reserved bits
+ */
+struct cvg_nbuf_cb {
+	uint32_t data_attr;
+	/*
+	 * Store info for data path tracing
+	 */
+	struct {
+		uint8_t packet_state;
+		uint8_t packet_track;
+		uint8_t dp_trace;
+	} trace;
+
+	/*
+	 * Store the DMA mapping info for the network buffer fragments
+	 * provided by the OS.
+	 */
+	uint32_t mapped_paddr_lo[CVG_NBUF_MAX_OS_FRAGS];
+
+	/* store extra tx fragments provided by the driver */
+	struct {
+		/* vaddr -
+		 * CPU address (a.k.a. virtual address) of the tx fragments
+		 * added by the driver
+		 */
+		unsigned char *vaddr[CVG_NBUF_MAX_EXTRA_FRAGS];
+		/* paddr_lo -
+		 * bus address (a.k.a. physical address) of the tx fragments
+		 * added by the driver
+		 */
+		uint32_t paddr_lo[CVG_NBUF_MAX_EXTRA_FRAGS];
+		uint16_t len[CVG_NBUF_MAX_EXTRA_FRAGS];
+		uint8_t num;    /* how many extra frags has the driver added */
+		uint8_t
+		/*
+		 * Store a wordstream vs. bytestream flag for each extra
+		 * fragment, plus one more flag for the original fragment(s)
+		 * of the netbuf.
+		 */
+wordstream_flags:CVG_NBUF_MAX_EXTRA_FRAGS + 1;
+	} extra_frags;
+	uint32_t owner_id;
+	__cdf_nbuf_callback_fn cdf_nbuf_callback_fn;
+	unsigned long priv_data;
+#ifdef QCA_PKT_PROTO_TRACE
+	unsigned char proto_type;
+	unsigned char vdev_id;
+#endif /* QCA_PKT_PROTO_TRACE */
+#ifdef QCA_TX_HTT2_SUPPORT
+	unsigned char tx_htt2_frm:1;
+	unsigned char tx_htt2_reserved:7;
+#endif /* QCA_TX_HTT2_SUPPORT */
+};
+#define NBUF_OWNER_ID(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->owner_id)
+#define NBUF_OWNER_PRIV_DATA(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->priv_data)
+#define NBUF_CALLBACK_FN(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)
+#define NBUF_CALLBACK_FN_EXEC(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)(skb)
+#define NBUF_MAPPED_PADDR_LO(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->mapped_paddr_lo[0])
+#define NBUF_NUM_EXTRA_FRAGS(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.num)
+#define NBUF_EXTRA_FRAG_VADDR(skb, frag_num) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.vaddr[(frag_num)])
+#define NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num)	\
+	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.paddr_lo[(frag_num)])
+#define NBUF_EXTRA_FRAG_LEN(skb, frag_num) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.len[(frag_num)])
+#define NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.wordstream_flags)
+
+#ifdef QCA_PKT_PROTO_TRACE
+#define NBUF_SET_PROTO_TYPE(skb, proto_type) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type = proto_type)
+#define NBUF_GET_PROTO_TYPE(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type)
+#else
+#define NBUF_SET_PROTO_TYPE(skb, proto_type);
+#define NBUF_GET_PROTO_TYPE(skb) 0;
+#endif /* QCA_PKT_PROTO_TRACE */
+
+#ifdef QCA_TX_HTT2_SUPPORT
+#define NBUF_SET_TX_HTT2_FRM(skb, candi) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm = candi)
+#define NBUF_GET_TX_HTT2_FRM(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm)
+#else
+#define NBUF_SET_TX_HTT2_FRM(skb, candi)
+#define NBUF_GET_TX_HTT2_FRM(skb) 0
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+#define NBUF_DATA_ATTR_SET(skb, data_attr)	\
+	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr = data_attr)
+
+#define NBUF_DATA_ATTR_GET(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr)
+
+#if defined(FEATURE_LRO)
+/**
+ * struct nbuf_rx_cb - network buffer control block
+ * on the receive path of the skb
+ * @lro_eligible: indicates whether the msdu is LRO eligible
+ * @tcp_proto: indicates if this is a TCP packet
+ * @tcp_pure_ack: indicates if this is a TCP pure ACK (no payload)
+ * @ipv6_proto: indicates if this is an IPv6 packet
+ * @ip_offset: offset to the IP header
+ * @tcp_offset: offset to the TCP header
+ * @tcp_udp_chksum: TCP payload checksum
+ * @tcp_win: TCP window size
+ * @tcp_seq_num: TCP sequence number
+ * @tcp_ack_num: TCP acknowledgement number
+ * @flow_id_toeplitz: 32 bit 5-tuple flow id toeplitz hash
+ */
+struct nbuf_rx_cb {
+	uint32_t lro_eligible:1,
+		tcp_proto:1,
+		tcp_pure_ack:1,
+		ipv6_proto:1,
+		ip_offset:7,
+		tcp_offset:7;
+	uint32_t tcp_udp_chksum:16,
+		tcp_win:16;
+	uint32_t tcp_seq_num;
+	uint32_t tcp_ack_num;
+	uint32_t flow_id_toeplitz;
+};
+
+#define NBUF_LRO_ELIGIBLE(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->lro_eligible)
+#define NBUF_TCP_PROTO(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_proto)
+#define NBUF_TCP_PURE_ACK(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_pure_ack)
+#define NBUF_IPV6_PROTO(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->ipv6_proto)
+#define NBUF_IP_OFFSET(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->ip_offset)
+#define NBUF_TCP_OFFSET(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_offset)
+#define NBUF_TCP_CHKSUM(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_udp_chksum)
+#define NBUF_TCP_SEQ_NUM(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_seq_num)
+#define NBUF_TCP_ACK_NUM(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_ack_num)
+#define NBUF_TCP_WIN(skb) \
+	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_win)
+#define NBUF_FLOW_ID_TOEPLITZ(skb)	\
+	(((struct nbuf_rx_cb *)((skb)->cb))->flow_id_toeplitz)
+
+/**
+ * cdf_print_lro_info() - prints the LRO information
+ * @skb : network buffer
+ *
+ * This function prints out the LRO related fields in the rx
+ * descriptor
+ *
+ * Return: none
+ */
+static inline void cdf_print_lro_info(struct sk_buff *skb)
+{
+	cdf_print("NBUF_LRO_ELIGIBLE 0x%x\n"
+		 "NBUF_TCP_PROTO 0x%x\n"
+		 "NBUF_TCP_PURE_ACK 0x%x\n"
+		 "NBUF_TCP_CHKSUM 0x%x\n"
+		 "NBUF_IPV6_PROTO 0x%x\n"
+		 "NBUF_IP_OFFSET 0x%x\n"
+		 "NBUF_TCP_OFFSET 0x%x\n"
+		 "NBUF_TCP_SEQ_NUM 0x%x\n"
+		 "NBUF_TCP_ACK_NUM 0x%x\n"
+		 "NBUF_TCP_WIN 0x%x\n"
+		 "NBUF_FLOW_ID_TOEPLITZ 0x%x\n",
+		 NBUF_LRO_ELIGIBLE(skb),
+		 NBUF_TCP_PROTO(skb),
+		 NBUF_TCP_PURE_ACK(skb),
+		 NBUF_TCP_CHKSUM(skb),
+		 NBUF_IPV6_PROTO(skb),
+		 NBUF_IP_OFFSET(skb),
+		 NBUF_TCP_OFFSET(skb),
+		 NBUF_TCP_SEQ_NUM(skb),
+		 NBUF_TCP_ACK_NUM(skb),
+		 NBUF_TCP_WIN(skb),
+		 NBUF_FLOW_ID_TOEPLITZ(skb));
+}
+#endif /* FEATURE_LRO */
+
+#define NBUF_SET_PACKET_STATE(skb, pkt_state) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state = \
+								pkt_state)
+#define NBUF_GET_PACKET_STATE(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state)
+
+#define NBUF_SET_PACKET_TRACK(skb, pkt_track) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track = \
+								pkt_track)
+#define NBUF_GET_PACKET_TRACK(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track)
+
+#define NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
+	cdf_nbuf_set_state(skb, PACKET_STATE)
+
+#define CDF_NBUF_SET_DP_TRACE(skb, enable) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace \
+								= enable)
+#define CDF_NBUF_GET_DP_TRACE(skb) \
+	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace)
+
+#define __cdf_nbuf_get_num_frags(skb)		   \
+	/* assume the OS provides a single fragment */ \
+	(NBUF_NUM_EXTRA_FRAGS(skb) + 1)
+
+#if defined(FEATURE_TSO)
+#define __cdf_nbuf_dec_num_frags(skb)		   \
+	(NBUF_NUM_EXTRA_FRAGS(skb)--)
+#endif
+
+#define __cdf_nbuf_frag_push_head( \
+		skb, frag_len, frag_vaddr, frag_paddr_lo, frag_paddr_hi) \
+	do { \
+		int frag_num = NBUF_NUM_EXTRA_FRAGS(skb)++; \
+		NBUF_EXTRA_FRAG_VADDR(skb, frag_num) = frag_vaddr; \
+		NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) = frag_paddr_lo; \
+		NBUF_EXTRA_FRAG_LEN(skb, frag_num) = frag_len; \
+	} while (0)
+
+#define __cdf_nbuf_get_frag_len(skb, frag_num)		 \
+	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
+	 NBUF_EXTRA_FRAG_LEN(skb, frag_num) : (skb)->len)
+
+#define __cdf_nbuf_get_frag_vaddr(skb, frag_num)	      \
+	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		  \
+	 NBUF_EXTRA_FRAG_VADDR(skb, frag_num) : ((skb)->data))
+
+#define __cdf_nbuf_get_frag_paddr_lo(skb, frag_num)		 \
+	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		     \
+	 NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) :		  \
+	/* assume that the OS only provides a single fragment */ \
+	 NBUF_MAPPED_PADDR_LO(skb))
+
+#define __cdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
+	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
+	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
+	  (frag_num)) & 0x1 :			       \
+	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
+	  (CVG_NBUF_MAX_EXTRA_FRAGS)) & 0x1)
+
+#define __cdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wordstream)	\
+	do {								    \
+		if (frag_num >= NBUF_NUM_EXTRA_FRAGS(skb)) {			\
+			frag_num = CVG_NBUF_MAX_EXTRA_FRAGS;			    \
+		}								\
+		/* clear the old value */					\
+		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) &= ~(1 << frag_num);	\
+		/* set the new value */						\
+		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) |=			\
+			((is_wordstream) << frag_num);				    \
+	} while (0)
+
+#define __cdf_nbuf_trace_set_proto_type(skb, proto_type) \
+	NBUF_SET_PROTO_TYPE(skb, proto_type)
+#define __cdf_nbuf_trace_get_proto_type(skb) \
+	NBUF_GET_PROTO_TYPE(skb);
+
+/**
+ * __cdf_nbuf_data_attr_get() -  Retrieves the data_attr value
+ *				 from cvg_nbuf_cb (skb->cb)
+ * @skb: Pointer to struct sk_buff
+ *
+ * Return: data_attr
+ */
+#define __cdf_nbuf_data_attr_get(skb)		\
+	NBUF_DATA_ATTR_GET(skb)
+
+/**
+ * __cdf_nbuf_data_attr_set()  -  Sets the data_attr value
+ *				  in cvg_nbuf_cb (skb->cb)
+ * @skb: Pointer to struct sk_buff
+ * @data_attr: packet type from the enum cdf_txrx_pkt_type
+ *
+ * Return:
+ */
+static inline void
+__cdf_nbuf_data_attr_set(struct sk_buff *skb,
+			     uint32_t data_attr)
+{
+	NBUF_DATA_ATTR_SET(skb, data_attr);
+}
+
+/**
+ * typedef struct __cdf_nbuf_queue_t -  network buffer queue
+ * @head: Head pointer
+ * @tail: Tail pointer
+ * @qlen: Queue length
+ */
+typedef struct __cdf_nbuf_qhead {
+	struct sk_buff *head;
+	struct sk_buff *tail;
+	unsigned int qlen;
+} __cdf_nbuf_queue_t;
+
+/*
+ * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
+ * Because the queue head will most likely be put inside some structure,
+ * we don't use a pointer type for the definition.
+ */
+
+/*
+ * prototypes. Implemented in cdf_nbuf.c
+ */
+__cdf_nbuf_t __cdf_nbuf_alloc(__cdf_device_t osdev, size_t size, int reserve,
+			      int align, int prio);
+void __cdf_nbuf_free(struct sk_buff *skb);
+CDF_STATUS __cdf_nbuf_map(__cdf_device_t osdev,
+			  struct sk_buff *skb, cdf_dma_dir_t dir);
+void __cdf_nbuf_unmap(__cdf_device_t osdev,
+		      struct sk_buff *skb, cdf_dma_dir_t dir);
+CDF_STATUS __cdf_nbuf_map_single(__cdf_device_t osdev,
+				 struct sk_buff *skb, cdf_dma_dir_t dir);
+void __cdf_nbuf_unmap_single(__cdf_device_t osdev,
+			     struct sk_buff *skb, cdf_dma_dir_t dir);
+void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr);
+
+#ifdef QCA_PKT_PROTO_TRACE
+void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string);
+#else
+#define __cdf_nbuf_trace_update(skb, event_string)
+#endif /* QCA_PKT_PROTO_TRACE */
+
+/**
+ * __cdf_os_to_status() - OS to CDF status conversion
+ * @error : OS error
+ *
+ * Return: CDF status
+ */
+static inline CDF_STATUS __cdf_os_to_status(signed int error)
+{
+	switch (error) {
+	case 0:
+		return CDF_STATUS_SUCCESS;
+	case ENOMEM:
+	case -ENOMEM:
+		return CDF_STATUS_E_NOMEM;
+	default:
+		return CDF_STATUS_E_NOSUPPORT;
+	}
+}
+
+/**
+ * __cdf_nbuf_len() - return the amount of valid data in the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the amount of valid data in the skb, If there are frags
+ * then it returns total length.
+ *
+ * Return: network buffer length
+ */
+static inline size_t __cdf_nbuf_len(struct sk_buff *skb)
+{
+	int i, extra_frag_len = 0;
+
+	i = NBUF_NUM_EXTRA_FRAGS(skb);
+	while (i-- > 0)
+		extra_frag_len += NBUF_EXTRA_FRAG_LEN(skb, i);
+
+	return extra_frag_len + skb->len;
+}
+
+/**
+ * __cdf_nbuf_cat() - link two nbufs
+ * @dst: Buffer to piggyback into
+ * @src: Buffer to put
+ *
+ * Link two nbufs; the new buf is piggybacked into the older one. The older
+ * (src) skb is released.
+ *
+ * Return: CDF_STATUS (status of the call) if failed the src skb
+ *	   is released
+ */
+static inline CDF_STATUS
+__cdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
+{
+	CDF_STATUS error = 0;
+
+	cdf_assert(dst && src);
+
+	/*
+	 * Since pskb_expand_head unconditionally reallocates the skb->head
+	 * buffer, first check whether the current buffer is already large
+	 * enough.
+	 */
+	if (skb_tailroom(dst) < src->len) {
+		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
+		if (error)
+			return __cdf_os_to_status(error);
+	}
+	memcpy(skb_tail_pointer(dst), src->data, src->len);
+
+	skb_put(dst, src->len);
+	dev_kfree_skb_any(src);
+
+	return __cdf_os_to_status(error);
+}
+
+/**************************nbuf manipulation routines*****************/
+
+/**
+ * __cdf_nbuf_headroom() - return the amount of head space available
+ * @skb: Pointer to network buffer
+ *
+ * Return: amount of head room
+ */
+static inline int __cdf_nbuf_headroom(struct sk_buff *skb)
+{
+	return skb_headroom(skb);
+}
+
+/**
+ * __cdf_nbuf_tailroom() - return the amount of tail space available
+ * @buf: Pointer to network buffer
+ *
+ * Return: amount of tail room
+ */
+static inline uint32_t __cdf_nbuf_tailroom(struct sk_buff *skb)
+{
+	return skb_tailroom(skb);
+}
+
+/**
+ * __cdf_nbuf_push_head() - Push data in the front
+ * @skb: Pointer to network buffer
+ * @size: size to be pushed
+ *
+ * Return: New data pointer of this buf after data has been pushed,
+ *	   or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__cdf_nbuf_push_head(struct sk_buff *skb, size_t size)
+{
+	if (NBUF_MAPPED_PADDR_LO(skb))
+		NBUF_MAPPED_PADDR_LO(skb) -= size;
+
+	return skb_push(skb, size);
+}
+
+/**
+ * __cdf_nbuf_put_tail() - Puts data in the end
+ * @skb: Pointer to network buffer
+ * @size: size to be pushed
+ *
+ * Return: data pointer of this buf where new data has to be
+ *	   put, or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__cdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
+{
+	if (skb_tailroom(skb) < size) {
+		if (unlikely(pskb_expand_head(skb, 0,
+			size - skb_tailroom(skb), GFP_ATOMIC))) {
+			dev_kfree_skb_any(skb);
+			return NULL;
+		}
+	}
+	return skb_put(skb, size);
+}
+
+/**
+ * __cdf_nbuf_pull_head() - pull data out from the front
+ * @skb: Pointer to network buffer
+ * @size: size to be popped
+ *
+ * Return: New data pointer of this buf after data has been popped,
+ *	   or NULL if there is not sufficient data to pull.
+ */
+static inline uint8_t *__cdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
+{
+	if (NBUF_MAPPED_PADDR_LO(skb))
+		NBUF_MAPPED_PADDR_LO(skb) += size;
+
+	return skb_pull(skb, size);
+}
+
+/**
+ * __cdf_nbuf_trim_tail() - trim data out from the end
+ * @skb: Pointer to network buffer
+ * @size: size to be popped
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
+{
+	return skb_trim(skb, skb->len - size);
+}
+
+/*********************nbuf private buffer routines*************/
+
+/**
+ * __cdf_nbuf_peek_header() - return the header's addr & m_len
+ * @skb: Pointer to network buffer
+ * @addr: Pointer to store header's addr
+ * @len: network buffer length
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
+{
+	*addr = skb->data;
+	*len = skb->len;
+}
+
+/******************Custom queue*************/
+
+/**
+ * __cdf_nbuf_queue_init() - initialize the queue head
+ * @qhead: Queue head
+ *
+ * Return: CDF status
+ */
+static inline CDF_STATUS __cdf_nbuf_queue_init(__cdf_nbuf_queue_t *qhead)
+{
+	memset(qhead, 0, sizeof(struct __cdf_nbuf_qhead));
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_nbuf_queue_add() - add an skb in the tail of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it
+ * needs to synchronize
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_queue_add(__cdf_nbuf_queue_t *qhead, struct sk_buff *skb)
+{
+	skb->next = NULL;       /*Nullify the next ptr */
+
+	if (!qhead->head)
+		qhead->head = skb;
+	else
+		qhead->tail->next = skb;
+
+	qhead->tail = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __cdf_nbuf_queue_insert_head() - add an skb at the head of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it needs to
+ * synchronize
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_queue_insert_head(__cdf_nbuf_queue_t *qhead, __cdf_nbuf_t skb)
+{
+	if (!qhead->head) {
+		/*Empty queue Tail pointer Must be updated */
+		qhead->tail = skb;
+	}
+	skb->next = qhead->head;
+	qhead->head = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __cdf_nbuf_queue_remove() - remove a skb from the head of the queue
+ * @qhead: Queue head
+ *
+ * This is a lockless version. Driver should take care of the locks
+ *
+ * Return: skb or NULL
+ */
+static inline
+struct sk_buff *__cdf_nbuf_queue_remove(__cdf_nbuf_queue_t *qhead)
+{
+	__cdf_nbuf_t tmp = NULL;
+
+	if (qhead->head) {
+		qhead->qlen--;
+		tmp = qhead->head;
+		if (qhead->head == qhead->tail) {
+			qhead->head = NULL;
+			qhead->tail = NULL;
+		} else {
+			qhead->head = tmp->next;
+		}
+		tmp->next = NULL;
+	}
+	return tmp;
+}
+
+/**
+ * __cdf_nbuf_queue_len() - return the queue length
+ * @qhead: Queue head
+ *
+ * Return: Queue length
+ */
+static inline uint32_t __cdf_nbuf_queue_len(__cdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen;
+}
+
+/**
+ * __cdf_nbuf_queue_next() - return the next skb from packet chain
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the next skb from packet chain, remember the skb is
+ * still in the queue
+ *
+ * Return: NULL if no packets are there
+ */
+static inline struct sk_buff *__cdf_nbuf_queue_next(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __cdf_nbuf_is_queue_empty() - check if the queue is empty or not
+ * @qhead: Queue head
+ *
+ * Return: true if length is 0 else false
+ */
+static inline bool __cdf_nbuf_is_queue_empty(__cdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen == 0;
+}
+
+/*
+ * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
+ * Because the queue head will most likely be put inside some structure,
+ * we don't use a pointer type for the definition.
+ */
+
+/*
+ * prototypes. Implemented in cdf_nbuf.c
+ */
+cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb);
+CDF_STATUS __cdf_nbuf_set_rx_cksum(struct sk_buff *skb,
+				   cdf_nbuf_rx_cksum_t *cksum);
+uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb);
+void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
+uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb);
+
+/*
+ * cdf_nbuf_pool_delete() implementation - do nothing in linux
+ */
+#define __cdf_nbuf_pool_delete(osdev)
+
+/**
+ * __cdf_nbuf_clone() - clone the nbuf (copy is readonly)
+ * @skb: Pointer to network buffer
+ *
+ * if GFP_ATOMIC is overkill then we can check whether its
+ * called from interrupt context and then do it or else in
+ * normal case use GFP_KERNEL
+ *
+ * example     use "in_irq() || irqs_disabled()"
+ *
+ * Return: cloned skb
+ */
+static inline struct sk_buff *__cdf_nbuf_clone(struct sk_buff *skb)
+{
+	return skb_clone(skb, GFP_ATOMIC);
+}
+
+/**
+ * __cdf_nbuf_copy() - returns a private copy of the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns a private copy of the skb, the skb returned is completely
+ *  modifiable by callers
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *__cdf_nbuf_copy(struct sk_buff *skb)
+{
+	return skb_copy(skb, GFP_ATOMIC);
+}
+
+#define __cdf_nbuf_reserve      skb_reserve
+
+/***********************XXX: misc api's************************/
+
+/**
+ * __cdf_nbuf_head() - return the pointer the skb's head pointer
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to head buffer
+ */
+static inline uint8_t *__cdf_nbuf_head(struct sk_buff *skb)
+{
+	return skb->head;
+}
+
+/**
+ * __cdf_nbuf_data() - return the pointer to data header in the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to skb data
+ */
+static inline uint8_t *__cdf_nbuf_data(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+/**
+ * __cdf_nbuf_get_protocol() - return the protocol value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb protocol
+ */
+static inline uint16_t __cdf_nbuf_get_protocol(struct sk_buff *skb)
+{
+	return skb->protocol;
+}
+
+/**
+ * __cdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb ip_summed
+ */
+static inline uint8_t __cdf_nbuf_get_ip_summed(struct sk_buff *skb)
+{
+	return skb->ip_summed;
+}
+
+/**
+ * __cdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
+ * @skb: Pointer to network buffer
+ * @ip_summed: ip checksum
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_set_ip_summed(struct sk_buff *skb, uint8_t ip_summed)
+{
+	skb->ip_summed = ip_summed;
+}
+
+/**
+ * __cdf_nbuf_get_priority() - return the priority value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb priority
+ */
+static inline uint32_t __cdf_nbuf_get_priority(struct sk_buff *skb)
+{
+	return skb->priority;
+}
+
+/**
+ * __cdf_nbuf_set_priority() - sets the priority value of the skb
+ * @skb: Pointer to network buffer
+ * @p: priority
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
+{
+	skb->priority = p;
+}
+
+/**
+ * __cdf_nbuf_set_next() - sets the next skb pointer of the current skb
+ * @skb: Current skb
+ * @skb_next: Next skb
+ *
+ * Return: void
+ */
+static inline void
+__cdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
+{
+	skb->next = skb_next;
+}
+
+/**
+ * __cdf_nbuf_next() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__cdf_nbuf_next(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __cdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
+ * @skb: Current skb
+ * @skb_next: Next skb
+ *
+ * This fn is used to link up extensions to the head skb. Does not handle
+ * linking to the head
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
+{
+	skb->next = skb_next;
+}
+
+/**
+ * __cdf_nbuf_next_ext() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__cdf_nbuf_next_ext(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __cdf_nbuf_append_ext_list() - link list of packet extensions to the head
+ * @skb_head: head_buf nbuf holding head segment (single)
+ * @ext_list: nbuf list holding linked extensions to the head
+ * @ext_len: Total length of all buffers in the extension list
+ *
+ * This function is used to link up a list of packet extensions (seg1,
+ * seg2, ...) to the nbuf holding the head segment (seg0).
+ *
+ * NOTE(review): data_len is assigned (not accumulated), so this assumes
+ * skb_head carries no pre-existing frag_list data - confirm with callers.
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_append_ext_list(struct sk_buff *skb_head,
+			   struct sk_buff *ext_list, size_t ext_len)
+{
+	skb_shinfo(skb_head)->frag_list = ext_list;
+	skb_head->data_len = ext_len;
+	skb_head->len += skb_head->data_len;
+}
+
+/**
+ * __cdf_nbuf_tx_free() - free a list of skbs linked via their next pointers
+ * @bufs: Head of the network buffer list to free
+ * @tx_err: TX error code (unused in this implementation)
+ *
+ * Walks the singly-linked skb list and frees each buffer.
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_tx_free(struct sk_buff *bufs, int tx_err)
+{
+	while (bufs) {
+		struct sk_buff *next = __cdf_nbuf_next(bufs);
+		__cdf_nbuf_free(bufs);
+		bufs = next;
+	}
+}
+
+/**
+ * __cdf_nbuf_get_age() - return the age value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * The age is carried in the skb's csum field (see __cdf_nbuf_set_age()).
+ *
+ * Return: age value (skb->csum)
+ */
+static inline uint32_t __cdf_nbuf_get_age(struct sk_buff *skb)
+{
+	return skb->csum;
+}
+
+/**
+ * __cdf_nbuf_set_age() - sets the checksum value of the skb
+ * @skb: Pointer to network buffer
+ * @v: Value
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
+{
+	skb->csum = v;
+}
+
+/**
+ * __cdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
+ * @skb: Pointer to network buffer
+ * @adj: Adjustment value
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
+{
+	skb->csum -= adj;
+}
+
+/**
+ * __cdf_nbuf_copy_bits() - return the length of the copy bits for skb
+ * @skb: Pointer to network buffer
+ * @offset: Offset value
+ * @len: Length
+ * @to: Destination pointer
+ *
+ * Return: length of the copy bits for skb
+ */
+static inline int32_t
+__cdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
+{
+	return skb_copy_bits(skb, offset, to, len);
+}
+
+/**
+ * __cdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
+ * @skb: Pointer to network buffer
+ * @len:  Packet length
+ *
+ * Trims the skb when the new length is shorter; otherwise grows the tail,
+ * expanding the underlying buffer first when tailroom is insufficient.
+ *
+ * NOTE(review): when pskb_expand_head() fails the skb is freed and
+ * cdf_assert(0) raised, yet control can still reach skb_put() on the freed
+ * skb if asserts are compiled out - confirm this path cannot be hit.
+ *
+ * Return: none
+ */
+static inline void __cdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
+{
+	if (skb->len > len) {
+		skb_trim(skb, len);
+	} else {
+		if (skb_tailroom(skb) < len - skb->len) {
+			if (unlikely(pskb_expand_head(skb, 0,
+				len - skb->len - skb_tailroom(skb),
+				GFP_ATOMIC))) {
+				dev_kfree_skb_any(skb);
+				cdf_assert(0);
+			}
+		}
+		skb_put(skb, (len - skb->len));
+	}
+}
+
+/**
+ * __cdf_nbuf_set_protocol() - sets the protocol value of the skb
+ * @skb: Pointer to network buffer
+ * @protocol: Protocol type
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
+{
+	skb->protocol = protocol;
+}
+
+#define __cdf_nbuf_set_tx_htt2_frm(skb, candi) \
+	NBUF_SET_TX_HTT2_FRM(skb, candi)
+#define __cdf_nbuf_get_tx_htt2_frm(skb)	\
+	NBUF_GET_TX_HTT2_FRM(skb)
+
+#if defined(FEATURE_TSO)
+uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
+	struct cdf_tso_info_t *tso_info);
+
+uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
+
+static inline uint8_t __cdf_nbuf_is_tso(struct sk_buff *skb)
+{
+	return skb_is_gso(skb);
+}
+
+struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb);
+#endif /* TSO */
+
+/**
+ * __cdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
+ *			      and get hw_classify by peeking
+ *			      into packet
+ * @nbuf:		Network buffer (skb on Linux)
+ * @pkt_type:		Pkt type (from enum htt_pkt_type)
+ * @pkt_subtype:	Bit 4 of this field in HTT descriptor
+ *			needs to be set in case of CE classification support
+ *			Is set by this macro.
+ * @hw_classify:	This is a flag which is set to indicate
+ *			CE classification is enabled.
+ *			Do not set this bit for VLAN packets
+ *			OR for mcast / bcast frames.
+ *
+ * This macro parses the payload to figure out relevant Tx meta-data e.g.
+ * whether to enable tx_classify bit in CE.
+ *
+ * Overrides pkt_type only if required for 802.3 frames (original ethernet)
+ * If protocol is less than ETH_P_802_3_MIN (0x600), then
+ * it is the length and a 802.3 frame else it is Ethernet Type II
+ * (RFC 894).
+ * Bit 4 in pkt_subtype is the tx_classify bit
+ *
+ * Return:	void
+ */
+#define __cdf_nbuf_tx_info_get(skb, pkt_type,			\
+				pkt_subtype, hw_classify)	\
+do {								\
+	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
+	uint16_t ether_type = ntohs(eh->h_proto);		\
+	bool is_mc_bc;						\
+								\
+	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
+		   is_multicast_ether_addr((uint8_t *)eh);	\
+								\
+	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
+		hw_classify = 1;				\
+		pkt_subtype = 0x01 <<				\
+			HTT_TX_CLASSIFY_BIT_S;			\
+	}							\
+								\
+	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
+		pkt_type = htt_pkt_type_ethernet;		\
+								\
+} while (0)
+#endif /*_I_CDF_NET_BUF_H */

+ 152 - 0
core/cdf/src/i_cdf_softirq_timer.h

@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _I_CDF_SOFTIRQ_TIMER_H
+#define _I_CDF_SOFTIRQ_TIMER_H
+
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <cdf_types.h>
+
+/* timer data type */
+typedef struct timer_list __cdf_softirq_timer_t;
+
+/* ugly - but every other OS takes, sanely, a void */
+
+typedef void (*cdf_dummy_timer_func_t)(unsigned long arg);
+
+/**
+ * __cdf_softirq_timer_init() - initialize a softirq timer
+ * @hdl: OS handle (unused here)
+ * @timer: Pointer to timer object
+ * @func: Function pointer
+ * @arg: Argument
+ * @type: deferrable or non deferrable timer type
+ *
+ * Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
+ * not cause CPU wake upon expiry
+ * Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
+ * will cause CPU wake up on expiry
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+static inline CDF_STATUS
+__cdf_softirq_timer_init(cdf_handle_t hdl,
+			 struct timer_list *timer,
+			 cdf_softirq_timer_func_t func, void *arg,
+			 CDF_TIMER_TYPE type)
+{
+	if (CDF_TIMER_TYPE_SW == type)
+		init_timer_deferrable(timer);
+	else
+		init_timer(timer);
+	timer->function = (cdf_dummy_timer_func_t) func;
+	timer->data = (unsigned long)arg;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_softirq_timer_start() - start a cdf softirq timer
+ * @timer: Pointer to timer object
+ * @delay: Delay in milli seconds
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+static inline CDF_STATUS
+__cdf_softirq_timer_start(struct timer_list *timer, uint32_t delay)
+{
+	timer->expires = jiffies + msecs_to_jiffies(delay);
+	add_timer(timer);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_softirq_timer_mod() - modify the expiry of a (possibly running) timer
+ * @timer: Pointer to timer object
+ * @delay: Delay in milli seconds
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+static inline CDF_STATUS
+__cdf_softirq_timer_mod(struct timer_list *timer, uint32_t delay)
+{
+	mod_timer(timer, jiffies + msecs_to_jiffies(delay));
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_softirq_timer_cancel() - cancel a timer
+ * @timer: Pointer to timer object
+ *
+ * Does not wait for a running handler to finish (del_timer, not
+ * del_timer_sync).
+ *
+ * Return: true if timer was cancelled and deactivated,
+ *	false if timer was cancelled but already got fired.
+ */
+static inline bool __cdf_softirq_timer_cancel(struct timer_list *timer)
+{
+	if (likely(del_timer(timer)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * __cdf_softirq_timer_free() - free a cdf timer
+ * @timer: Pointer to timer object
+ *
+ * Cancels the timer and waits for any running handler to finish
+ * (del_timer_sync), so it must not be called from interrupt context.
+ *
+ * Return: none
+ */
+static inline void __cdf_softirq_timer_free(struct timer_list *timer)
+{
+	del_timer_sync(timer);
+}
+
+/**
+ * __cdf_sostirq_timer_sync_cancel() - synchronously cancel a timer
+ * @timer: Pointer to timer object
+ *
+ * NOTE(review): "sostirq" in the function name appears to be a typo for
+ * "softirq"; renaming would require updating all callers.
+ *
+ * Synchronization Rules:
+ * 1. caller must make sure timer function will not use
+ *    cdf_softirq_set_timer to add itself again.
+ * 2. caller must not hold any lock that timer function
+ *    is likely to hold as well.
+ * 3. It can't be called from interrupt context.
+ *
+ * Return: true if timer was cancelled and deactivated,
+ *	false if timer was cancelled but already got fired.
+ */
+static inline bool __cdf_sostirq_timer_sync_cancel(struct timer_list *timer)
+{
+	return del_timer_sync(timer);
+}
+
+#endif /* _I_CDF_SOFTIRQ_TIMER_H */

+ 217 - 0
core/cdf/src/i_cdf_time.h

@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_cdf_time.h
+ *
+ * Linux specific CDF timing APIs implementation
+ */
+
+#ifndef _I_CDF_TIME_H
+#define _I_CDF_TIME_H
+
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <asm/arch_timer.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+typedef unsigned long __cdf_time_t;
+
+/**
+ * __cdf_system_ticks() - get system ticks
+ *
+ * Return: system tick in jiffies
+ */
+static inline __cdf_time_t __cdf_system_ticks(void)
+{
+	return jiffies;
+}
+
+/**
+ * __cdf_system_ticks_to_msecs() - convert system ticks into milli seconds
+ * @ticks: System ticks
+ *
+ * Return: system tick converted into milli seconds
+ */
+static inline uint32_t __cdf_system_ticks_to_msecs(unsigned long ticks)
+{
+	return jiffies_to_msecs(ticks);
+}
+
+/**
+ * __cdf_system_msecs_to_ticks() - convert milli seconds into system ticks
+ * @msecs: Milli seconds
+ *
+ * Return: milli seconds converted into system ticks
+ */
+static inline __cdf_time_t __cdf_system_msecs_to_ticks(uint32_t msecs)
+{
+	return msecs_to_jiffies(msecs);
+}
+
+/**
+ * __cdf_get_system_uptime() - get system uptime
+ *
+ * Return: system uptime in jiffies
+ */
+static inline __cdf_time_t __cdf_get_system_uptime(void)
+{
+	return jiffies;
+}
+
+/**
+ * __cdf_get_system_timestamp() - get system time in milliseconds
+ *
+ * Converts jiffies to milliseconds; the whole-second and sub-second parts
+ * are computed separately to avoid overflowing the intermediate product.
+ *
+ * Return: system time in milliseconds
+ */
+static inline __cdf_time_t __cdf_get_system_timestamp(void)
+{
+	return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ);
+}
+
+/**
+ * __cdf_udelay() - delay execution for given microseconds
+ * @usecs: Micro seconds to delay
+ *
+ * Return: none
+ */
+static inline void __cdf_udelay(uint32_t usecs)
+{
+#ifdef CONFIG_ARM
+	/*
+	 * This is in support of XScale build.  They have a limit on the udelay
+	 * value, so we have to make sure we don't approach the limit
+	 */
+
+	uint32_t mticks;
+	uint32_t leftover;
+	int i;
+
+	/* slice into 1024 usec chunks (simplifies calculation) */
+
+	mticks = usecs >> 10;
+	leftover = usecs - (mticks << 10);
+
+	for (i = 0; i < mticks; i++)
+		udelay(1024);
+
+	udelay(leftover);
+
+#else
+	/* Normal Delay functions. Time specified in microseconds */
+	udelay(usecs);
+
+#endif
+}
+
+/**
+ * __cdf_mdelay() - delay execution for given milli seconds
+ * @msecs: Milli seconds to delay
+ *
+ * Return: none
+ */
+static inline void __cdf_mdelay(uint32_t msecs)
+{
+	mdelay(msecs);
+}
+
+/**
+ * __cdf_system_time_after() - Check if a is later than b
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * The signed subtraction keeps the comparison correct across tick-counter
+ * wrap-around.
+ *
+ * Return:
+ *	true if a is after b (a > b) else false
+ */
+static inline bool __cdf_system_time_after(__cdf_time_t a, __cdf_time_t b)
+{
+	return (long)(b) - (long)(a) < 0;
+}
+
+/**
+ * __cdf_system_time_before() - Check if a is before b
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ *	true if a is before b else false
+ */
+static inline bool __cdf_system_time_before(__cdf_time_t a, __cdf_time_t b)
+{
+	return __cdf_system_time_after(b, a);
+}
+
+/**
+ * __cdf_system_time_after_eq() - Check if a is at least as recent as b,
+ *				  if not later
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ *	true if a >= b else false
+ */
+static inline bool __cdf_system_time_after_eq(__cdf_time_t a, __cdf_time_t b)
+{
+	return (long)(a) - (long)(b) >= 0;
+}
+
+/**
+ * __cdf_get_monotonic_boottime() - get monotonic kernel boot time
+ * This API is similar to cdf_get_system_boottime but it includes
+ * time spent in suspend.
+ *
+ * Return: Time in microseconds
+ */
+#ifdef CONFIG_CNSS
+static inline uint64_t __cdf_get_monotonic_boottime(void)
+{
+	struct timespec ts;
+
+	cnss_get_monotonic_boottime(&ts);
+
+	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+}
+#else
+static inline uint64_t __cdf_get_monotonic_boottime(void)
+{
+	return __cdf_system_ticks_to_msecs(__cdf_system_ticks()) * 1000;
+}
+#endif /* CONFIG_CNSS */
+
+#ifdef QCA_WIFI_3_0_ADRASTEA
+/**
+ * __cdf_get_qtimer_ticks() - get QTIMER ticks
+ *
+ * Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds
+ * divide it by 19200.
+ *
+ * Return: QTIMER(19.2 MHz) clock ticks
+ */
+static inline uint64_t __cdf_get_qtimer_ticks(void)
+{
+	return arch_counter_get_cntpct();
+}
+#endif /* QCA_WIFI_3_0_ADRASTEA */
+
+#endif

+ 145 - 0
core/cdf/src/i_cdf_trace.h

@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined(__I_CDF_TRACE_H)
+#define __I_CDF_TRACE_H
+
+#if !defined(__printf)
+#define __printf(a, b)
+#endif
+
+/**
+ * DOC: i_cdf_trace.h
+ *
+ * Linux-specific definitions for CDF trace
+ *
+ */
+
+/* Include Files */
+
+/**
+ * cdf_trace_msg()- logging API
+ * @module: Module identifier. A member of the CDF_MODULE_ID enumeration that
+ *	    identifies the module issuing the trace message.
+ * @level: Trace level. A member of the CDF_TRACE_LEVEL enumeration indicating
+ *	   the severity of the condition causing the trace message to be issued.
+ *	   More severe conditions are more likely to be logged.
+ * @strFormat: Format string. The message to be logged. This format string
+ *	       contains printf-like replacement parameters, which follow this
+ *	       parameter in the variable argument list.
+ *
+ * Users wishing to add tracing information to their code should use
+ * CDF_TRACE.  CDF_TRACE() will compile into a call to cdf_trace_msg() when
+ * tracing is enabled.
+ *
+ * Return: nothing
+ *
+ */
+void __printf(3, 4) cdf_trace_msg(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
+				  char *strFormat, ...);
+
+void cdf_trace_hex_dump(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
+			void *data, int buf_len);
+
+void cdf_trace_display(void);
+
+void cdf_trace_set_value(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
+			 uint8_t on);
+
+void cdf_trace_set_module_trace_level(CDF_MODULE_ID module, uint32_t level);
+
+/* CDF_TRACE is the macro invoked to add trace messages to code.  See the
+ * documenation for cdf_trace_msg() for the parameters etc. for this function.
+ *
+ * NOTE:  Code CDF_TRACE() macros into the source code.  Do not code directly
+ * to the cdf_trace_msg() function.
+ *
+ * NOTE 2:  cdf tracing is totally turned off if WLAN_DEBUG is *not* defined.
+ * This allows us to build 'performance' builds where we can measure performance
+ * without being bogged down by all the tracing in the code
+ */
+
+#if defined(WLAN_DEBUG)
+#define CDF_TRACE cdf_trace_msg
+#define CDF_TRACE_HEX_DUMP cdf_trace_hex_dump
+#else
+#define CDF_TRACE(arg ...)
+#define CDF_TRACE_HEX_DUMP(arg ...)
+#endif
+
+void __printf(3, 4) cdf_snprintf(char *strBuffer, unsigned int size,
+				 char *strFormat, ...);
+#define CDF_SNPRINTF cdf_snprintf
+
+#ifdef CDF_ENABLE_TRACING
+
+#define CDF_ASSERT(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("CDF ASSERT in %s Line %d\n", \
+			       __func__, __LINE__); \
+			WARN_ON(1); \
+		} \
+	} while (0)
+
+#else
+
+/* This code will be used for compilation if tracing is to be compiled out */
+/* of the code so these functions/macros are 'do nothing' */
+CDF_INLINE_FN void cdf_trace_msg(CDF_MODULE_ID module, ...)
+{
+}
+
+#define CDF_ASSERT(_condition)
+
+#endif
+
+#ifdef PANIC_ON_BUG
+
+#define CDF_BUG(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("CDF BUG in %s Line %d\n", \
+			       __func__, __LINE__); \
+			BUG_ON(1); \
+		} \
+	} while (0)
+
+#else
+
+#define CDF_BUG(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("CDF BUG in %s Line %d\n", \
+			       __func__, __LINE__); \
+			WARN_ON(1); \
+		} \
+	} while (0)
+
+#endif
+
+#endif

+ 234 - 0
core/cdf/src/i_cdf_types.h

@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_cdf_types.h
+ *
+ * Connectivity driver framework (CDF) types
+ */
+
+#if !defined(__I_CDF_TYPES_H)
+#define __I_CDF_TYPES_H
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+
+#ifndef __KERNEL__
+#define __iomem
+#endif
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+
+#ifdef __KERNEL__
+#include <generated/autoconf.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/wireless.h>
+#include <linux/if.h>
+#else
+
+/*
+ * Hack - coexist with prior defs of dma_addr_t.
+ * Eventually all other defs of dma_addr_t should be removed.
+ * At that point, the "already_defined" wrapper can be removed.
+ */
+#ifndef __dma_addr_t_already_defined__
+#define __dma_addr_t_already_defined__
+typedef unsigned long dma_addr_t;
+#endif
+
+#define SIOCGIWAP       0
+#define IWEVCUSTOM      0
+#define IWEVREGISTERED  0
+#define IWEVEXPIRED     0
+#define SIOCGIWSCAN     0
+#define DMA_TO_DEVICE   0
+#define DMA_FROM_DEVICE 0
+#define __iomem
+#endif /* __KERNEL__ */
+
+/**
+ * max sg that we support
+ */
+#define __CDF_OS_MAX_SCATTER        1
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define CDF_LITTLE_ENDIAN_MACHINE
+#elif defined (__BIG_ENDIAN_BITFIELD)
+#define CDF_BIG_ENDIAN_MACHINE
+#else
+#error  "Please fix <asm/byteorder.h>"
+#endif
+
+#define __cdf_packed          __attribute__ ((packed))
+
+typedef int (*__cdf_os_intr)(void *);
+/**
+ * Private definitions of general data types
+ */
+typedef dma_addr_t __cdf_dma_addr_t;
+typedef dma_addr_t __cdf_dma_context_t;
+
+#define cdf_dma_mem_context(context) dma_addr_t context
+#define cdf_get_dma_mem_context(var, field)   ((cdf_dma_context_t)(var->field))
+
+/**
+ * typedef struct __cdf_resource_t - cdf resource type
+ * @paddr: Physical address
+ * @vaddr: Virtual address
+ * @len: Length
+ */
+typedef struct __cdf_os_resource {
+	unsigned long paddr;
+	void __iomem *vaddr;
+	unsigned long len;
+} __cdf_resource_t;
+
+/**
+ * struct __cdf_device - generic cdf device type
+ * @drv: Pointer to driver
+ * @drv_hdl: Pointer to driver handle
+ * @drv_name: Pointer to driver name
+ * @irq: IRQ
+ * @dev: Pointer to device
+ * @res: CDF resource
+ * @func: Interrupt handler
+ */
+struct __cdf_device {
+	void *drv;
+	void *drv_hdl;
+	char *drv_name;
+	int irq;
+	struct device *dev;
+	__cdf_resource_t res;
+	__cdf_os_intr func;
+};
+
+typedef struct __cdf_device *__cdf_device_t;
+
+typedef size_t __cdf_size_t;
+typedef uint8_t __iomem *__cdf_iomem_t;
+
+/**
+ * typedef struct __cdf_segment_t - cdf segment
+ * @daddr: DMA address
+ * @len: Length
+ */
+typedef struct __cdf_segment {
+	dma_addr_t daddr;
+	uint32_t len;
+} __cdf_segment_t;
+
+/**
+ * struct __cdf_dma_map - dma map
+ * @mapped: dma is mapped or not
+ * @nsegs: Number of segments
+ * @coherent: Coherent
+ * @seg: Segment array
+ */
+struct __cdf_dma_map {
+	uint32_t mapped;
+	uint32_t nsegs;
+	uint32_t coherent;
+	__cdf_segment_t seg[__CDF_OS_MAX_SCATTER];
+};
+typedef struct __cdf_dma_map *__cdf_dma_map_t;
+typedef uint32_t ath_dma_addr_t;
+
+#define __cdf_print               printk
+#define __cdf_vprint              vprintk
+#define __cdf_snprint             snprintf
+#define __cdf_vsnprint            vsnprintf
+
+#define __CDF_DMA_BIDIRECTIONAL  DMA_BIDIRECTIONAL
+#define __CDF_DMA_TO_DEVICE      DMA_TO_DEVICE
+#define __CDF_DMA_FROM_DEVICE    DMA_FROM_DEVICE
+#define __cdf_inline             inline
+
+/*
+ * 1. GNU C/C++ Compiler
+ *
+ * How to detect gcc : __GNUC__
+ * How to detect gcc version :
+ *   major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x)
+ *   minor version : __GNUC_MINOR__
+ *
+ * 2. Microsoft C/C++ Compiler
+ *
+ * How to detect msc : _MSC_VER
+ * How to detect msc version :
+ *   _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...)
+ *
+ */
+
+/* MACROs to help with compiler and OS specifics. May need to get a little
+ * more sophisticated than this and define these to specific 'VERSIONS' of
+ * the compiler and OS.  Until we have a need for that, lets go with this
+ */
+#if defined(_MSC_VER)
+
+#define CDF_COMPILER_MSC
+/* assuming that if we build with MSC, OS is WinMobile */
+#define CDF_OS_WINMOBILE
+
+#elif defined(__GNUC__)
+
+#define CDF_COMPILER_GNUC
+#define CDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */
+
+#endif
+
+#if defined(CDF_COMPILER_MSC)
+
+#define CDF_INLINE_FN  __inline
+
+/* Does nothing on Windows.  packing individual structs is not
+ * supported on the Windows compiler
+ */
+#define CDF_PACK_STRUCT_1
+#define CDF_PACK_STRUCT_2
+#define CDF_PACK_STRUCT_4
+#define CDF_PACK_STRUCT_8
+#define CDF_PACK_STRUCT_16
+
+#elif defined(CDF_COMPILER_GNUC)
+
+#define CDF_INLINE_FN  static inline
+
+#else
+#error "Compiling with an unknown compiler!!"
+#endif
+
+#endif /* __I_CDF_TYPES_H */

+ 107 - 0
core/cdf/src/i_cdf_util.h

@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _I_CDF_UTIL_H
+#define _I_CDF_UTIL_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <errno.h>
+
+#include <linux/random.h>
+
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <asm/byteorder.h>
+/*
+ * Generic compiler-dependent macros if defined by the OS
+ */
+
+#define __cdf_unlikely(_expr)   unlikely(_expr)
+#define __cdf_likely(_expr)     likely(_expr)
+
+/**
+ * cdf_status_to_os_return(): translates cdf_status types to linux return types
+ * @status: status to translate
+ *
+ * Translates error types that linux may want to handle specially.
+ *
+ * return: 0 or the linux error code that most closely matches the CDF_STATUS.
+ *	defaults to -1 (EPERM)
+ */
+static inline int __cdf_status_to_os_return(CDF_STATUS status)
+{
+	switch (status) {
+	case CDF_STATUS_SUCCESS:
+		return 0;
+	case CDF_STATUS_E_NULL_VALUE:
+	case CDF_STATUS_E_FAULT:
+		return -EFAULT;
+	case CDF_STATUS_E_TIMEOUT:
+	case CDF_STATUS_E_BUSY:
+		return -EBUSY;
+	case CDF_STATUS_NOT_INITIALIZED:
+	case CDF_STATUS_E_AGAIN:
+		return -EAGAIN;
+	case CDF_STATUS_E_NOSUPPORT:
+		return -ENOSYS;
+	case CDF_STATUS_E_ALREADY:
+		return -EALREADY;
+	case CDF_STATUS_E_NOMEM:
+		return -ENOMEM;
+	default:
+		return -EPERM;
+	}
+}
+
+
+/**
+ * @brief memory barriers.
+ */
+
+#define __cdf_min(_a, _b)         ((_a) < (_b) ? _a : _b)
+#define __cdf_max(_a, _b)         ((_a) > (_b) ? _a : _b)
+
+/**
+ * @brief Assert
+ */
+#define __cdf_assert(expr)  do {    \
+		if (unlikely(!(expr))) {				\
+			pr_err("Assertion failed! %s:%s %s:%d\n",   \
+			       # expr, __func__, __FILE__, __LINE__);      \
+			dump_stack();					   \
+			panic("Take care of the assert first\n");	   \
+		}     \
+} while (0)
+
+#define __cdf_os_cpu_to_le64                cpu_to_le64
+#define __cdf_le16_to_cpu                le16_to_cpu
+#define __cdf_le32_to_cpu                le32_to_cpu
+#define __cdf_container_of(ptr, type, member) container_of(ptr, type, member)
+
+#endif /*_I_CDF_UTIL_H*/

+ 124 - 0
core/cds/inc/cds_api.h

@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#if !defined(__CDS_API_H)
+#define __CDS_API_H
+
+/**
+ * DOC:  cds_api.h
+ *
+ * Connectivity driver services public API
+ *
+ */
+
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <cdf_memory.h>
+#include <cdf_list.h>
+#include <cds_get_bin.h>
+#include <cdf_trace.h>
+#include <cdf_event.h>
+#include <cdf_lock.h>
+#include <cds_reg_service.h>
+#include <cds_mq.h>
+#include <cds_packet.h>
+#include <cds_sched.h>
+#include <cdf_threads.h>
+#include <cdf_mc_timer.h>
+#include <cds_pack_align.h>
+
+/* Amount of time to wait for WMA to perform an asynchronous activity.
+ * This value should be larger than the timeout used by WMI to wait for
+ * a response from target
+ */
+#define CDS_WMA_TIMEOUT  (15000)
+
+CDF_STATUS cds_alloc_global_context(v_CONTEXT_t *p_cds_context);
+
+CDF_STATUS cds_free_global_context(v_CONTEXT_t *p_cds_context);
+
+CDF_STATUS cds_pre_enable(v_CONTEXT_t cds_context);
+
+CDF_STATUS cds_open(v_CONTEXT_t *p_cds_context, uint32_t hddContextSize);
+
+CDF_STATUS cds_enable(v_CONTEXT_t cds_context);
+
+CDF_STATUS cds_disable(v_CONTEXT_t cds_context);
+
+CDF_STATUS cds_close(v_CONTEXT_t cds_context);
+
+CDF_STATUS cds_shutdown(v_CONTEXT_t cds_context);
+
+void cds_core_return_msg(void *pVContext, p_cds_msg_wrapper pMsgWrapper);
+
+void *cds_get_context(CDF_MODULE_ID moduleId);
+
+v_CONTEXT_t cds_get_global_context(void);
+
+uint8_t cds_is_logp_in_progress(void);
+void cds_set_logp_in_progress(uint8_t value);
+
+uint8_t cds_is_load_unload_in_progress(void);
+void cds_set_load_unload_in_progress(uint8_t value);
+
+CDF_STATUS cds_alloc_context(void *p_cds_context, CDF_MODULE_ID moduleID,
+			     void **ppModuleContext, uint32_t size);
+
+CDF_STATUS cds_free_context(void *p_cds_context, CDF_MODULE_ID moduleID,
+			    void *pModuleContext);
+
+CDF_STATUS cds_get_vdev_types(tCDF_CON_MODE mode, uint32_t *type,
+			      uint32_t *subType);
+
+void cds_flush_work(void *work);
+void cds_flush_delayed_work(void *dwork);
+
+bool cds_is_packet_log_enabled(void);
+
+uint64_t cds_get_monotonic_boottime(void);
+
+void cds_trigger_recovery(void);
+
+void cds_set_wakelock_logging(bool value);
+bool cds_is_wakelock_enabled(void);
+void cds_set_ring_log_level(uint32_t ring_id, uint32_t log_level);
+enum wifi_driver_log_level cds_get_ring_log_level(uint32_t ring_id);
+void cds_set_multicast_logging(uint8_t value);
+uint8_t cds_is_multicast_logging(void);
+CDF_STATUS cds_set_log_completion(uint32_t is_fatal,
+		uint32_t type,
+		uint32_t sub_type);
+void cds_get_log_completion(uint32_t *is_fatal,
+		uint32_t *type,
+		uint32_t *sub_type);
+bool cds_is_log_report_in_progress(void);
+void cds_init_log_completion(void);
+void cds_deinit_log_completion(void);
+CDF_STATUS cds_flush_logs(uint32_t is_fatal,
+		uint32_t indicator,
+		uint32_t reason_code);
+void cds_logging_set_fw_flush_complete(void);
+#endif /* if !defined __CDS_API_H */

+ 732 - 0
core/cds/inc/cds_concurrency.h

@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef __CDS_CONCURRENCY_H
+#define __CDS_CONCURRENCY_H
+
+/**
+ * DOC: cds_concurrency.h
+ *
+ * CDS Concurrent Connection Management entity
+ */
+
+/* Include files */
+
+#include "wlan_hdd_main.h"
+
+#define MAX_NUMBER_OF_CONC_CONNECTIONS 3
+#define MAX_NUM_CHAN    128
+#define DBS_OPPORTUNISTIC_TIME    10
+
+/**
+ * enum cds_chain_mode - Chain Mask tx & rx combination.
+ *
+ * @CDS_ONE_ONE: One for Tx, One for Rx
+ * @CDS_TWO_TWO: Two for Tx, Two for Rx
+ * @CDS_MAX_NO_OF_CHAIN_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_chain_mode {
+	CDS_ONE_ONE = 0,
+	CDS_TWO_TWO,
+	CDS_MAX_NO_OF_CHAIN_MODE
+};
+
+/**
+ * enum cds_conc_priority_mode - t/p, powersave, latency.
+ *
+ * @CDS_THROUGHPUT: t/p is the priority
+ * @CDS_POWERSAVE: powersave is the priority
+ * @CDS_LATENCY: latency is the priority
+ * @CDS_MAX_CONC_PRIORITY_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_conc_priority_mode {
+	CDS_THROUGHPUT = 0,
+	CDS_POWERSAVE,
+	CDS_LATENCY,
+	CDS_MAX_CONC_PRIORITY_MODE
+};
+
+/**
+ * enum cds_con_mode - concurrency mode for PCL table
+ *
+ * @CDS_STA_MODE: station mode
+ * @CDS_SAP_MODE: SAP mode
+ * @CDS_P2P_CLIENT_MODE: P2P client mode
+ * @CDS_P2P_GO_MODE: P2P Go mode
+ * @CDS_IBSS_MODE: IBSS mode
+ * @CDS_MAX_NUM_OF_MODE: max value place holder
+ */
+enum cds_con_mode {
+	CDS_STA_MODE = 0,
+	CDS_SAP_MODE,
+	CDS_P2P_CLIENT_MODE,
+	CDS_P2P_GO_MODE,
+	CDS_IBSS_MODE,
+	CDS_MAX_NUM_OF_MODE
+};
+
+/**
+ * enum cds_pcl_type - Various types of Preferred channel list (PCL).
+ *
+ * @CDS_NONE: No channel preference
+ * @CDS_24G: 2.4 Ghz channels only
+ * @CDS_5G: 5 Ghz channels only
+ * @CDS_SCC_CH: SCC channel only
+ * @CDS_MCC_CH: MCC channels only
+ * @CDS_SCC_CH_24G: SCC channel & 2.4 Ghz channels
+ * @CDS_SCC_CH_5G: SCC channel & 5 Ghz channels
+ * @CDS_24G_SCC_CH: 2.4 Ghz channels & SCC channel
+ * @CDS_5G_SCC_CH: 5 Ghz channels & SCC channel
+ * @CDS_SCC_ON_5_SCC_ON_24_24G: SCC channel on 5 Ghz, SCC
+ *	channel on 2.4 Ghz & 2.4 Ghz channels
+ * @CDS_SCC_ON_5_SCC_ON_24_5G: SCC channel on 5 Ghz, SCC channel
+ *	on 2.4 Ghz & 5 Ghz channels
+ * @CDS_SCC_ON_24_SCC_ON_5_24G: SCC channel on 2.4 Ghz, SCC
+ *	channel on 5 Ghz & 2.4 Ghz channels
+ * @CDS_SCC_ON_24_SCC_ON_5_5G: SCC channel on 2.4 Ghz, SCC
+ *	channel on 5 Ghz & 5 Ghz channels
+ * @CDS_SCC_ON_5_SCC_ON_24: SCC channel on 5 Ghz, SCC channel on
+ *	2.4 Ghz
+ * @CDS_SCC_ON_24_SCC_ON_5: SCC channel on 2.4 Ghz, SCC channel
+ *	on 5 Ghz
+ * @CDS_MCC_CH_24G: MCC channels & 2.4 Ghz channels
+ * @CDS_MCC_CH_5G:  MCC channels & 5 Ghz channels
+ * @CDS_24G_MCC_CH: 2.4 Ghz channels & MCC channels
+ * @CDS_5G_MCC_CH: 5 Ghz channels & MCC channels
+ * @CDS_MAX_PCL_TYPE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_pcl_type {
+	CDS_NONE = 0,
+	CDS_24G,
+	CDS_5G,
+	CDS_SCC_CH,
+	CDS_MCC_CH,
+	CDS_SCC_CH_24G,
+	CDS_SCC_CH_5G,
+	CDS_24G_SCC_CH,
+	CDS_5G_SCC_CH,
+	CDS_SCC_ON_5_SCC_ON_24_24G,
+	CDS_SCC_ON_5_SCC_ON_24_5G,
+	CDS_SCC_ON_24_SCC_ON_5_24G,
+	CDS_SCC_ON_24_SCC_ON_5_5G,
+	CDS_SCC_ON_5_SCC_ON_24,
+	CDS_SCC_ON_24_SCC_ON_5,
+	CDS_MCC_CH_24G,
+	CDS_MCC_CH_5G,
+	CDS_24G_MCC_CH,
+	CDS_5G_MCC_CH,
+
+	CDS_MAX_PCL_TYPE
+};
+
+/**
+ * enum cds_one_connection_mode - Combination of first connection
+ * type, band & spatial stream used.
+ *
+ * @CDS_STA_24_1x1: STA connection using 1x1@2.4 Ghz
+ * @CDS_STA_24_2x2: STA connection using 2x2@2.4 Ghz
+ * @CDS_STA_5_1x1: STA connection using 1x1@5 Ghz
+ * @CDS_STA_5_2x2: STA connection using 2x2@5 Ghz
+ * @CDS_P2P_CLI_24_1x1: P2P Client connection using 1x1@2.4 Ghz
+ * @CDS_P2P_CLI_24_2x2: P2P Client connection using 2x2@2.4 Ghz
+ * @CDS_P2P_CLI_5_1x1: P2P Client connection using 1x1@5 Ghz
+ * @CDS_P2P_CLI_5_2x2: P2P Client connection using 2x2@5 Ghz
+ * @CDS_P2P_GO_24_1x1: P2P GO connection using 1x1@2.4 Ghz
+ * @CDS_P2P_GO_24_2x2: P2P GO connection using 2x2@2.4 Ghz
+ * @CDS_P2P_GO_5_1x1: P2P GO connection using 1x1@5 Ghz
+ * @CDS_P2P_GO_5_2x2: P2P GO connection using 2x2@5 Ghz
+ * @CDS_SAP_24_1x1: SAP connection using 1x1@2.4 Ghz
+ * @CDS_SAP_24_2x2: SAP connection using 2x2@2.4 Ghz
+ * @CDS_SAP_5_1x1: SAP connection using 1x1@5 Ghz
+ * @CDS_SAP_5_2x2: SAP connection using 2x2@5 Ghz
+ * @CDS_IBSS_24_1x1:  IBSS connection using 1x1@2.4 Ghz
+ * @CDS_IBSS_24_2x2:  IBSS connection using 2x2@2.4 Ghz
+ * @CDS_IBSS_5_1x1:  IBSS connection using 1x1@5 Ghz
+ * @CDS_IBSS_5_2x2:  IBSS connection using 2x2@5 Ghz
+ * @CDS_MAX_ONE_CONNECTION_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_one_connection_mode {
+	CDS_STA_24_1x1 = 0,
+	CDS_STA_24_2x2,
+	CDS_STA_5_1x1,
+	CDS_STA_5_2x2,
+	CDS_P2P_CLI_24_1x1,
+	CDS_P2P_CLI_24_2x2,
+	CDS_P2P_CLI_5_1x1,
+	CDS_P2P_CLI_5_2x2,
+	CDS_P2P_GO_24_1x1,
+	CDS_P2P_GO_24_2x2,
+	CDS_P2P_GO_5_1x1,
+	CDS_P2P_GO_5_2x2,
+	CDS_SAP_24_1x1,
+	CDS_SAP_24_2x2,
+	CDS_SAP_5_1x1,
+	CDS_SAP_5_2x2,
+	CDS_IBSS_24_1x1,
+	CDS_IBSS_24_2x2,
+	CDS_IBSS_5_1x1,
+	CDS_IBSS_5_2x2,
+
+	CDS_MAX_ONE_CONNECTION_MODE
+};
+
+/**
+ * enum cds_two_connection_mode - Combination of first two
+ * connections type, concurrency state, band & spatial stream
+ * used.
+ *
+ * @CDS_STA_SAP_SCC_24_1x1: STA & SAP connection on SCC using
+ *			1x1@2.4 Ghz
+ * @CDS_STA_SAP_SCC_24_2x2: STA & SAP connection on SCC using
+ *			2x2@2.4 Ghz
+ * @CDS_STA_SAP_MCC_24_1x1: STA & SAP connection on MCC using
+ *			1x1@2.4 Ghz
+ * @CDS_STA_SAP_MCC_24_2x2: STA & SAP connection on MCC using
+ *			2x2@2.4 Ghz
+ * @CDS_STA_SAP_SCC_5_1x1: STA & SAP connection on SCC using
+ *			1x1@5 Ghz
+ * @CDS_STA_SAP_SCC_5_2x2: STA & SAP connection on SCC using
+ *			2x2@5 Ghz
+ * @CDS_STA_SAP_MCC_5_1x1: STA & SAP connection on MCC using
+ *			1x1@5 Ghz
+ * @CDS_STA_SAP_MCC_5_2x2: STA & SAP connection on MCC using
+ *			2x2@5 Ghz
+ * @CDS_STA_SAP_DBS_1x1: STA & SAP connection on DBS using 1x1
+ * @CDS_STA_P2P_GO_SCC_24_1x1: STA & P2P GO connection on SCC
+ *			using 1x1@2.4 Ghz
+ * @CDS_STA_P2P_GO_SCC_24_2x2: STA & P2P GO connection on SCC
+ *			using 2x2@2.4 Ghz
+ * @CDS_STA_P2P_GO_MCC_24_1x1: STA & P2P GO connection on MCC
+ *			using 1x1@2.4 Ghz
+ * @CDS_STA_P2P_GO_MCC_24_2x2: STA & P2P GO connection on MCC
+ *			using 2x2@2.4 Ghz
+ * @CDS_STA_P2P_GO_SCC_5_1x1: STA & P2P GO connection on SCC
+ *			using 1x1@5 Ghz
+ * @CDS_STA_P2P_GO_SCC_5_2x2: STA & P2P GO connection on SCC
+ *			using 2x2@5 Ghz
+ * @CDS_STA_P2P_GO_MCC_5_1x1: STA & P2P GO connection on MCC
+ *			using 1x1@5 Ghz
+ * @CDS_STA_P2P_GO_MCC_5_2x2: STA & P2P GO connection on MCC
+ *			using 2x2@5 Ghz
+ * @CDS_STA_P2P_GO_DBS_1x1: STA & P2P GO connection on DBS using
+ *			1x1
+ * @CDS_STA_P2P_CLI_SCC_24_1x1: STA & P2P CLI connection on SCC
+ *			using 1x1@2.4 Ghz
+ * @CDS_STA_P2P_CLI_SCC_24_2x2: STA & P2P CLI connection on SCC
+ *			using 2x2@2.4 Ghz
+ * @CDS_STA_P2P_CLI_MCC_24_1x1: STA & P2P CLI connection on MCC
+ *			using 1x1@2.4 Ghz
+ * @CDS_STA_P2P_CLI_MCC_24_2x2: STA & P2P CLI connection on MCC
+ *			using 2x2@2.4 Ghz
+ * @CDS_STA_P2P_CLI_SCC_5_1x1: STA & P2P CLI connection on SCC
+ *			using 1x1@5 Ghz
+ * @CDS_STA_P2P_CLI_SCC_5_2x2: STA & P2P CLI connection on SCC
+ *			using 2x2@5 Ghz
+ * @CDS_STA_P2P_CLI_MCC_5_1x1: STA & P2P CLI connection on MCC
+ *			using 1x1@5 Ghz
+ * @CDS_STA_P2P_CLI_MCC_5_2x2: STA & P2P CLI connection on MCC
+ *			using 2x2@5 Ghz
+ * @CDS_STA_P2P_CLI_DBS_1x1: STA & P2P CLI connection on DBS
+ *			using 1x1
+ * @CDS_P2P_GO_P2P_CLI_SCC_24_1x1: P2P GO & CLI connection on
+ *			SCC using 1x1@2.4 Ghz
+ * @CDS_P2P_GO_P2P_CLI_SCC_24_2x2: P2P GO & CLI connection on
+ *			SCC using 2x2@2.4 Ghz
+ * @CDS_P2P_GO_P2P_CLI_MCC_24_1x1: P2P GO & CLI connection on
+ *			MCC using 1x1@2.4 Ghz
+ * @CDS_P2P_GO_P2P_CLI_MCC_24_2x2: P2P GO & CLI connection on
+ *			MCC using 2x2@2.4 Ghz
+ * @CDS_P2P_GO_P2P_CLI_SCC_5_1x1: P2P GO & CLI connection on
+ *			SCC using 1x1@5 Ghz
+ * @CDS_P2P_GO_P2P_CLI_SCC_5_2x2: P2P GO & CLI connection on
+ *			SCC using 2x2@5 Ghz
+ * @CDS_P2P_GO_P2P_CLI_MCC_5_1x1: P2P GO & CLI connection on
+ *			MCC using 1x1@5 Ghz
+ * @CDS_P2P_GO_P2P_CLI_MCC_5_2x2: P2P GO & CLI connection on
+ *			MCC using 2x2@5 Ghz
+ * @CDS_P2P_GO_P2P_CLI_DBS_1x1: P2P GO & CLI connection on DBS
+ *			using 1x1
+ * @CDS_P2P_GO_SAP_SCC_24_1x1: P2P GO & SAP connection on
+ *			SCC using 1x1@2.4 Ghz
+ * @CDS_P2P_GO_SAP_SCC_24_2x2: P2P GO & SAP connection on
+ *			SCC using 2x2@2.4 Ghz
+ * @CDS_P2P_GO_SAP_MCC_24_1x1: P2P GO & SAP connection on
+ *			MCC using 1x1@2.4 Ghz
+ * @CDS_P2P_GO_SAP_MCC_24_2x2: P2P GO & SAP connection on
+ *			MCC using 2x2@2.4 Ghz
+ * @CDS_P2P_GO_SAP_SCC_5_1x1: P2P GO & SAP connection on
+ *			SCC using 1x1@5 Ghz
+ * @CDS_P2P_GO_SAP_SCC_5_2x2: P2P GO & SAP connection on
+ *			SCC using 2x2@5 Ghz
+ * @CDS_P2P_GO_SAP_MCC_5_1x1: P2P GO & SAP connection on
+ *			MCC using 1x1@5 Ghz
+ * @CDS_P2P_GO_SAP_MCC_5_2x2: P2P GO & SAP connection on
+ *			MCC using 2x2@5 Ghz
+ * @CDS_P2P_GO_SAP_DBS_1x1: P2P GO & SAP connection on DBS using
+ *			1x1
+ * @CDS_MAX_TWO_CONNECTION_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_two_connection_mode {
+	CDS_STA_SAP_SCC_24_1x1 = 0,
+	CDS_STA_SAP_SCC_24_2x2,
+	CDS_STA_SAP_MCC_24_1x1,
+	CDS_STA_SAP_MCC_24_2x2,
+	CDS_STA_SAP_SCC_5_1x1,
+	CDS_STA_SAP_SCC_5_2x2,
+	CDS_STA_SAP_MCC_5_1x1,
+	CDS_STA_SAP_MCC_5_2x2,
+	CDS_STA_SAP_MCC_24_5_1x1,
+	CDS_STA_SAP_MCC_24_5_2x2,
+	CDS_STA_SAP_DBS_1x1,
+	CDS_STA_P2P_GO_SCC_24_1x1,
+	CDS_STA_P2P_GO_SCC_24_2x2,
+	CDS_STA_P2P_GO_MCC_24_1x1,
+	CDS_STA_P2P_GO_MCC_24_2x2,
+	CDS_STA_P2P_GO_SCC_5_1x1,
+	CDS_STA_P2P_GO_SCC_5_2x2,
+	CDS_STA_P2P_GO_MCC_5_1x1,
+	CDS_STA_P2P_GO_MCC_5_2x2,
+	CDS_STA_P2P_GO_MCC_24_5_1x1,
+	CDS_STA_P2P_GO_MCC_24_5_2x2,
+	CDS_STA_P2P_GO_DBS_1x1,
+	CDS_STA_P2P_CLI_SCC_24_1x1,
+	CDS_STA_P2P_CLI_SCC_24_2x2,
+	CDS_STA_P2P_CLI_MCC_24_1x1,
+	CDS_STA_P2P_CLI_MCC_24_2x2,
+	CDS_STA_P2P_CLI_SCC_5_1x1,
+	CDS_STA_P2P_CLI_SCC_5_2x2,
+	CDS_STA_P2P_CLI_MCC_5_1x1,
+	CDS_STA_P2P_CLI_MCC_5_2x2,
+	CDS_STA_P2P_CLI_MCC_24_5_1x1,
+	CDS_STA_P2P_CLI_MCC_24_5_2x2,
+	CDS_STA_P2P_CLI_DBS_1x1,
+	CDS_P2P_GO_P2P_CLI_SCC_24_1x1,
+	CDS_P2P_GO_P2P_CLI_SCC_24_2x2,
+	CDS_P2P_GO_P2P_CLI_MCC_24_1x1,
+	CDS_P2P_GO_P2P_CLI_MCC_24_2x2,
+	CDS_P2P_GO_P2P_CLI_SCC_5_1x1,
+	CDS_P2P_GO_P2P_CLI_SCC_5_2x2,
+	CDS_P2P_GO_P2P_CLI_MCC_5_1x1,
+	CDS_P2P_GO_P2P_CLI_MCC_5_2x2,
+	CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1,
+	CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2,
+	CDS_P2P_GO_P2P_CLI_DBS_1x1,
+	CDS_P2P_GO_SAP_SCC_24_1x1,
+	CDS_P2P_GO_SAP_SCC_24_2x2,
+	CDS_P2P_GO_SAP_MCC_24_1x1,
+	CDS_P2P_GO_SAP_MCC_24_2x2,
+	CDS_P2P_GO_SAP_SCC_5_1x1,
+	CDS_P2P_GO_SAP_SCC_5_2x2,
+	CDS_P2P_GO_SAP_MCC_5_1x1,
+	CDS_P2P_GO_SAP_MCC_5_2x2,
+	CDS_P2P_GO_SAP_MCC_24_5_1x1,
+	CDS_P2P_GO_SAP_MCC_24_5_2x2,
+	CDS_P2P_GO_SAP_DBS_1x1,
+
+	CDS_MAX_TWO_CONNECTION_MODE
+};
+
+/**
+ * enum cds_conc_next_action - actions to be taken on old
+ * connections.
+ *
+ * @CDS_NOP: No action
+ * @CDS_DBS: switch to DBS mode
+ * @CDS_DBS_DOWNGRADE: switch to DBS mode & downgrade to 1x1
+ * @CDS_MCC: switch to MCC/SCC mode
+ * @CDS_MCC_UPGRADE: switch to MCC/SCC mode & upgrade to 2x2
+ * @CDS_MAX_CONC_NEXT_ACTION: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_conc_next_action {
+	CDS_NOP = 0,
+	CDS_DBS,
+	CDS_DBS_DOWNGRADE,
+	CDS_MCC,
+	CDS_MCC_UPGRADE,
+	CDS_MAX_CONC_NEXT_ACTION
+};
+
+/**
+ * enum cds_band - wifi band.
+ *
+ * @CDS_BAND_24: 2.4 Ghz band
+ * @CDS_BAND_5: 5 Ghz band
+ * @CDS_MAX_BAND: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum cds_band {
+	CDS_BAND_24 = 0,
+	CDS_BAND_5,
+	CDS_MAX_BAND
+};
+
+/**
+ * struct cds_conc_connection_info - information of all existing
+ * connections in the wlan system
+ *
+ * @mode: connection type
+ * @chan: channel of the connection
+ * @mac: The HW mac it is running
+ * @chain_mask: chain mask (tx & rx combination) used by the connection
+ * @tx_spatial_stream: Tx spatial stream used by the connection
+ * @rx_spatial_stream: Rx spatial stream used by the connection
+ * @original_nss: nss negotiated at connection time
+ * @vdev_id: vdev id of the connection
+ * @in_use: if the table entry is active
+ */
+struct cds_conc_connection_info {
+	enum cds_con_mode mode;
+	uint8_t       chan;
+	uint8_t       mac;
+	enum cds_chain_mode chain_mask;
+	uint8_t       tx_spatial_stream;
+	uint8_t       rx_spatial_stream;
+	uint32_t      original_nss;
+	uint32_t      vdev_id;
+	bool          in_use;
+};
+
+bool cds_is_connection_in_progress(hdd_context_t *hdd_ctx);
+void cds_dump_concurrency_info(hdd_context_t *pHddCtx);
+void cds_set_concurrency_mode(hdd_context_t *pHddCtx, tCDF_CON_MODE mode);
+void cds_clear_concurrency_mode(hdd_context_t *pHddCtx,
+				     tCDF_CON_MODE mode);
+uint32_t cds_get_connection_count(hdd_context_t *hdd_ctx);
+/**
+ * cds_is_sta_connection_pending() - This function will check if sta connection
+ *                                   is pending or not.
+ * @hdd_ctx: pointer to hdd context
+ *
+ * This function will return the status of flag is_sta_connection_pending
+ *
+ * Return: true or false
+ */
+static inline bool
+cds_is_sta_connection_pending(hdd_context_t *hdd_ctx)
+{
+	bool status;
+	spin_lock(&hdd_ctx->sta_update_info_lock);
+	status = hdd_ctx->is_sta_connection_pending;
+	spin_unlock(&hdd_ctx->sta_update_info_lock);
+	return status;
+}
+
+/**
+ * cds_change_sta_conn_pending_status() - This function will change the value
+ *                                        of is_sta_connection_pending
+ * @hdd_ctx: pointer to hdd context
+ * @value: value to set
+ *
+ * This function will change the value of is_sta_connection_pending
+ *
+ * Return: none
+ */
+static inline void
+cds_change_sta_conn_pending_status(hdd_context_t *hdd_ctx,
+		bool value)
+{
+	spin_lock(&hdd_ctx->sta_update_info_lock);
+	hdd_ctx->is_sta_connection_pending = value;
+	spin_unlock(&hdd_ctx->sta_update_info_lock);
+}
+
+/**
+ * cds_is_sap_restart_required() - This function will check if sap restart
+ *                                 is pending or not.
+ * @hdd_ctx: pointer to hdd context.
+ *
+ * This function will return the status of flag is_sap_restart_required.
+ *
+ * Return: true or false
+ */
+static inline bool
+cds_is_sap_restart_required(hdd_context_t *hdd_ctx)
+{
+	bool status;
+	spin_lock(&hdd_ctx->sap_update_info_lock);
+	status = hdd_ctx->is_sap_restart_required;
+	spin_unlock(&hdd_ctx->sap_update_info_lock);
+	return status;
+}
+
+/**
+ * cds_change_sap_restart_required_status() - This function will change the
+ *                                            value of is_sap_restart_required
+ * @hdd_ctx: pointer to hdd context
+ * @value: value to set
+ *
+ * This function will change the value of is_sap_restart_required
+ *
+ * Return: none
+ */
+static inline void
+cds_change_sap_restart_required_status(hdd_context_t *hdd_ctx,
+		bool value)
+{
+	spin_lock(&hdd_ctx->sap_update_info_lock);
+	hdd_ctx->is_sap_restart_required = value;
+	spin_unlock(&hdd_ctx->sap_update_info_lock);
+}
+
+/**
+ * cds_set_connection_in_progress() - to set the connection in progress flag
+ * @hdd_ctx: pointer to hdd context
+ * @value: value to set
+ *
+ * This function will set the passed value to connection in progress flag.
+ * If value is previously being set to true then no need to set it again.
+ *
+ * Return: true if value is being set correctly and false otherwise.
+ */
+static inline bool
+cds_set_connection_in_progress(hdd_context_t *hdd_ctx,
+		bool value)
+{
+	bool status = true;
+	spin_lock(&hdd_ctx->connection_status_lock);
+	/*
+	 * if the value is set to true previously and if someone is
+	 * trying to make it true again then it could be some race
+	 * condition being triggered. Avoid this situation by returning
+	 * false
+	 */
+	if (hdd_ctx->connection_in_progress && value)
+		status = false;
+	else
+		hdd_ctx->connection_in_progress = value;
+	spin_unlock(&hdd_ctx->connection_status_lock);
+	return status;
+}
+
+
+int cds_cfg80211_get_concurrency_matrix(struct wiphy *wiphy,
+			struct wireless_dev *wdev,
+			const void *data,
+			int data_len);
+uint32_t cds_get_concurrency_mode(void);
+CDF_STATUS cds_check_and_restart_sap(hdd_context_t *hdd_ctx,
+		eCsrRoamResult roam_result,
+		hdd_station_ctx_t *hdd_sta_ctx);
+void cds_handle_conc_rule1(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile);
+#ifdef FEATURE_WLAN_CH_AVOID
+bool cds_handle_conc_rule2(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile,
+		uint32_t *roam_id);
+#else
+static inline bool cds_handle_conc_rule2(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile,
+		uint32_t *roam_id)
+{
+		return true;
+}
+#endif /* FEATURE_WLAN_CH_AVOID */
+bool cds_handle_conc_multiport(uint8_t session_id, uint8_t channel);
+
+#ifdef FEATURE_WLAN_FORCE_SAP_SCC
+void cds_force_sap_on_scc(hdd_context_t *hdd_ctx, eCsrRoamResult roam_result);
+#else
+static inline void cds_force_sap_on_scc(hdd_context_t *hdd_ctx,
+		eCsrRoamResult roam_result)
+{
+
+}
+#endif /* FEATURE_WLAN_FORCE_SAP_SCC */
+
+#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
+void cds_check_concurrent_intf_and_restart_sap(
+		hdd_context_t *hdd_ctx,
+		hdd_station_ctx_t *hdd_sta_ctx,
+		hdd_adapter_t *adapter);
+#else
+static inline void cds_check_concurrent_intf_and_restart_sap(
+		hdd_context_t *hdd_ctx,
+		hdd_station_ctx_t *hdd_sta_ctx,
+		hdd_adapter_t *adapter)
+{
+
+}
+#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH */
+uint8_t cds_is_mcc_in_24G(hdd_context_t *hdd_ctx);
+int32_t cds_set_mas(hdd_adapter_t *adapter, uint8_t mas_value);
+int cds_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapter,
+		uint32_t set_value);
+CDF_STATUS cds_change_mcc_go_beacon_interval(hdd_adapter_t *pHostapdAdapter);
+int cds_go_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapter,
+		uint32_t set_value);
+void cds_set_mcc_latency(hdd_adapter_t *adapter, int set_value);
+#if defined(FEATURE_WLAN_MCC_TO_SCC_SWITCH) || \
+		defined(FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE)
+void cds_restart_sap(hdd_adapter_t *ap_adapter);
+#else
+static inline void cds_restart_sap(hdd_adapter_t *ap_adapter)
+{
+
+}
+#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH ||
+	* FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
+	*/
+
+#ifdef FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
+void cds_check_and_restart_sap_with_non_dfs_acs(hdd_context_t *hdd_ctx);
+#else
+static inline void cds_check_and_restart_sap_with_non_dfs_acs(
+							hdd_context_t *hdd_ctx)
+{
+
+}
+#endif /* FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE */
+void cds_incr_active_session(hdd_context_t *pHddCtx, tCDF_CON_MODE mode,
+				uint8_t sessionId);
+void cds_decr_active_session(hdd_context_t *pHddCtx, tCDF_CON_MODE mode,
+				uint8_t sessionId);
+void cds_decr_session_set_pcl(hdd_context_t *hdd_ctx,
+		tCDF_CON_MODE mode,
+		uint8_t session_id);
+CDF_STATUS cds_init_policy_mgr(hdd_context_t *hdd_ctx);
+CDF_STATUS cds_get_pcl(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
+				uint8_t *pcl_Channels, uint32_t *len);
+bool cds_allow_concurrency(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
+				uint8_t channel, enum hw_mode_bandwidth bw);
+enum cds_conc_priority_mode cds_get_first_connection_pcl_table_index(
+				hdd_context_t *hdd_ctx);
+enum cds_one_connection_mode cds_get_second_connection_pcl_table_index(
+				hdd_context_t *hdd_ctx);
+enum cds_two_connection_mode cds_get_third_connection_pcl_table_index(
+				hdd_context_t *hdd_ctx);
+CDF_STATUS cds_mode_switch_dbs_to_mcc(hdd_context_t *hdd_ctx);
+CDF_STATUS cds_mode_switch_mcc_to_dbs(hdd_context_t *hdd_ctx);
+CDF_STATUS cds_incr_connection_count(hdd_context_t *hdd_ctx,
+					  uint32_t vdev_id);
+CDF_STATUS cds_update_connection_info(hdd_context_t *hdd_ctx,
+					   uint32_t vdev_id);
+CDF_STATUS cds_decr_connection_count(hdd_context_t *hdd_ctx,
+					  uint32_t vdev_id);
+CDF_STATUS cds_current_connections_update(
+				hdd_context_t *hdd_ctx, uint8_t channel);
+#ifdef MPC_UT_FRAMEWORK
+CDF_STATUS cds_incr_connection_count_utfw(hdd_context_t *hdd_ctx,
+		uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id);
+CDF_STATUS cds_update_connection_info_utfw(hdd_context_t *hdd_ctx,
+		uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id);
+CDF_STATUS cds_decr_connection_count_utfw(hdd_context_t *hdd_ctx,
+		uint32_t del_all, uint32_t vdev_id);
+struct cds_conc_connection_info *cds_get_conn_info(hdd_context_t *hdd_ctx,
+	uint32_t *len);
+enum cds_pcl_type get_pcl_from_first_conn_table(
+		enum cds_con_mode type,
+		enum cds_conc_priority_mode sys_pref);
+enum cds_pcl_type get_pcl_from_second_conn_table(
+	enum cds_one_connection_mode idx, enum cds_con_mode type,
+	enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable);
+enum cds_pcl_type get_pcl_from_third_conn_table(
+	enum cds_two_connection_mode idx, enum cds_con_mode type,
+	enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable);
+#else
+static inline CDF_STATUS cds_incr_connection_count_utfw(
+		hdd_context_t *hdd_ctx, uint32_t vdev_id,
+		uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id)
+{
+	return CDF_STATUS_SUCCESS;
+}
+static inline CDF_STATUS cds_update_connection_info_utfw(
+		hdd_context_t *hdd_ctx, uint32_t vdev_id,
+		uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id)
+{
+	return CDF_STATUS_SUCCESS;
+}
+static inline CDF_STATUS cds_decr_connection_count_utfw(
+		hdd_context_t *hdd_ctx,
+		uint32_t del_all, uint32_t vdev_id)
+{
+	return CDF_STATUS_SUCCESS;
+}
+static inline struct cds_conc_connection_info *cds_get_conn_info(
+		hdd_context_t *hdd_ctx, uint32_t *len)
+{
+	return NULL;
+}
+#endif
+enum cds_con_mode cds_convert_device_mode_to_hdd_type(
+				device_mode_t device_mode);
+uint32_t cds_get_connection_count(hdd_context_t *hdd_ctx);
+CDF_STATUS cds_soc_set_hw_mode(hdd_context_t *hdd_ctx,
+		enum hw_mode_ss_config mac0_ss,
+		enum hw_mode_bandwidth mac0_bw,
+		enum hw_mode_ss_config mac1_ss,
+		enum hw_mode_bandwidth mac1_bw,
+		enum hw_mode_dbs_capab dbs,
+		enum hw_mode_agile_dfs_capab dfs);
+enum cds_conc_next_action cds_need_opportunistic_upgrade(
+		hdd_context_t *hdd_ctx);
+CDF_STATUS cds_next_actions(
+		hdd_context_t *hdd_ctx, enum cds_conc_next_action action);
+void cds_set_dual_mac_scan_config(hdd_context_t *hdd_ctx,
+		uint8_t dbs_val,
+		uint8_t dbs_plus_agile_scan_val,
+		uint8_t single_mac_scan_with_dbs_val);
+void cds_set_dual_mac_fw_mode_config(hdd_context_t *hdd_ctx,
+		uint8_t dbs,
+		uint8_t dfs);
+void cds_soc_set_dual_mac_cfg_cb(enum set_hw_mode_status status,
+		uint32_t scan_config,
+		uint32_t fw_mode_config);
+bool cds_map_concurrency_mode(hdd_context_t *hdd_ctx,
+		tCDF_CON_MODE *old_mode, enum cds_con_mode *new_mode);
+CDF_STATUS cds_get_channel_from_scan_result(hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile, uint8_t *channel);
+#endif /* __CDS_CONCURRENCY_H */

+ 183 - 0
core/cds/inc/cds_crypto.h

@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#if !defined(__CDS_CRYPTO_H)
+#define __CDS_CRYPTO_H
+
+/**
+ * DOC:  cds_crypto.h
+ *
+ * Crypto APIs
+ *
+ */
+
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <cdf_memory.h>
+#include <cdf_list.h>
+#include <cds_get_bin.h>
+#include <cdf_trace.h>
+#include <cdf_event.h>
+#include <cdf_lock.h>
+#include <cds_reg_service.h>
+#include <cds_mq.h>
+#include <cds_packet.h>
+#include <cds_sched.h>
+#include <cdf_threads.h>
+#include <cdf_mc_timer.h>
+#include <cds_pack_align.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#include <linux/qcomwlan_secif.h>
+#endif
+
+#ifdef CONFIG_CNSS
+static inline struct crypto_ahash *cds_crypto_alloc_ahash(const char *alg_name,
+	u32 type, u32 mask)
+{
+	return wcnss_wlan_crypto_alloc_ahash(alg_name, type, mask);
+}
+#else
+static inline struct crypto_ahash *cds_crypto_alloc_ahash(const char *alg_name,
+	u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(alg_name, type, mask);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline struct crypto_cipher *
+cds_crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask)
+{
+	return wcnss_wlan_crypto_alloc_cipher(alg_name, type, mask);
+}
+#else
+static inline struct crypto_cipher *
+cds_crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_cipher(alg_name, type, mask);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline void cds_cmac_calc_mic(struct crypto_cipher *tfm, u8 *m,
+		u16 length, u8 *mac)
+{
+	wcnss_wlan_cmac_calc_mic(tfm, m, length, mac);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline void cds_crypto_free_cipher(struct crypto_cipher *tfm)
+{
+	wcnss_wlan_crypto_free_cipher(tfm);
+}
+#else
+static inline void cds_crypto_free_cipher(struct crypto_cipher *tfm)
+{
+	crypto_free_cipher(tfm);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline void cds_crypto_free_ahash(struct crypto_ahash *tfm)
+{
+	wcnss_wlan_crypto_free_ahash(tfm);
+}
+#else
+static inline void cds_crypto_free_ahash(struct crypto_ahash *tfm)
+{
+	crypto_free_ahash(tfm);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline int cds_crypto_ahash_setkey(struct crypto_ahash *tfm,
+	const u8 *key, unsigned int keylen)
+{
+	return wcnss_wlan_crypto_ahash_setkey(tfm, key, keylen);
+}
+#else
+static inline int cds_crypto_ahash_setkey(struct crypto_ahash *tfm,
+	const u8 *key, unsigned int keylen)
+{
+	return crypto_ahash_setkey(tfm, key, keylen);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline int cds_crypto_ahash_digest(struct ahash_request *req)
+{
+	return wcnss_wlan_crypto_ahash_digest(req);
+}
+#else
+static inline int cds_crypto_ahash_digest(struct ahash_request *req)
+{
+	return crypto_ahash_digest(req);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline struct crypto_ablkcipher *
+cds_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
+{
+	return wcnss_wlan_crypto_alloc_ablkcipher(alg_name, type, mask);
+}
+#else
+static inline struct crypto_ablkcipher *
+cds_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(alg_name, type, mask);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline void cds_ablkcipher_request_free(struct ablkcipher_request *req)
+{
+	wcnss_wlan_ablkcipher_request_free(req);
+}
+#else
+static inline void cds_ablkcipher_request_free(struct ablkcipher_request *req)
+{
+	ablkcipher_request_free(req);
+}
+#endif
+
+#ifdef CONFIG_CNSS
+static inline void cds_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
+{
+	wcnss_wlan_crypto_free_ablkcipher(tfm);
+}
+#else
+static inline void cds_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
+{
+	crypto_free_ablkcipher(tfm);
+}
+#endif
+
+#endif /* if !defined __CDS_CRYPTO_H */

+ 75 - 0
core/cds/inc/cds_get_bin.h

@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_GETBIN_H )
+#define __CDS_GETBIN_H
+
+/**=========================================================================
+
+   \file  cds_get_bin.h
+
+   \brief Connectivity driver services (CDS) binary APIs
+
+   Binary retrieval definitions and APIs.
+
+   These APIs allow components to retrieve binary contents (firmware,
+   configuration data, etc.) from a storage medium on the platform.
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cdf_types.h>
+#include <cdf_status.h>
+
+/*--------------------------------------------------------------------------
+   Preprocessor definitions and constants
+   ------------------------------------------------------------------------*/
+
+/*--------------------------------------------------------------------------
+   Type declarations
+   ------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------
+   Function declarations and documentation
+   ------------------------------------------------------------------------*/
+
+/**----------------------------------------------------------------------------
+   \brief cds_get_conparam()- function to read the insmod parameters
+   -----------------------------------------------------------------------------*/
+tCDF_CON_MODE cds_get_conparam(void);
+bool cds_concurrent_open_sessions_running(void);
+bool cds_max_concurrent_connections_reached(void);
+void cds_clear_concurrent_session_count(void);
+bool cds_is_multiple_active_sta_sessions(void);
+bool cds_is_sta_active_connection_exists(void);
+
+#ifdef WLAN_FEATURE_MBSSID
+bool cds_concurrent_beaconing_sessions_running(void);
+#endif
+#endif /* !defined __CDS_GETBIN_H */

+ 2105 - 0
core/cds/inc/cds_ieee80211_common.h

@@ -0,0 +1,2105 @@
+/*
+ * Copyright (c) 2011,2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef EXTERNAL_USE_ONLY
+#include "osdep.h"
+#endif /* EXTERNAL_USE_ONLY */
+/* NOTE(review): these #includes sit BEFORE the CDS_COMMON_IEEE80211_H_ guard
+ * below, so they are re-processed on every inclusion of this header;
+ * presumably the included headers carry their own guards -- confirm. */
+#include "cds_ieee80211_common_i.h"
+
+#ifndef CDS_COMMON_IEEE80211_H_
+#define CDS_COMMON_IEEE80211_H_
+
+/*
+ * 802.11 protocol definitions.
+ */
+
+/* is 802.11 address multicast/broadcast? (group bit of first octet) */
+#define IEEE80211_IS_MULTICAST(_a)  (*(_a) & 0x01)
+
+/* NOTE(review): tests only the first octet against 0x01 -- confirm callers
+ * only need this coarse check rather than the full 01:00:5e prefix. */
+#define IEEE80211_IS_IPV4_MULTICAST(_a)  (*(_a) == 0x01)
+
+/* IPv6 multicast MAC addresses begin with 33:33 */
+#define IEEE80211_IS_IPV6_MULTICAST(_a)		\
+	((_a)[0] == 0x33 &&			    \
+	 (_a)[1] == 0x33)
+
+#define IEEE80211_IS_BROADCAST(_a)		\
+	((_a)[0] == 0xff &&			    \
+	 (_a)[1] == 0xff &&			    \
+	 (_a)[2] == 0xff &&			    \
+	 (_a)[3] == 0xff &&			    \
+	 (_a)[4] == 0xff &&			    \
+	 (_a)[5] == 0xff)
+
+/* IEEE 802.11 PLCP header */
+struct ieee80211_plcp_hdr {
+	uint16_t i_sfd;
+	uint8_t i_signal;
+	uint8_t i_service;
+	uint16_t i_length;
+	uint16_t i_crc;
+} __packed;
+
+#define IEEE80211_PLCP_SFD      0xF3A0
+#define IEEE80211_PLCP_SERVICE  0x00
+
+/*
+ * generic definitions for IEEE 802.11 frames
+ */
+struct ieee80211_frame {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	/* addr1..addr3 are overlaid by i_addr_all for bulk access/copies */
+	union {
+		struct {
+			uint8_t i_addr1[IEEE80211_ADDR_LEN];
+			uint8_t i_addr2[IEEE80211_ADDR_LEN];
+			uint8_t i_addr3[IEEE80211_ADDR_LEN];
+		};
+		uint8_t i_addr_all[3 * IEEE80211_ADDR_LEN];
+	};
+	uint8_t i_seq[2];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+/* 3-address data frame with trailing QoS control field */
+struct ieee80211_qosframe {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_qos[2];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+struct ieee80211_qoscntl {
+	uint8_t i_qos[2];
+};
+
+/* 4-address (WDS) frame */
+struct ieee80211_frame_addr4 {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_addr4[IEEE80211_ADDR_LEN];
+} __packed;
+
+struct ieee80211_qosframe_addr4 {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_addr4[IEEE80211_ADDR_LEN];
+	uint8_t i_qos[2];
+} __packed;
+
+/* HTC frame for TxBF*/
+/* for TxBF RC */
+struct ieee80211_frame_min_one {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+
+} __packed;                     /* For TxBF RC */
+
+/* QoS frame carrying the 4-byte HT Control field */
+struct ieee80211_qosframe_htc {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_qos[2];
+	uint8_t i_htc[4];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+struct ieee80211_qosframe_htc_addr4 {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_addr4[IEEE80211_ADDR_LEN];
+	uint8_t i_qos[2];
+	uint8_t i_htc[4];
+} __packed;
+struct ieee80211_htc {
+	uint8_t i_htc[4];
+};
+/*HTC frame for TxBF*/
+
+/* control frame with two addresses (e.g. RTS-like layouts) */
+struct ieee80211_ctlframe_addr2 {
+	uint8_t i_fc[2];
+	uint8_t i_aidordur[2];  /* AID or duration */
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+} __packed;
+
+/* convenience casts from a generic frame pointer */
+#define IEEE80211_WHQ(wh)               ((struct ieee80211_qosframe *)(wh))
+#define IEEE80211_WH4(wh)               ((struct ieee80211_frame_addr4 *)(wh))
+#define IEEE80211_WHQ4(wh)              ((struct ieee80211_qosframe_addr4 *)(wh))
+
+/* Frame Control byte 0: protocol version / type / subtype fields */
+#define IEEE80211_FC0_VERSION_MASK          0x03
+#define IEEE80211_FC0_VERSION_SHIFT         0
+#define IEEE80211_FC0_VERSION_0             0x00
+#define IEEE80211_FC0_TYPE_MASK             0x0c
+#define IEEE80211_FC0_TYPE_SHIFT            2
+#define IEEE80211_FC0_TYPE_MGT              0x00
+#define IEEE80211_FC0_TYPE_CTL              0x04
+#define IEEE80211_FC0_TYPE_DATA             0x08
+
+#define IEEE80211_FC0_SUBTYPE_MASK          0xf0
+#define IEEE80211_FC0_SUBTYPE_SHIFT         4
+/* for TYPE_MGT */
+#define IEEE80211_FC0_SUBTYPE_ASSOC_REQ     0x00
+#define IEEE80211_FC0_SUBTYPE_ASSOC_RESP    0x10
+#define IEEE80211_FC0_SUBTYPE_REASSOC_REQ   0x20
+#define IEEE80211_FC0_SUBTYPE_REASSOC_RESP  0x30
+#define IEEE80211_FC0_SUBTYPE_PROBE_REQ     0x40
+#define IEEE80211_FC0_SUBTYPE_PROBE_RESP    0x50
+#define IEEE80211_FC0_SUBTYPE_BEACON        0x80
+#define IEEE80211_FC0_SUBTYPE_ATIM          0x90
+#define IEEE80211_FC0_SUBTYPE_DISASSOC      0xa0
+#define IEEE80211_FC0_SUBTYPE_AUTH          0xb0
+#define IEEE80211_FC0_SUBTYPE_DEAUTH        0xc0
+#define IEEE80211_FC0_SUBTYPE_ACTION        0xd0
+/* NOTE(review): "FCO" (letter O) below is a historical spelling in the name */
+#define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
+/* for TYPE_CTL */
+#define IEEE80211_FCO_SUBTYPE_Control_Wrapper   0x70    /* For TxBF RC */
+#define IEEE80211_FC0_SUBTYPE_BAR           0x80
+#define IEEE80211_FC0_SUBTYPE_PS_POLL       0xa0
+#define IEEE80211_FC0_SUBTYPE_RTS           0xb0
+#define IEEE80211_FC0_SUBTYPE_CTS           0xc0
+#define IEEE80211_FC0_SUBTYPE_ACK           0xd0
+#define IEEE80211_FC0_SUBTYPE_CF_END        0xe0
+#define IEEE80211_FC0_SUBTYPE_CF_END_ACK    0xf0
+/* for TYPE_DATA (bit combination) */
+#define IEEE80211_FC0_SUBTYPE_DATA          0x00
+#define IEEE80211_FC0_SUBTYPE_CF_ACK        0x10
+#define IEEE80211_FC0_SUBTYPE_CF_POLL       0x20
+#define IEEE80211_FC0_SUBTYPE_CF_ACPL       0x30
+#define IEEE80211_FC0_SUBTYPE_NODATA        0x40
+#define IEEE80211_FC0_SUBTYPE_CFACK         0x50
+#define IEEE80211_FC0_SUBTYPE_CFPOLL        0x60
+#define IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK 0x70
+#define IEEE80211_FC0_SUBTYPE_QOS           0x80
+#define IEEE80211_FC0_SUBTYPE_QOS_NULL      0xc0
+
+/* Frame Control byte 1: DS direction and flag bits */
+#define IEEE80211_FC1_DIR_MASK              0x03
+#define IEEE80211_FC1_DIR_NODS              0x00        /* STA->STA */
+#define IEEE80211_FC1_DIR_TODS              0x01        /* STA->AP  */
+#define IEEE80211_FC1_DIR_FROMDS            0x02        /* AP ->STA */
+#define IEEE80211_FC1_DIR_DSTODS            0x03        /* AP ->AP  */
+
+#define IEEE80211_FC1_MORE_FRAG             0x04
+#define IEEE80211_FC1_RETRY                 0x08
+#define IEEE80211_FC1_PWR_MGT               0x10
+#define IEEE80211_FC1_MORE_DATA             0x20
+#define IEEE80211_FC1_WEP                   0x40
+#define IEEE80211_FC1_ORDER                 0x80
+
+/* Sequence Control field layout: 4-bit fragment number, 12-bit sequence */
+#define IEEE80211_SEQ_FRAG_MASK             0x000f
+#define IEEE80211_SEQ_FRAG_SHIFT            0
+#define IEEE80211_SEQ_SEQ_MASK              0xfff0
+#define IEEE80211_SEQ_SEQ_SHIFT             4
+#define IEEE80211_SEQ_MAX                   4096
+
+/* signed-difference compare handles 12-bit sequence-number wraparound */
+#define IEEE80211_SEQ_LEQ(a,b)  ((int)((a)-(b)) <= 0)
+
+/* QoS Control field masks/shifts */
+#define IEEE80211_QOS_TXOP                  0x00ff
+
+#define IEEE80211_QOS_AMSDU                 0x80
+#define IEEE80211_QOS_AMSDU_S               7
+#define IEEE80211_QOS_ACKPOLICY             0x60
+#define IEEE80211_QOS_ACKPOLICY_S           5
+#define IEEE80211_QOS_EOSP                  0x10
+#define IEEE80211_QOS_EOSP_S                4
+#define IEEE80211_QOS_TID                   0x0f
+#define IEEE80211_MFP_TID                   0xff
+
+/* HT Control field bits (TxBF calibration/CSI) */
+#define IEEE80211_HTC0_TRQ                  0x02
+#define IEEE80211_HTC2_CalPos               0x03
+#define IEEE80211_HTC2_CalSeq               0x0C
+#define IEEE80211_HTC2_CSI_NONCOMP_BF       0x80
+#define IEEE80211_HTC2_CSI_COMP_BF          0xc0
+
+/* Set bits 14 and 15 to 1 when duration field carries Association ID */
+#define IEEE80211_FIELD_TYPE_AID            0xC000
+
+/* frame-classification helpers operating on the FC0 byte */
+#define IEEE80211_IS_BEACON(_frame)    ((((_frame)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) && \
+					(((_frame)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_BEACON))
+#define IEEE80211_IS_DATA(_frame)      (((_frame)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA)
+
+/* protected (FC1_WEP set) robust mgmt frames: deauth/disassoc/action */
+#define IEEE80211_IS_MFP_FRAME(_frame) ((((_frame)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) && \
+					((_frame)->i_fc[1] & IEEE80211_FC1_WEP) && \
+					((((_frame)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_DEAUTH) || \
+					 (((_frame)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_DISASSOC) || \
+					 (((_frame)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_ACTION)))
+#define IEEE80211_IS_AUTH(_frame)      ((((_frame)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) && \
+					(((_frame)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_AUTH))
+
+/* MCS Set */
+#define IEEE80211_RX_MCS_1_STREAM_BYTE_OFFSET 0
+#define IEEE80211_RX_MCS_2_STREAM_BYTE_OFFSET 1
+#define IEEE80211_RX_MCS_3_STREAM_BYTE_OFFSET 2
+#define IEEE80211_RX_MCS_ALL_NSTREAM_RATES 0xff
+#define IEEE80211_TX_MCS_OFFSET 12
+
+#define IEEE80211_TX_MCS_SET_DEFINED 0x80
+#define IEEE80211_TX_RX_MCS_SET_NOT_EQUAL 0x40
+#define IEEE80211_TX_1_SPATIAL_STREAMS 0x0
+#define IEEE80211_TX_2_SPATIAL_STREAMS 0x10
+#define IEEE80211_TX_3_SPATIAL_STREAMS 0x20
+#define IEEE80211_TX_4_SPATIAL_STREAMS 0x30
+
+#define IEEE80211_TX_MCS_SET 0xf8
+
+/*
+ * Subtype data: If bit 6 is set then the data frame contains no actual data.
+ */
+#define IEEE80211_FC0_SUBTYPE_NO_DATA_MASK  0x40
+#define IEEE80211_CONTAIN_DATA(_subtype) \
+	(!((_subtype) & IEEE80211_FC0_SUBTYPE_NO_DATA_MASK))
+
+#define IEEE8023_MAX_LEN 0x600  /* 1536 - larger is Ethernet II */
+/* SNAP OUI org codes used to classify encapsulated payloads */
+#define RFC1042_SNAP_ORGCODE_0 0x00
+#define RFC1042_SNAP_ORGCODE_1 0x00
+#define RFC1042_SNAP_ORGCODE_2 0x00
+
+#define BTEP_SNAP_ORGCODE_0 0x00
+#define BTEP_SNAP_ORGCODE_1 0x00
+#define BTEP_SNAP_ORGCODE_2 0xf8
+
+/* BT 3.0 */
+#define BTAMP_SNAP_ORGCODE_0 0x00
+#define BTAMP_SNAP_ORGCODE_1 0x19
+#define BTAMP_SNAP_ORGCODE_2 0x58
+
+/* Aironet OUI Codes */
+#define AIRONET_SNAP_CODE_0  0x00
+#define AIRONET_SNAP_CODE_1  0x40
+#define AIRONET_SNAP_CODE_2  0x96
+
+#define IEEE80211_LSIG_LEN  3
+#define IEEE80211_HTSIG_LEN 6
+#define IEEE80211_SB_LEN    2
+
+/*
+ * Information element header format
+ */
+struct ieee80211_ie_header {
+	uint8_t element_id;     /* Element Id */
+	uint8_t length;         /* IE Length */
+} __packed;
+
+/*
+ * Country information element.
+ */
+#define IEEE80211_COUNTRY_MAX_TRIPLETS (83)
+struct ieee80211_ie_country {
+	uint8_t country_id;
+	uint8_t country_len;
+	uint8_t country_str[3];
+	/* each triplet: first channel, number of channels, max tx power */
+	uint8_t country_triplet[IEEE80211_COUNTRY_MAX_TRIPLETS * 3];
+} __packed;
+
+/* does frame have QoS sequence control data */
+#define IEEE80211_QOS_HAS_SEQ(wh) \
+	(((wh)->i_fc[0] & \
+	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
+	 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
+
+#define WME_QOSINFO_UAPSD   0x80        /* Mask for U-APSD field */
+#define WME_QOSINFO_COUNT   0x0f        /* Mask for Param Set Count field */
+/*
+ * WME/802.11e information element.
+ */
+struct ieee80211_ie_wme {
+	uint8_t wme_id;         /* IEEE80211_ELEMID_VENDOR */
+	uint8_t wme_len;        /* length in bytes */
+	uint8_t wme_oui[3];     /* 0x00, 0x50, 0xf2 */
+	uint8_t wme_type;       /* OUI type */
+	uint8_t wme_subtype;    /* OUI subtype */
+	uint8_t wme_version;    /* spec revision */
+	uint8_t wme_info;       /* QoS info */
+} __packed;
+
+/*
+ * TS INFO part of the tspec element is a collection of bit flags
+ */
+#if _BYTE_ORDER == _BIG_ENDIAN
+struct ieee80211_tsinfo_bitmap {
+	uint8_t one : 1, direction : 2, tid : 4, reserved1 : 1;
+	uint8_t reserved2 : 2, dot1Dtag : 3, psb : 1, reserved3 : 1, zero : 1;
+	uint8_t reserved5 : 7, reserved4 : 1;
+} __packed;
+#else
+/* little-endian mirror of the layout above */
+struct ieee80211_tsinfo_bitmap {
+	uint8_t reserved1 : 1, tid : 4, direction : 2, one : 1;
+	uint8_t zero : 1, reserved3 : 1, psb : 1, dot1Dtag : 3, reserved2 : 2;
+	uint8_t reserved4 : 1, reserved5 : 7;
+} __packed;
+#endif
+
+/*
+ * WME/802.11e Tspec Element
+ */
+struct ieee80211_wme_tspec {
+	uint8_t ts_id;
+	uint8_t ts_len;
+	uint8_t ts_oui[3];
+	uint8_t ts_oui_type;
+	uint8_t ts_oui_subtype;
+	uint8_t ts_version;
+	uint8_t ts_tsinfo[3];
+	uint8_t ts_nom_msdu[2];
+	uint8_t ts_max_msdu[2];
+	uint8_t ts_min_svc[4];
+	uint8_t ts_max_svc[4];
+	uint8_t ts_inactv_intv[4];
+	uint8_t ts_susp_intv[4];
+	uint8_t ts_start_svc[4];
+	uint8_t ts_min_rate[4];
+	uint8_t ts_mean_rate[4];
+	uint8_t ts_peak_rate[4];
+	uint8_t ts_max_burst[4];
+	uint8_t ts_delay[4];
+	uint8_t ts_min_phy[4];
+	uint8_t ts_surplus[2];
+	uint8_t ts_medium_time[2];
+} __packed;
+
+/*
+ * WME AC parameter field
+ */
+struct ieee80211_wme_acparams {
+	uint8_t acp_aci_aifsn;
+	uint8_t acp_logcwminmax;
+	uint16_t acp_txop;
+} __packed;
+
+#define IEEE80211_WME_PARAM_LEN 24
+#define WME_NUM_AC              4       /* 4 AC categories */
+
+#define WME_PARAM_ACI           0x60    /* Mask for ACI field */
+#define WME_PARAM_ACI_S         5       /* Shift for ACI field */
+#define WME_PARAM_ACM           0x10    /* Mask for ACM bit */
+#define WME_PARAM_ACM_S         4       /* Shift for ACM bit */
+#define WME_PARAM_AIFSN         0x0f    /* Mask for aifsn field */
+#define WME_PARAM_AIFSN_S       0       /* Shift for aifsn field */
+#define WME_PARAM_LOGCWMIN      0x0f    /* Mask for CwMin field (in log) */
+#define WME_PARAM_LOGCWMIN_S    0       /* Shift for CwMin field */
+#define WME_PARAM_LOGCWMAX      0xf0    /* Mask for CwMax field (in log) */
+#define WME_PARAM_LOGCWMAX_S    4       /* Shift for CwMax field */
+
+/* map an access category to a representative TID (BE -> 0) */
+#define WME_AC_TO_TID(_ac) (	   \
+		((_ac) == WME_AC_VO) ? 6 : \
+		((_ac) == WME_AC_VI) ? 5 : \
+		((_ac) == WME_AC_BK) ? 1 : \
+		0)
+
+/* map a TID (0-7) back to its access category */
+#define TID_TO_WME_AC(_tid) (	   \
+		(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
+		(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
+		(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
+		WME_AC_VO)
+
+/*
+ * WME Parameter Element
+ */
+struct ieee80211_wme_param {
+	uint8_t param_id;
+	uint8_t param_len;
+	uint8_t param_oui[3];
+	uint8_t param_oui_type;
+	uint8_t param_oui_sybtype;
+	uint8_t param_version;
+	uint8_t param_qosInfo;
+	uint8_t param_reserved;
+	struct ieee80211_wme_acparams params_acParams[WME_NUM_AC];
+} __packed;
+
+/*
+ * WME U-APSD qos info field defines
+ */
+#define WME_CAPINFO_UAPSD_EN                    0x00000080
+#define WME_CAPINFO_UAPSD_VO                    0x00000001
+#define WME_CAPINFO_UAPSD_VI                    0x00000002
+#define WME_CAPINFO_UAPSD_BK                    0x00000004
+#define WME_CAPINFO_UAPSD_BE                    0x00000008
+#define WME_CAPINFO_UAPSD_ACFLAGS_SHIFT         0
+#define WME_CAPINFO_UAPSD_ACFLAGS_MASK          0xF
+#define WME_CAPINFO_UAPSD_MAXSP_SHIFT           5
+#define WME_CAPINFO_UAPSD_MAXSP_MASK            0x3
+#define WME_CAPINFO_IE_OFFSET                   8
+/* extract the 2-bit Max Service Period field from a qosinfo byte */
+#define WME_UAPSD_MAXSP(_qosinfo) (((_qosinfo) >> WME_CAPINFO_UAPSD_MAXSP_SHIFT) & WME_CAPINFO_UAPSD_MAXSP_MASK)
+/* non-zero when the given AC's trigger/delivery flag is set in qosinfo */
+#define WME_UAPSD_AC_ENABLED(_ac, _qosinfo) ( (1<<(3 - (_ac))) &   \
+					      (((_qosinfo) >> WME_CAPINFO_UAPSD_ACFLAGS_SHIFT) & WME_CAPINFO_UAPSD_ACFLAGS_MASK) )
+
+/* Mask used to determine whether all queues are UAPSD-enabled */
+#define WME_CAPINFO_UAPSD_ALL                   (WME_CAPINFO_UAPSD_VO |	\
+						 WME_CAPINFO_UAPSD_VI |	\
+						 WME_CAPINFO_UAPSD_BK |	\
+						 WME_CAPINFO_UAPSD_BE)
+#define WME_CAPINFO_UAPSD_NONE                  0
+
+#define WME_UAPSD_AC_MAX_VAL            1
+/* Parenthesized so the macro is safe inside larger expressions; the previous
+ * expansion "WME_UAPSD_AC_MAX_VAL+1" mis-binds under surrounding operators
+ * (e.g. 2*WME_UAPSD_AC_INVAL evaluated to 3). Standalone value unchanged: 2. */
+#define WME_UAPSD_AC_INVAL                      (WME_UAPSD_AC_MAX_VAL + 1)
+
+/*
+ * Atheros Advanced Capability information element.
+ */
+struct ieee80211_ie_athAdvCap {
+	uint8_t athAdvCap_id;   /* IEEE80211_ELEMID_VENDOR */
+	uint8_t athAdvCap_len;  /* length in bytes */
+	uint8_t athAdvCap_oui[3];       /* 0x00, 0x03, 0x7f */
+	uint8_t athAdvCap_type; /* OUI type */
+	uint16_t athAdvCap_version;     /* spec revision */
+	uint8_t athAdvCap_capability;   /* Capability info */
+	uint16_t athAdvCap_defKeyIndex;
+} __packed;
+
+/*
+ * Atheros Extended Capability information element.
+ */
+struct ieee80211_ie_ath_extcap {
+	uint8_t ath_extcap_id;  /* IEEE80211_ELEMID_VENDOR */
+	uint8_t ath_extcap_len; /* length in bytes */
+	uint8_t ath_extcap_oui[3];      /* 0x00, 0x03, 0x7f */
+	uint8_t ath_extcap_type;        /* OUI type */
+	uint8_t ath_extcap_subtype;     /* OUI subtype */
+	uint8_t ath_extcap_version;     /* spec revision */
+	uint32_t ath_extcap_extcap : 16,  /* B0-15  extended capabilities */
+		 ath_extcap_weptkipaggr_rxdelim : 8, /* B16-23 num delimiters for receiving WEP/TKIP aggregates */
+		 ath_extcap_reserved : 8; /* B24-31 reserved */
+} __packed;
+
+/*
+ * Atheros XR information element.
+ */
+struct ieee80211_xr_param {
+	uint8_t param_id;
+	uint8_t param_len;
+	uint8_t param_oui[3];
+	uint8_t param_oui_type;
+	uint8_t param_oui_sybtype;
+	uint8_t param_version;
+	uint8_t param_Info;
+	uint8_t param_base_bssid[IEEE80211_ADDR_LEN];
+	uint8_t param_xr_bssid[IEEE80211_ADDR_LEN];
+	uint16_t param_xr_beacon_interval;
+	uint8_t param_base_ath_capability;
+	uint8_t param_xr_ath_capability;
+} __packed;
+
+/*
+ * SFA information element.
+ */
+struct ieee80211_ie_sfa {
+	uint8_t sfa_id;         /* IEEE80211_ELEMID_VENDOR */
+	uint8_t sfa_len;        /* length in bytes */
+	uint8_t sfa_oui[3];     /* 0x00, 0x40, 0x96 */
+	uint8_t sfa_type;       /* OUI type */
+	uint8_t sfa_caps;       /* Capabilities */
+} __packed;
+
+/* Atheros capabilities (bit flags for athAdvCap_capability) */
+#define IEEE80211_ATHC_TURBOP   0x0001  /* Turbo Prime */
+#define IEEE80211_ATHC_COMP     0x0002  /* Compression */
+#define IEEE80211_ATHC_FF       0x0004  /* Fast Frames */
+#define IEEE80211_ATHC_XR       0x0008  /* Xtended Range support */
+#define IEEE80211_ATHC_AR       0x0010  /* Advanced Radar support */
+#define IEEE80211_ATHC_BURST    0x0020  /* Bursting - not negotiated */
+#define IEEE80211_ATHC_WME      0x0040  /* CWMin tuning */
+#define IEEE80211_ATHC_BOOST    0x0080  /* Boost */
+#define IEEE80211_ATHC_TDLS     0x0100  /* TDLS */
+
+/* Atheros extended capabilities */
+/* OWL device capable of WDS workaround */
+#define IEEE80211_ATHEC_OWLWDSWAR        0x0001
+#define IEEE80211_ATHEC_WEPTKIPAGGR          0x0002
+#define IEEE80211_ATHEC_EXTRADELIMWAR    0x0004
+/*
+ * Management Frames
+ */
+
+/*
+ * *** Platform-specific code?? ***
+ * In Vista one must use bit fields of type (unsigned short = uint16_t) to
+ * ensure data structure is of the correct size. ANSI C used to specify only
+ * "int" bit fields, which led to a larger structure size in Windows (32 bits).
+ *
+ * We must make sure the following construction is valid in all OS's.
+ */
+/* NOTE(review): bitfield ordering is compiler-defined; the anonymous struct
+ * is overlaid with .value so callers can also read/write the raw 16 bits. */
+union ieee80211_capability {
+	struct {
+		uint16_t ess : 1;
+		uint16_t ibss : 1;
+		uint16_t cf_pollable : 1;
+		uint16_t cf_poll_request : 1;
+		uint16_t privacy : 1;
+		uint16_t short_preamble : 1;
+		uint16_t pbcc : 1;
+		uint16_t channel_agility : 1;
+		uint16_t spectrum_management : 1;
+		uint16_t qos : 1;
+		uint16_t short_slot_time : 1;
+		uint16_t apsd : 1;
+		uint16_t reserved2 : 1;
+		uint16_t dsss_ofdm : 1;
+		uint16_t del_block_ack : 1;
+		uint16_t immed_block_ack : 1;
+	};
+
+	uint16_t value;
+} __packed;
+
+struct ieee80211_beacon_frame {
+	uint8_t timestamp[8];   /* the value of sender's TSFTIMER */
+	uint16_t beacon_interval;       /* the number of time units between target beacon transmission times */
+	union ieee80211_capability capability;
+/* Value of capability for every bit
+   #define IEEE80211_CAPINFO_ESS               0x0001
+   #define IEEE80211_CAPINFO_IBSS              0x0002
+   #define IEEE80211_CAPINFO_CF_POLLABLE       0x0004
+   #define IEEE80211_CAPINFO_CF_POLLREQ        0x0008
+   #define IEEE80211_CAPINFO_PRIVACY           0x0010
+   #define IEEE80211_CAPINFO_SHORT_PREAMBLE    0x0020
+   #define IEEE80211_CAPINFO_PBCC              0x0040
+   #define IEEE80211_CAPINFO_CHNL_AGILITY      0x0080
+   #define IEEE80211_CAPINFO_SPECTRUM_MGMT     0x0100
+   #define IEEE80211_CAPINFO_QOS               0x0200
+   #define IEEE80211_CAPINFO_SHORT_SLOTTIME    0x0400
+   #define IEEE80211_CAPINFO_APSD              0x0800
+   #define IEEE80211_CAPINFO_RADIOMEAS         0x1000
+   #define IEEE80211_CAPINFO_DSSSOFDM          0x2000
+   bits 14-15 are reserved
+ */
+	struct ieee80211_ie_header info_elements;      /* first IE follows */
+} __packed;
+
+/*
+ * Management Action Frames
+ */
+
+/* generic frame format */
+struct ieee80211_action {
+	uint8_t ia_category;
+	uint8_t ia_action;
+} __packed;
+
+/* spectrum action frame header */
+struct ieee80211_action_measrep_header {
+	struct ieee80211_action action_header;
+	uint8_t dialog_token;
+} __packed;
+
+/* categories (values of ia_category) */
+#define IEEE80211_ACTION_CAT_SPECTRUM       0   /* Spectrum management */
+#define IEEE80211_ACTION_CAT_QOS            1   /* IEEE QoS  */
+#define IEEE80211_ACTION_CAT_DLS            2   /* DLS */
+#define IEEE80211_ACTION_CAT_BA             3   /* BA */
+#define IEEE80211_ACTION_CAT_PUBLIC         4   /* Public Action Frame */
+#define IEEE80211_ACTION_CAT_HT             7   /* HT per IEEE802.11n-D1.06 */
+#define IEEE80211_ACTION_CAT_SA_QUERY       8   /* SA Query per IEEE802.11w, PMF */
+#define IEEE80211_ACTION_CAT_WMM_QOS       17   /* QoS from WMM specification */
+#define IEEE80211_ACTION_CAT_VHT           21   /* VHT Action */
+
+/* Spectrum Management actions (ia_action for CAT_SPECTRUM) */
+#define IEEE80211_ACTION_MEAS_REQUEST       0   /* Measure channels */
+#define IEEE80211_ACTION_MEAS_REPORT        1
+#define IEEE80211_ACTION_TPC_REQUEST        2   /* Transmit Power control */
+#define IEEE80211_ACTION_TPC_REPORT         3
+#define IEEE80211_ACTION_CHAN_SWITCH        4   /* 802.11h Channel Switch Announcement */
+
+/* HT actions */
+#define IEEE80211_ACTION_HT_TXCHWIDTH       0   /* recommended transmission channel width */
+#define IEEE80211_ACTION_HT_SMPOWERSAVE     1   /* Spatial Multiplexing (SM) Power Save */
+#define IEEE80211_ACTION_HT_CSI             4   /* CSI Frame */
+#define IEEE80211_ACTION_HT_NONCOMP_BF      5   /* Non-compressed Beamforming */
+#define IEEE80211_ACTION_HT_COMP_BF         6   /* Compressed Beamforming */
+
+/* VHT actions */
+#define IEEE80211_ACTION_VHT_OPMODE         2   /* Operating  mode notification */
+
+/* Spectrum channel switch action frame after IE*/
+/* Public Actions*/
+#define IEEE80211_ACTION_TDLS_DISCRESP  14      /* TDLS Discovery Response frame */
+
+/* HT - recommended transmission channel width */
+struct ieee80211_action_ht_txchwidth {
+	struct ieee80211_action at_header;
+	uint8_t at_chwidth;
+} __packed;
+
+/* values for at_chwidth */
+#define IEEE80211_A_HT_TXCHWIDTH_20         0
+#define IEEE80211_A_HT_TXCHWIDTH_2040       1
+
+/* HT - Spatial Multiplexing (SM) Power Save */
+struct ieee80211_action_ht_smpowersave {
+	struct ieee80211_action as_header;
+	uint8_t as_control;
+} __packed;
+
+/*HT - CSI Frame */                        /* for TxBF RC */
+#define MIMO_CONTROL_LEN 6
+struct ieee80211_action_ht_CSI {
+	struct ieee80211_action as_header;
+	uint8_t mimo_control[MIMO_CONTROL_LEN];
+} __packed;
+
+/*HT - V/CV report frame*/
+struct ieee80211_action_ht_txbf_rpt {
+	struct ieee80211_action as_header;
+	uint8_t mimo_control[MIMO_CONTROL_LEN];
+} __packed;
+
+/*
+ * 802.11ac Operating Mode  Notification
+ */
+struct ieee80211_ie_op_mode {
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint8_t rx_nss_type : 1, rx_nss : 3, reserved : 2, ch_width : 2;
+#else
+	uint8_t ch_width : 2, reserved : 2, rx_nss : 3, rx_nss_type : 1;
+#endif
+} __packed;
+
+struct ieee80211_ie_op_mode_ntfy {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	struct ieee80211_ie_op_mode opmode;
+} __packed;
+
+/* VHT - recommended Channel width and Nss */
+struct ieee80211_action_vht_opmode {
+	struct ieee80211_action at_header;
+	struct ieee80211_ie_op_mode at_op_mode;
+} __packed;
+
+/* values defined for 'as_control' field per 802.11n-D1.06 */
+#define IEEE80211_A_HT_SMPOWERSAVE_DISABLED     0x00    /* SM Power Save Disabled, SM packets ok  */
+#define IEEE80211_A_HT_SMPOWERSAVE_ENABLED      0x01    /* SM Power Save Enabled bit  */
+#define IEEE80211_A_HT_SMPOWERSAVE_MODE         0x02    /* SM Power Save Mode bit */
+#define IEEE80211_A_HT_SMPOWERSAVE_RESERVED     0xFC    /* SM Power Save Reserved bits */
+
+/* values defined for SM Power Save Mode bit */
+#define IEEE80211_A_HT_SMPOWERSAVE_STATIC       0x00    /* Static, SM packets not ok */
+#define IEEE80211_A_HT_SMPOWERSAVE_DYNAMIC      0x02    /* Dynamic, SM packets ok if preceded by RTS */
+
+/* DLS actions (ia_action for CAT_DLS) */
+#define IEEE80211_ACTION_DLS_REQUEST            0
+#define IEEE80211_ACTION_DLS_RESPONSE           1
+#define IEEE80211_ACTION_DLS_TEARDOWN           2
+
+struct ieee80211_dls_request {
+	struct ieee80211_action hdr;
+	uint8_t dst_addr[IEEE80211_ADDR_LEN];
+	uint8_t src_addr[IEEE80211_ADDR_LEN];
+	uint16_t capa_info;
+	uint16_t timeout;
+} __packed;
+
+struct ieee80211_dls_response {
+	struct ieee80211_action hdr;
+	uint16_t statuscode;
+	uint8_t dst_addr[IEEE80211_ADDR_LEN];
+	uint8_t src_addr[IEEE80211_ADDR_LEN];
+} __packed;
+
+/* BA actions (ia_action for CAT_BA) */
+#define IEEE80211_ACTION_BA_ADDBA_REQUEST       0       /* ADDBA request */
+#define IEEE80211_ACTION_BA_ADDBA_RESPONSE      1       /* ADDBA response */
+#define IEEE80211_ACTION_BA_DELBA               2       /* DELBA */
+
+/* Block Ack Parameter Set field, endian-specific bitfield layouts */
+struct ieee80211_ba_parameterset {
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint16_t buffersize : 10, /* B6-15  buffer size */
+		 tid : 4,       /* B2-5   TID */
+		 bapolicy : 1,  /* B1   block ack policy */
+		 amsdusupported : 1; /* B0   amsdu supported */
+#else
+	uint16_t amsdusupported : 1,      /* B0   amsdu supported */
+		 bapolicy : 1,  /* B1   block ack policy */
+		 tid : 4,       /* B2-5   TID */
+		 buffersize : 10; /* B6-15  buffer size */
+#endif
+} __packed;
+
+#define  IEEE80211_BA_POLICY_DELAYED      0
+#define  IEEE80211_BA_POLICY_IMMEDIATE    1
+#define  IEEE80211_BA_AMSDU_SUPPORTED     1
+
+/* Block Ack Starting Sequence Control field */
+struct ieee80211_ba_seqctrl {
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint16_t startseqnum : 12,        /* B4-15  starting sequence number */
+		 fragnum : 4;   /* B0-3  fragment number */
+#else
+	uint16_t fragnum : 4,     /* B0-3  fragment number */
+		 startseqnum : 12; /* B4-15  starting sequence number */
+#endif
+} __packed;
+
+struct ieee80211_delba_parameterset {
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint16_t tid : 4,         /* B12-15  tid */
+		 initiator : 1, /* B11     initiator */
+		 reserved0 : 11; /* B0-10   reserved */
+#else
+	uint16_t reserved0 : 11,  /* B0-10   reserved */
+		 initiator : 1, /* B11     initiator */
+		 tid : 4;       /* B12-15  tid */
+#endif
+} __packed;
+
+/* BA - ADDBA request */
+struct ieee80211_action_ba_addbarequest {
+	struct ieee80211_action rq_header;
+	uint8_t rq_dialogtoken;
+	struct ieee80211_ba_parameterset rq_baparamset;
+	uint16_t rq_batimeout;  /* in TUs */
+	struct ieee80211_ba_seqctrl rq_basequencectrl;
+} __packed;
+
+/* BA - ADDBA response */
+struct ieee80211_action_ba_addbaresponse {
+	struct ieee80211_action rs_header;
+	uint8_t rs_dialogtoken;
+	uint16_t rs_statuscode;
+	struct ieee80211_ba_parameterset rs_baparamset;
+	uint16_t rs_batimeout;  /* in TUs */
+} __packed;
+
+/* BA - DELBA */
+struct ieee80211_action_ba_delba {
+	struct ieee80211_action dl_header;
+	struct ieee80211_delba_parameterset dl_delbaparamset;
+	uint16_t dl_reasoncode;
+} __packed;
+
+/* MGT Notif actions (ia_action for CAT_WMM_QOS) */
+#define IEEE80211_WMM_QOS_ACTION_SETUP_REQ    0
+#define IEEE80211_WMM_QOS_ACTION_SETUP_RESP   1
+#define IEEE80211_WMM_QOS_ACTION_TEARDOWN     2
+
+#define IEEE80211_WMM_QOS_DIALOG_TEARDOWN     0
+#define IEEE80211_WMM_QOS_DIALOG_SETUP        1
+
+#define IEEE80211_WMM_QOS_TSID_DATA_TSPEC     6
+#define IEEE80211_WMM_QOS_TSID_SIG_TSPEC      7
+
+struct ieee80211_action_wmm_qos {
+	struct ieee80211_action ts_header;
+	uint8_t ts_dialogtoken;
+	uint8_t ts_statuscode;
+	struct ieee80211_wme_tspec ts_tspecie;
+} __packed;
+
+/*
+ * Control frames.
+ */
+struct ieee80211_frame_min {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/*
+ * BAR frame format (fields of i_ctl below)
+ */
+#define IEEE80211_BAR_CTL_TID_M     0xF000      /* tid mask             */
+#define IEEE80211_BAR_CTL_TID_S         12      /* tid shift            */
+#define IEEE80211_BAR_CTL_NOACK     0x0001      /* no-ack policy        */
+#define IEEE80211_BAR_CTL_COMBA     0x0004      /* compressed block-ack */
+
+/*
+ * SA Query Action mgmt Frame
+ */
+/* NOTE(review): unlike the sibling frame structs this one is not declared
+ * __packed -- confirm that is intentional before relying on its size. */
+struct ieee80211_action_sa_query {
+	struct ieee80211_action sa_header;
+	uint16_t sa_transId;
+};
+
+typedef enum ieee80211_action_sa_query_type {
+	IEEE80211_ACTION_SA_QUERY_REQUEST,
+	IEEE80211_ACTION_SA_QUERY_RESPONSE
+} ieee80211_action_sa_query_type_t;
+
+struct ieee80211_frame_bar {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	uint8_t i_ta[IEEE80211_ADDR_LEN];
+	uint16_t i_ctl;
+	uint16_t i_seq;
+	/* FCS */
+} __packed;
+
+/* RTS control frame */
+struct ieee80211_frame_rts {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	uint8_t i_ta[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/* CTS control frame */
+struct ieee80211_frame_cts {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/* ACK control frame */
+struct ieee80211_frame_ack {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/* PS-Poll control frame; duration field carries the AID here */
+struct ieee80211_frame_pspoll {
+	uint8_t i_fc[2];
+	uint8_t i_aid[2];
+	uint8_t i_bssid[IEEE80211_ADDR_LEN];
+	uint8_t i_ta[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_cfend {  /* NB: also CF-End+CF-Ack */
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];       /* should be zero */
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	uint8_t i_bssid[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/*
+ * BEACON management packets
+ *
+ *  octet timestamp[8]
+ *  octet beacon interval[2]
+ *  octet capability information[2]
+ *  information element
+ *      octet elemid
+ *      octet length
+ *      octet information[length]
+ */
+
+typedef uint8_t *ieee80211_mgt_beacon_t;
+
+/* little-endian 16-bit reads from the raw beacon body */
+#define IEEE80211_BEACON_INTERVAL(beacon) \
+	((beacon)[8] | ((beacon)[9] << 8))
+#define IEEE80211_BEACON_CAPABILITY(beacon) \
+	((beacon)[10] | ((beacon)[11] << 8))
+
+/* Capability Information field bits */
+#define IEEE80211_CAPINFO_ESS               0x0001
+#define IEEE80211_CAPINFO_IBSS              0x0002
+#define IEEE80211_CAPINFO_CF_POLLABLE       0x0004
+#define IEEE80211_CAPINFO_CF_POLLREQ        0x0008
+#define IEEE80211_CAPINFO_PRIVACY           0x0010
+#define IEEE80211_CAPINFO_SHORT_PREAMBLE    0x0020
+#define IEEE80211_CAPINFO_PBCC              0x0040
+#define IEEE80211_CAPINFO_CHNL_AGILITY      0x0080
+#define IEEE80211_CAPINFO_SPECTRUM_MGMT     0x0100
+#define IEEE80211_CAPINFO_QOS               0x0200
+#define IEEE80211_CAPINFO_SHORT_SLOTTIME    0x0400
+#define IEEE80211_CAPINFO_APSD              0x0800
+#define IEEE80211_CAPINFO_RADIOMEAS         0x1000
+#define IEEE80211_CAPINFO_DSSSOFDM          0x2000
+/* bits 14-15 are reserved */
+
+/*
+ * 802.11i/WPA information element (maximally sized).
+ */
+struct ieee80211_ie_wpa {
+	uint8_t wpa_id;         /* IEEE80211_ELEMID_VENDOR */
+	uint8_t wpa_len;        /* length in bytes */
+	uint8_t wpa_oui[3];     /* 0x00, 0x50, 0xf2 */
+	uint8_t wpa_type;       /* OUI type */
+	uint16_t wpa_version;   /* spec revision */
+	uint32_t wpa_mcipher[1];        /* multicast/group key cipher */
+	uint16_t wpa_uciphercnt;        /* # pairwise key ciphers */
+	uint32_t wpa_uciphers[8];       /* ciphers */
+	uint16_t wpa_authselcnt;        /* authentication selector cnt */
+	uint32_t wpa_authsels[8];       /* selectors */
+	uint16_t wpa_caps;      /* 802.11i capabilities */
+	uint16_t wpa_pmkidcnt;  /* 802.11i pmkid count */
+	uint16_t wpa_pmkids[8]; /* 802.11i pmkids */
+} __packed;
+
+/* the endian-specific bitfields above require _BYTE_ORDER to be defined */
+#ifndef _BYTE_ORDER
+#error "Don't know native byte order"
+#endif
+
+#ifndef IEEE80211N_IE
+/* Temporary vendor specific IE for 11n pre-standard interoperability */
+#define VENDOR_HT_OUI       0x00904c
+#define VENDOR_HT_CAP_ID    51
+#define VENDOR_HT_INFO_ID   52
+#endif
+
+#ifdef ATH_SUPPORT_TxBF
+/*
+ * HT Capabilities "Transmit Beamforming Capabilities" field.  The
+ * anonymous bitfield struct and the raw 32-bit 'value' overlay the same
+ * storage; the two #if branches declare the bits in mirrored order so
+ * the big- and little-endian layouts describe the same on-air word.
+ */
+union ieee80211_hc_txbf {
+	struct {
+#if _BYTE_ORDER == _BIG_ENDIAN
+		uint32_t reserved : 3,
+			 channel_estimation_cap : 2,
+			 csi_max_rows_bfer : 2,
+			 comp_bfer_antennas : 2,
+			 noncomp_bfer_antennas : 2,
+			 csi_bfer_antennas : 2,
+			 minimal_grouping : 2,
+			 explicit_comp_bf : 2,
+			 explicit_noncomp_bf : 2,
+			 explicit_csi_feedback : 2,
+			 explicit_comp_steering : 1,
+			 explicit_noncomp_steering : 1,
+			 explicit_csi_txbf_capable : 1,
+			 calibration : 2,
+			 implicit_txbf_capable : 1,
+			 tx_ndp_capable : 1,
+			 rx_ndp_capable : 1,
+			 tx_staggered_sounding : 1,
+			 rx_staggered_sounding : 1, implicit_rx_capable : 1;
+#else
+		uint32_t implicit_rx_capable : 1,
+			 rx_staggered_sounding : 1,
+			 tx_staggered_sounding : 1,
+			 rx_ndp_capable : 1,
+			 tx_ndp_capable : 1,
+			 implicit_txbf_capable : 1,
+			 calibration : 2,
+			 explicit_csi_txbf_capable : 1,
+			 explicit_noncomp_steering : 1,
+			 explicit_comp_steering : 1,
+			 explicit_csi_feedback : 2,
+			 explicit_noncomp_bf : 2,
+			 explicit_comp_bf : 2,
+			 minimal_grouping : 2,
+			 csi_bfer_antennas : 2,
+			 noncomp_bfer_antennas : 2,
+			 comp_bfer_antennas : 2,
+			 csi_max_rows_bfer : 2, channel_estimation_cap : 2, reserved : 3;
+#endif
+	};
+
+	uint32_t value;         /* raw access to the whole 32-bit field */
+} __packed;
+#endif
+
+/* Fixed (common) portion of the HT Capability element, shared by the
+ * standard IE (ieee80211_ie_htcap) and the temporary vendor-private
+ * pre-11n form (vendor_ie_htcap) defined below. */
+struct ieee80211_ie_htcap_cmn {
+	uint16_t hc_cap;        /* HT capabilities */
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint8_t hc_reserved : 3,  /* B5-7 reserved */
+		hc_mpdudensity : 3, /* B2-4 MPDU density (aka Minimum MPDU Start Spacing) */
+		hc_maxampdu : 2; /* B0-1 maximum rx A-MPDU factor */
+#else
+	uint8_t hc_maxampdu : 2,  /* B0-1 maximum rx A-MPDU factor */
+		hc_mpdudensity : 3, /* B2-4 MPDU density (aka Minimum MPDU Start Spacing) */
+		hc_reserved : 3; /* B5-7 reserved */
+#endif
+	uint8_t hc_mcsset[16];  /* supported MCS set */
+	uint16_t hc_extcap;     /* extended HT capabilities */
+#ifdef ATH_SUPPORT_TxBF
+	union ieee80211_hc_txbf hc_txbf;        /* txbf capabilities */
+#else
+	uint32_t hc_txbf;       /* txbf capabilities */
+#endif
+	uint8_t hc_antenna;     /* antenna capabilities */
+} __packed;
+
+/*
+ * 802.11n HT Capability IE
+ */
+struct ieee80211_ie_htcap {
+	uint8_t hc_id;          /* element ID */
+	uint8_t hc_len;         /* length in bytes */
+	struct ieee80211_ie_htcap_cmn hc_ie;
+} __packed;
+
+/*
+ * Temporary vendor private HT Capability IE
+ */
+struct vendor_ie_htcap {
+	uint8_t hc_id;          /* element ID */
+	uint8_t hc_len;         /* length in bytes */
+	uint8_t hc_oui[3];
+	uint8_t hc_ouitype;
+	struct ieee80211_ie_htcap_cmn hc_ie;
+} __packed;
+
+/* HT capability flags */
+#define IEEE80211_HTCAP_C_ADVCODING             0x0001
+#define IEEE80211_HTCAP_C_CHWIDTH40             0x0002
+#define IEEE80211_HTCAP_C_SMPOWERSAVE_STATIC    0x0000  /* Capable of SM Power Save (Static) */
+#define IEEE80211_HTCAP_C_SMPOWERSAVE_DYNAMIC   0x0004  /* Capable of SM Power Save (Dynamic) */
+#define IEEE80211_HTCAP_C_SM_RESERVED           0x0008  /* Reserved */
+#define IEEE80211_HTCAP_C_SM_ENABLED            0x000c  /* SM enabled, no SM Power Save */
+#define IEEE80211_HTCAP_C_GREENFIELD            0x0010
+#define IEEE80211_HTCAP_C_SHORTGI20             0x0020
+#define IEEE80211_HTCAP_C_SHORTGI40             0x0040
+#define IEEE80211_HTCAP_C_TXSTBC                0x0080
+#define IEEE80211_HTCAP_C_TXSTBC_S                   7
+#define IEEE80211_HTCAP_C_RXSTBC                0x0300  /* 2 bits */
+#define IEEE80211_HTCAP_C_RXSTBC_S                   8
+#define IEEE80211_HTCAP_C_DELAYEDBLKACK         0x0400
+#define IEEE80211_HTCAP_C_MAXAMSDUSIZE          0x0800  /* 1 = 8K, 0 = 3839B */
+#define IEEE80211_HTCAP_C_DSSSCCK40             0x1000
+#define IEEE80211_HTCAP_C_PSMP                  0x2000
+#define IEEE80211_HTCAP_C_INTOLERANT40          0x4000
+#define IEEE80211_HTCAP_C_LSIGTXOPPROT          0x8000
+
+#define IEEE80211_HTCAP_C_SM_MASK               0x000c  /* Spatial Multiplexing (SM) capability bitmask */
+
+/* B0-1 maximum rx A-MPDU factor 2^(13+Max Rx A-MPDU Factor) */
+enum {
+	IEEE80211_HTCAP_MAXRXAMPDU_8192,        /* 2 ^ 13 */
+	IEEE80211_HTCAP_MAXRXAMPDU_16384,       /* 2 ^ 14 */
+	IEEE80211_HTCAP_MAXRXAMPDU_32768,       /* 2 ^ 15 */
+	IEEE80211_HTCAP_MAXRXAMPDU_65536,       /* 2 ^ 16 */
+};
+#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR   13
+
+/* B2-4 MPDU density (usec) */
+enum {
+	IEEE80211_HTCAP_MPDUDENSITY_NA, /* No time restriction */
+	IEEE80211_HTCAP_MPDUDENSITY_0_25,       /* 1/4 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_0_5,        /* 1/2 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_1,  /* 1 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_2,  /* 2 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_4,  /* 4 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_8,  /* 8 usec */
+	IEEE80211_HTCAP_MPDUDENSITY_16, /* 16 usec */
+};
+
+/* HT extended capability flags */
+#define IEEE80211_HTCAP_EXTC_PCO                0x0001
+#define IEEE80211_HTCAP_EXTC_TRANS_TIME_RSVD    0x0000
+#define IEEE80211_HTCAP_EXTC_TRANS_TIME_400     0x0002  /* 20-40 switch time */
+#define IEEE80211_HTCAP_EXTC_TRANS_TIME_1500    0x0004  /* in us             */
+#define IEEE80211_HTCAP_EXTC_TRANS_TIME_5000    0x0006
+#define IEEE80211_HTCAP_EXTC_RSVD_1             0x00f8
+#define IEEE80211_HTCAP_EXTC_MCS_FEEDBACK_NONE  0x0000
+#define IEEE80211_HTCAP_EXTC_MCS_FEEDBACK_RSVD  0x0100
+#define IEEE80211_HTCAP_EXTC_MCS_FEEDBACK_UNSOL 0x0200
+#define IEEE80211_HTCAP_EXTC_MCS_FEEDBACK_FULL  0x0300
+#define IEEE80211_HTCAP_EXTC_RSVD_2             0xfc00
+#ifdef ATH_SUPPORT_TxBF
+#define IEEE80211_HTCAP_EXTC_HTC_SUPPORT        0x0400
+#endif
+
+/* Fixed (common) portion of the HT Information element, shared by the
+ * standard and vendor-private IEs below. */
+struct ieee80211_ie_htinfo_cmn {
+	uint8_t hi_ctrlchannel; /* control channel */
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint8_t hi_serviceinterval : 3,   /* B5-7 svc interval granularity */
+		hi_ctrlaccess : 1, /* B4   controlled access only */
+		hi_rifsmode : 1, /* B3   rifs mode */
+		hi_txchwidth : 1, /* B2   recommended xmiss width set */
+		hi_extchoff : 2; /* B0-1 extension channel offset */
+
+/*
+ * The following 2 consecutive bytes are defined as one 16-bit word in the
+ * 802.11n spec.  Some processors store the MSB at the lower memory address,
+ * which would produce the wrong byte sequence in the beacon, so the word is
+ * broken into per-byte definitions that are correct on all processors.
+ */
+
+	uint8_t hi_reserved3 : 3, /* B5-7 reserved */
+		hi_obssnonhtpresent : 1, /* B4   OBSS non-HT STA present */
+		hi_txburstlimit : 1, /* B3   transmit burst limit */
+		hi_nongfpresent : 1, /* B2   non greenfield devices present */
+		hi_opmode : 2;  /* B0-1 operating mode */
+
+	uint8_t hi_reserved0;   /* B0-7 (B8-15 in 11n) reserved */
+
+/* The following 2 consecutive bytes are defined in word in 80211n spec. */
+
+	uint8_t hi_dualctsprot : 1,       /* B7   dual CTS protection */
+		hi_dualbeacon : 1, /* B6   dual beacon */
+		hi_reserved2 : 6; /* B0-5 reserved */
+	uint8_t hi_reserved1 : 4, /* B4-7 (B12-15 in 11n) reserved */
+		hi_pcophase : 1, /* B3   (B11 in 11n)  pco phase */
+		hi_pcoactive : 1, /* B2   (B10 in 11n)  pco active */
+		hi_lsigtxopprot : 1, /* B1   (B9 in 11n)   l-sig txop protection full support */
+		hi_stbcbeacon : 1; /* B0   (B8 in 11n)   STBC beacon */
+#else
+	uint8_t hi_extchoff : 2,  /* B0-1 extension channel offset */
+		hi_txchwidth : 1, /* B2   recommended xmiss width set */
+		hi_rifsmode : 1, /* B3   rifs mode */
+		hi_ctrlaccess : 1, /* B4   controlled access only */
+		hi_serviceinterval : 3; /* B5-7 svc interval granularity */
+	uint16_t hi_opmode : 2,   /* B0-1 operating mode */
+		 hi_nongfpresent : 1, /* B2   non greenfield devices present */
+		 hi_txburstlimit : 1, /* B3   transmit burst limit */
+		 hi_obssnonhtpresent : 1, /* B4   OBSS non-HT STA present */
+		 hi_reserved0 : 11; /* B5-15 reserved */
+	uint16_t hi_reserved2 : 6,        /* B0-5 reserved */
+		 hi_dualbeacon : 1, /* B6   dual beacon */
+		 hi_dualctsprot : 1, /* B7   dual CTS protection */
+		 hi_stbcbeacon : 1, /* B8   STBC beacon */
+		 hi_lsigtxopprot : 1, /* B9   l-sig txop protection full support */
+		 hi_pcoactive : 1, /* B10  pco active */
+		 hi_pcophase : 1, /* B11  pco phase */
+		 hi_reserved1 : 4; /* B12-15 reserved */
+#endif
+	uint8_t hi_basicmcsset[16];     /* basic MCS set */
+} __packed;
+
+/*
+ * 802.11n HT Information IE
+ */
+struct ieee80211_ie_htinfo {
+	uint8_t hi_id;          /* element ID */
+	uint8_t hi_len;         /* length in bytes */
+	struct ieee80211_ie_htinfo_cmn hi_ie;
+} __packed;
+
+/*
+ * Temporary vendor private HT Information IE
+ */
+struct vendor_ie_htinfo {
+	uint8_t hi_id;          /* element ID */
+	uint8_t hi_len;         /* length in bytes */
+	uint8_t hi_oui[3];
+	uint8_t hi_ouitype;
+	struct ieee80211_ie_htinfo_cmn hi_ie;
+} __packed;
+
+/* extension channel offset (2 bit signed number) */
+enum {
+	IEEE80211_HTINFO_EXTOFFSET_NA = 0,      /* 0  no extension channel is present */
+	IEEE80211_HTINFO_EXTOFFSET_ABOVE = 1,   /* +1 extension channel above control channel */
+	IEEE80211_HTINFO_EXTOFFSET_UNDEF = 2,   /* -2 undefined */
+	IEEE80211_HTINFO_EXTOFFSET_BELOW = 3    /* -1 extension channel below control channel */
+};
+
+/* recommended transmission width set */
+enum {
+	IEEE80211_HTINFO_TXWIDTH_20,
+	IEEE80211_HTINFO_TXWIDTH_2040
+};
+
+/* operating flags */
+#define IEEE80211_HTINFO_OPMODE_PURE                0x00        /* no protection */
+#define IEEE80211_HTINFO_OPMODE_MIXED_PROT_OPT      0x01        /* prot optional (legacy device maybe present) */
+#define IEEE80211_HTINFO_OPMODE_MIXED_PROT_40       0x02        /* prot required (20 MHz) */
+#define IEEE80211_HTINFO_OPMODE_MIXED_PROT_ALL      0x03        /* prot required (legacy devices present) */
+#define IEEE80211_HTINFO_OPMODE_NON_GF_PRESENT      0x04        /* non-greenfield devices present */
+
+#define IEEE80211_HTINFO_OPMODE_MASK                0x03        /* For protection 0x00-0x03 */
+
+/* Non-greenfield STAs present */
+enum {
+	IEEE80211_HTINFO_NON_GF_NOT_PRESENT,    /* Non-greenfield STAs not present */
+	IEEE80211_HTINFO_NON_GF_PRESENT,        /* Non-greenfield STAs present */
+};
+
+/* Transmit Burst Limit */
+enum {
+	IEEE80211_HTINFO_TXBURST_UNLIMITED,     /* Transmit Burst is unlimited */
+	IEEE80211_HTINFO_TXBURST_LIMITED,       /* Transmit Burst is limited */
+};
+
+/* OBSS Non-HT STAs present */
+enum {
+	IEEE80211_HTINFO_OBSS_NONHT_NOT_PRESENT,        /* OBSS Non-HT STAs not present */
+	IEEE80211_HTINFO_OBSS_NONHT_PRESENT,    /* OBSS Non-HT STAs present */
+};
+
+/* misc flags */
+#define IEEE80211_HTINFO_DUALBEACON               0x0040        /* B6   dual beacon */
+#define IEEE80211_HTINFO_DUALCTSPROT              0x0080        /* B7   dual stbc protection */
+#define IEEE80211_HTINFO_STBCBEACON               0x0100        /* B8   secondary beacon */
+#define IEEE80211_HTINFO_LSIGTXOPPROT             0x0200        /* B9   lsig txop prot full support */
+#define IEEE80211_HTINFO_PCOACTIVE                0x0400        /* B10  pco active */
+#define IEEE80211_HTINFO_PCOPHASE                 0x0800        /* B11  pco phase */
+
+/* Secondary Channel offset for 40MHz direct link */
+#define IEEE80211_SECONDARY_CHANNEL_ABOVE         1
+#define IEEE80211_SECONDARY_CHANNEL_BELOW         3
+
+#define IEEE80211_TDLS_CHAN_SX_PROHIBIT         0x00000002      /* bit-2 TDLS Channel Switch Prohibit */
+
+/* RIFS mode */
+enum {
+	IEEE80211_HTINFO_RIFSMODE_PROHIBITED,   /* use of rifs prohibited */
+	IEEE80211_HTINFO_RIFSMODE_ALLOWED,      /* use of rifs permitted */
+};
+
+/*
+ * Management information element payloads.
+ */
+enum {
+	IEEE80211_ELEMID_SSID = 0,
+	IEEE80211_ELEMID_RATES = 1,
+	IEEE80211_ELEMID_FHPARMS = 2,
+	IEEE80211_ELEMID_DSPARMS = 3,
+	IEEE80211_ELEMID_CFPARMS = 4,
+	IEEE80211_ELEMID_TIM = 5,
+	IEEE80211_ELEMID_IBSSPARMS = 6,
+	IEEE80211_ELEMID_COUNTRY = 7,
+	IEEE80211_ELEMID_REQINFO = 10,
+	IEEE80211_ELEMID_QBSS_LOAD = 11,
+	IEEE80211_ELEMID_TCLAS = 14,
+	IEEE80211_ELEMID_CHALLENGE = 16,
+	/* 17-31 reserved for challenge text extension */
+	IEEE80211_ELEMID_PWRCNSTR = 32,
+	IEEE80211_ELEMID_PWRCAP = 33,
+	IEEE80211_ELEMID_TPCREQ = 34,
+	IEEE80211_ELEMID_TPCREP = 35,
+	IEEE80211_ELEMID_SUPPCHAN = 36,
+	IEEE80211_ELEMID_CHANSWITCHANN = 37,
+	IEEE80211_ELEMID_MEASREQ = 38,
+	IEEE80211_ELEMID_MEASREP = 39,
+	IEEE80211_ELEMID_QUIET = 40,
+	IEEE80211_ELEMID_IBSSDFS = 41,
+	IEEE80211_ELEMID_ERP = 42,
+	IEEE80211_ELEMID_TCLAS_PROCESS = 44,
+	IEEE80211_ELEMID_HTCAP_ANA = 45,
+	IEEE80211_ELEMID_RESERVED_47 = 47,
+	IEEE80211_ELEMID_RSN = 48,
+	IEEE80211_ELEMID_XRATES = 50,
+	IEEE80211_ELEMID_HTCAP = 51,
+	IEEE80211_ELEMID_HTINFO = 52,
+	IEEE80211_ELEMID_MOBILITY_DOMAIN = 54,
+	IEEE80211_ELEMID_FT = 55,
+	IEEE80211_ELEMID_TIMEOUT_INTERVAL = 56,
+	IEEE80211_ELEMID_EXTCHANSWITCHANN = 60,
+	IEEE80211_ELEMID_HTINFO_ANA = 61,
+	IEEE80211_ELEMID_SECCHANOFFSET = 62,
+	IEEE80211_ELEMID_WAPI = 68,     /*IE for WAPI */
+	IEEE80211_ELEMID_TIME_ADVERTISEMENT = 69,
+	IEEE80211_ELEMID_RRM = 70,      /* Radio resource measurement */
+	IEEE80211_ELEMID_2040_COEXT = 72,
+	IEEE80211_ELEMID_2040_INTOL = 73,
+	IEEE80211_ELEMID_OBSS_SCAN = 74,
+	IEEE80211_ELEMID_MMIE = 76,     /* 802.11w Management MIC IE */
+	IEEE80211_ELEMID_FMS_DESCRIPTOR = 86,   /* 802.11v FMS descriptor IE */
+	IEEE80211_ELEMID_FMS_REQUEST = 87,      /* 802.11v FMS request IE */
+	IEEE80211_ELEMID_FMS_RESPONSE = 88,     /* 802.11v FMS response IE */
+	IEEE80211_ELEMID_BSSMAX_IDLE_PERIOD = 90,       /* BSS MAX IDLE PERIOD */
+	IEEE80211_ELEMID_TFS_REQUEST = 91,
+	IEEE80211_ELEMID_TFS_RESPONSE = 92,
+	IEEE80211_ELEMID_TIM_BCAST_REQUEST = 94,
+	IEEE80211_ELEMID_TIM_BCAST_RESPONSE = 95,
+	IEEE80211_ELEMID_INTERWORKING = 107,
+	IEEE80211_ELEMID_XCAPS = 127,
+	IEEE80211_ELEMID_RESERVED_133 = 133,
+	IEEE80211_ELEMID_TPC = 150,
+	IEEE80211_ELEMID_CCKM = 156,
+	IEEE80211_ELEMID_VHTCAP = 191,  /* VHT Capabilities */
+	IEEE80211_ELEMID_VHTOP = 192,   /* VHT Operation */
+	IEEE80211_ELEMID_EXT_BSS_LOAD = 193,    /* Extended BSS Load */
+	IEEE80211_ELEMID_WIDE_BAND_CHAN_SWITCH = 194,   /* Wide Band Channel Switch */
+	IEEE80211_ELEMID_VHT_TX_PWR_ENVLP = 195,        /* VHT Transmit Power Envelope */
+	IEEE80211_ELEMID_CHAN_SWITCH_WRAP = 196,        /* Channel Switch Wrapper */
+	IEEE80211_ELEMID_AID = 197,     /* AID */
+	IEEE80211_ELEMID_QUIET_CHANNEL = 198,   /* Quiet Channel */
+	IEEE80211_ELEMID_OP_MODE_NOTIFY = 199,  /* Operating Mode Notification */
+	IEEE80211_ELEMID_VENDOR = 221,  /* vendor private */
+};
+
+#define IEEE80211_MAX_IE_LEN                255
+#define IEEE80211_RSN_IE_LEN                22
+
+#define IEEE80211_CHANSWITCHANN_BYTES        5
+#define IEEE80211_EXTCHANSWITCHANN_BYTES     6
+
+/* TODO -> Need to Check Redefinition Error used in only UMAC */
+#if 0
+struct ieee80211_tim_ie {
+	uint8_t tim_ie;         /* IEEE80211_ELEMID_TIM */
+	uint8_t tim_len;
+	uint8_t tim_count;      /* DTIM count */
+	uint8_t tim_period;     /* DTIM period */
+	uint8_t tim_bitctl;     /* bitmap control */
+	uint8_t tim_bitmap[1];  /* variable-length bitmap */
+} __packed;
+#endif
+
+/* Country IE channel triplet.  Each union overlays the plain channel-
+ * triplet interpretation with the regulatory-extension one; which arm
+ * applies is decided by the parser (presumably from the first octet's
+ * value — confirm against the IEEE 802.11 Country element rules). */
+struct country_ie_triplet {
+	union {
+		uint8_t schan;  /* starting channel */
+		uint8_t regextid;       /* Regulatory Extension Identifier */
+	};
+	union {
+		uint8_t nchan;  /* number of channels */
+		uint8_t regclass;       /* Regulatory Class */
+	};
+	union {
+		uint8_t maxtxpwr;       /* tx power  */
+		uint8_t coverageclass;  /* Coverage Class */
+	};
+} __packed;
+
+struct ieee80211_country_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_COUNTRY */
+	uint8_t len;
+	uint8_t cc[3];          /* ISO CC+(I)ndoor/(O)utdoor */
+	struct country_ie_triplet triplet[1];
+} __packed;
+
+/* FH Parameter Set element (IEEE80211_ELEMID_FHPARMS). */
+struct ieee80211_fh_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_FHPARMS */
+	uint8_t len;
+	uint16_t dwell_time;    /* NOTE(review): byte order left unresolved in original ("endianess??") — confirm against parser */
+	uint8_t hop_set;
+	uint8_t hop_pattern;
+	uint8_t hop_index;
+} __packed;
+
+struct ieee80211_ds_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_DSPARMS */
+	uint8_t len;
+	uint8_t current_channel;
+} __packed;
+
+struct ieee80211_erp_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_ERP */
+	uint8_t len;
+	uint8_t value;
+} __packed;
+
+/* TODO -> Need to Check Redefinition Error used in only UMAC */
+#if 0
+struct ieee80211_quiet_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_QUIET */
+	uint8_t len;
+	uint8_t tbttcount;      /* quiet start */
+	uint8_t period;         /* beacon intervals between quiets */
+	uint16_t duration;      /* TUs of each quiet */
+	uint16_t offset;        /* TUs of from TBTT of quiet start */
+} __packed;
+#endif
+
+struct ieee80211_channelswitch_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_CHANSWITCHANN */
+	uint8_t len;
+	uint8_t switchmode;
+	uint8_t newchannel;
+	uint8_t tbttcount;
+} __packed;
+
+/* channel switch action frame format definition */
+struct ieee80211_action_spectrum_channel_switch {
+	struct ieee80211_action csa_header;
+	struct ieee80211_channelswitch_ie csa_element;
+} __packed;
+
+struct ieee80211_extendedchannelswitch_ie {
+	uint8_t ie;             /* IEEE80211_ELEMID_EXTCHANSWITCHANN */
+	uint8_t len;
+	uint8_t switchmode;
+	uint8_t newClass;
+	uint8_t newchannel;
+	uint8_t tbttcount;
+} __packed;
+
+struct ieee80211_tpc_ie {
+	uint8_t ie;
+	uint8_t len;
+	uint8_t pwrlimit;
+} __packed;
+
+/*
+ * MHDRIE included in TKIP MFP protected management frames
+ */
+struct ieee80211_ese_mhdr_ie {
+	uint8_t mhdr_id;
+	uint8_t mhdr_len;
+	uint8_t mhdr_oui[3];
+	uint8_t mhdr_oui_type;
+	uint8_t mhdr_fc[2];
+	uint8_t mhdr_bssid[IEEE80211_ADDR_LEN];
+} __packed;
+
+/*
+ * SSID IE
+ */
+struct ieee80211_ie_ssid {
+	uint8_t ssid_id;
+	uint8_t ssid_len;
+	uint8_t ssid[32];
+} __packed;
+
+/*
+ * Supported rates
+ */
+#define IEEE80211_MAX_SUPPORTED_RATES      8
+
+struct ieee80211_ie_rates {
+	uint8_t rate_id;        /* Element Id */
+	uint8_t rate_len;       /* IE Length */
+	uint8_t rate[IEEE80211_MAX_SUPPORTED_RATES];    /* IE Length */
+} __packed;
+
+/*
+ * Extended rates
+ */
+#define IEEE80211_MAX_EXTENDED_RATES     256
+
+struct ieee80211_ie_xrates {
+	uint8_t xrate_id;       /* Element Id */
+	uint8_t xrate_len;      /* IE Length */
+	uint8_t xrate[IEEE80211_MAX_EXTENDED_RATES];    /* IE Length */
+} __packed;
+
+/*
+ * WPS SSID list information element (maximally sized).
+ */
+struct ieee80211_ie_ssidl {
+	uint8_t ssidl_id;       /* IEEE80211_ELEMID_VENDOR */
+	uint8_t ssidl_len;      /* length in bytes */
+	uint8_t ssidl_oui[3];   /* 0x00, 0x50, 0xf2 */
+	uint8_t ssidl_type;     /* OUI type */
+	uint8_t ssidl_prim_cap; /* Primary capabilities */
+	uint8_t ssidl_count;    /* # of secondary SSIDs */
+	uint16_t ssidl_value[248];
+} __packed;
+
+/*
+ * Security-capability bitmap carried in a WPS SSIDL secondary-SSID entry
+ * (struct ieee80211_sec_ssid).  The two variants declare the bits in
+ * mirrored order for big- vs little-endian hosts.
+ * Fix: the big-endian variant misspelled the reserved field as
+ * "reeserved2", inconsistent with the little-endian variant's
+ * "reserved2"; the field is reserved, so the rename is safe.
+ */
+#if _BYTE_ORDER == _BIG_ENDIAN
+struct ieee80211_sec_ssid_cap {
+	uint32_t reserved0 : 1,
+		 akmlist : 6, reserved1 : 4, reserved2 : 2, ucipher : 15, mcipher : 4;
+};
+#else
+struct ieee80211_sec_ssid_cap {
+	uint32_t mcipher : 4,
+		 ucipher : 15, reserved2 : 2, reserved1 : 4, akmlist : 6, reserved0 : 1;
+};
+#endif
+
+struct ieee80211_ie_qbssload {
+	uint8_t elem_id;        /* IEEE80211_ELEMID_QBSS_LOAD */
+	uint8_t length;         /* length in bytes */
+	uint16_t station_count; /* number of station associated */
+	uint8_t channel_utilization;    /* channel busy time in 0-255 scale */
+	uint16_t aac;           /* available admission capacity */
+} __packed;
+
+#define SEC_SSID_HEADER_LEN  6
+#define SSIDL_IE_HEADER_LEN  6
+
+struct ieee80211_sec_ssid {
+	uint8_t sec_ext_cap;
+	struct ieee80211_sec_ssid_cap sec_cap;
+	uint8_t sec_ssid_len;
+	uint8_t sec_ssid[32];
+} __packed;
+
+/* Definitions of SSIDL IE */
+enum {
+	CAP_MCIPHER_ENUM_NONE = 0,
+	CAP_MCIPHER_ENUM_WEP40,
+	CAP_MCIPHER_ENUM_WEP104,
+	CAP_MCIPHER_ENUM_TKIP,
+	CAP_MCIPHER_ENUM_CCMP,
+	CAP_MCIPHER_ENUM_CKIP_CMIC,
+	CAP_MCIPHER_ENUM_CKIP,
+	CAP_MCIPHER_ENUM_CMIC
+};
+
+#define CAP_UCIPHER_BIT_NONE           0x0001
+#define CAP_UCIPHER_BIT_WEP40          0x0002
+#define CAP_UCIPHER_BIT_WEP104         0x0004
+#define CAP_UCIPHER_BIT_TKIP           0x0008
+#define CAP_UCIPHER_BIT_CCMP           0x0010
+#define CAP_UCIPHER_BIT_CKIP_CMIC      0x0020
+#define CAP_UCIPHER_BIT_CKIP           0x0040
+#define CAP_UCIPHER_BIT_CMIC           0x0080
+#define CAP_UCIPHER_BIT_WPA2_WEP40     0x0100
+#define CAP_UCIPHER_BIT_WPA2_WEP104    0x0200
+#define CAP_UCIPHER_BIT_WPA2_TKIP      0x0400
+#define CAP_UCIPHER_BIT_WPA2_CCMP      0x0800
+#define CAP_UCIPHER_BIT_WPA2_CKIP_CMIC 0x1000
+#define CAP_UCIPHER_BIT_WPA2_CKIP      0x2000
+#define CAP_UCIPHER_BIT_WPA2_CMIC      0x4000
+
+#define CAP_AKM_BIT_WPA1_1X            0x01
+#define CAP_AKM_BIT_WPA1_PSK           0x02
+#define CAP_AKM_BIT_WPA2_1X            0x04
+#define CAP_AKM_BIT_WPA2_PSK           0x08
+#define CAP_AKM_BIT_WPA1_CCKM          0x10
+#define CAP_AKM_BIT_WPA2_CCKM          0x20
+
+#define IEEE80211_CHALLENGE_LEN         128
+
+#define IEEE80211_SUPPCHAN_LEN          26
+
+#define IEEE80211_RATE_BASIC            0x80
+#define IEEE80211_RATE_VAL              0x7f
+
+/* EPR information element flags */
+#define IEEE80211_ERP_NON_ERP_PRESENT   0x01
+#define IEEE80211_ERP_USE_PROTECTION    0x02
+#define IEEE80211_ERP_LONG_PREAMBLE     0x04
+
+/* Atheros private advanced capabilities info */
+#define ATHEROS_CAP_TURBO_PRIME         0x01
+#define ATHEROS_CAP_COMPRESSION         0x02
+#define ATHEROS_CAP_FAST_FRAME          0x04
+/* bits 3-6 reserved */
+#define ATHEROS_CAP_BOOST               0x80
+
+#define ATH_OUI                     0x7f0300    /* Atheros OUI */
+#define ATH_OUI_TYPE                    0x01
+#define ATH_OUI_SUBTYPE                 0x01
+#define ATH_OUI_VERSION                 0x00
+#define ATH_OUI_TYPE_XR                 0x03
+#define ATH_OUI_VER_XR                  0x01
+#define ATH_OUI_EXTCAP_TYPE             0x04    /* Atheros Extended Cap Type */
+#define ATH_OUI_EXTCAP_SUBTYPE          0x01    /* Atheros Extended Cap Sub-type */
+#define ATH_OUI_EXTCAP_VERSION          0x00    /* Atheros Extended Cap Version */
+
+#define WPA_OUI                     0xf25000
+#define WPA_VERSION                        1    /* current supported version */
+#define CSCO_OUI                    0x964000    /* Cisco OUI */
+#define AOW_OUI                     0x4a0100    /* AoW OUI, workaround */
+#define AOW_OUI_TYPE                    0x01
+#define AOW_OUI_VERSION                 0x01
+
+#define WSC_OUI                   0x0050f204
+
+#define WPA_CSE_NULL                    0x00
+#define WPA_CSE_WEP40                   0x01
+#define WPA_CSE_TKIP                    0x02
+#define WPA_CSE_CCMP                    0x04
+#define WPA_CSE_WEP104                  0x05
+
+#define WPA_ASE_NONE                    0x00
+#define WPA_ASE_8021X_UNSPEC            0x01
+#define WPA_ASE_8021X_PSK               0x02
+#define WPA_ASE_FT_IEEE8021X            0x20
+#define WPA_ASE_FT_PSK                  0x40
+#define WPA_ASE_SHA256_IEEE8021X        0x80
+#define WPA_ASE_SHA256_PSK              0x100
+#define WPA_ASE_WPS                     0x200
+
+#define RSN_OUI                     0xac0f00
+#define RSN_VERSION                        1    /* current supported version */
+
+#define RSN_CSE_NULL                    0x00
+#define RSN_CSE_WEP40                   0x01
+#define RSN_CSE_TKIP                    0x02
+#define RSN_CSE_WRAP                    0x03
+#define RSN_CSE_CCMP                    0x04
+#define RSN_CSE_WEP104                  0x05
+#define RSN_CSE_AES_CMAC                0x06
+
+#define RSN_ASE_NONE                    0x00
+#define RSN_ASE_8021X_UNSPEC            0x01
+#define RSN_ASE_8021X_PSK               0x02
+#define RSN_ASE_FT_IEEE8021X            0x20
+#define RSN_ASE_FT_PSK                  0x40
+#define RSN_ASE_SHA256_IEEE8021X        0x80
+#define RSN_ASE_SHA256_PSK              0x100
+#define RSN_ASE_WPS                     0x200
+
+#define AKM_SUITE_TYPE_IEEE8021X        0x01
+#define AKM_SUITE_TYPE_PSK              0x02
+#define AKM_SUITE_TYPE_FT_IEEE8021X     0x03
+#define AKM_SUITE_TYPE_FT_PSK           0x04
+#define AKM_SUITE_TYPE_SHA256_IEEE8021X 0x05
+#define AKM_SUITE_TYPE_SHA256_PSK       0x06
+
+#define RSN_CAP_PREAUTH                 0x01
+#define RSN_CAP_PTKSA_REPLAYCOUNTER    0x0c
+#define RSN_CAP_GTKSA_REPLAYCOUNTER    0x30
+#define RSN_CAP_MFP_REQUIRED            0x40
+#define RSN_CAP_MFP_ENABLED             0x80
+
+#define CCKM_OUI                    0x964000
+#define CCKM_ASE_UNSPEC                    0
+#define WPA_CCKM_AKM              0x00964000
+#define RSN_CCKM_AKM              0x00964000
+
+#define WME_OUI                     0xf25000
+#define WME_OUI_TYPE                    0x02
+#define WME_INFO_OUI_SUBTYPE            0x00
+#define WME_PARAM_OUI_SUBTYPE           0x01
+#define WME_TSPEC_OUI_SUBTYPE           0x02
+
+#define WME_PARAM_OUI_VERSION              1
+#define WME_TSPEC_OUI_VERSION              1
+#define WME_VERSION                        1
+
+/* WME stream classes */
+#define WME_AC_BE                          0    /* best effort */
+#define WME_AC_BK                          1    /* background */
+#define WME_AC_VI                          2    /* video */
+#define WME_AC_VO                          3    /* voice */
+
+/* WCN IE */
+#define WCN_OUI                     0xf25000    /* Microsoft OUI */
+#define WCN_OUI_TYPE                    0x04    /* WCN */
+
+/* Atheros htoui for the HT vendor IE; use Epigram OUI for compatibility with pre-11n devices */
+#define ATH_HTOUI                   0x00904c
+
+#define SFA_OUI                     0x964000
+#define SFA_OUI_TYPE                    0x14
+#define SFA_IE_CAP_MFP                  0x01
+#define SFA_IE_CAP_DIAG_CHANNEL         0x02
+#define SFA_IE_CAP_LOCATION_SVCS        0x04
+#define SFA_IE_CAP_EXP_BANDWIDTH        0x08
+
+#define WPA_OUI_BYTES       0x00, 0x50, 0xf2
+#define RSN_OUI_BYTES       0x00, 0x0f, 0xac
+#define WME_OUI_BYTES       0x00, 0x50, 0xf2
+#define ATH_OUI_BYTES       0x00, 0x03, 0x7f
+#define SFA_OUI_BYTES       0x00, 0x40, 0x96
+#define CCKM_OUI_BYTES      0x00, 0x40, 0x96
+#define WPA_SEL(x)          (((x)<<24)|WPA_OUI)
+#define RSN_SEL(x)          (((x)<<24)|RSN_OUI)
+#define SFA_SEL(x)          (((x)<<24)|SFA_OUI)
+#define CCKM_SEL(x)         (((x)<<24)|CCKM_OUI)
+
+#define IEEE80211_RV(v)     ((v) & IEEE80211_RATE_VAL)
+
+/*
+ * AUTH management packets
+ *
+ *  octet algo[2]
+ *  octet seq[2]
+ *  octet status[2]
+ *  octet chal.id
+ *  octet chal.length
+ *  octet chal.text[253]
+ */
+
+typedef uint8_t *ieee80211_mgt_auth_t;
+
+#define IEEE80211_AUTH_ALGORITHM(auth) \
+	((auth)[0] | ((auth)[1] << 8))
+#define IEEE80211_AUTH_TRANSACTION(auth) \
+	((auth)[2] | ((auth)[3] << 8))
+#define IEEE80211_AUTH_STATUS(auth) \
+	((auth)[4] | ((auth)[5] << 8))
+
+#define IEEE80211_AUTH_ALG_OPEN     0x0000
+#define IEEE80211_AUTH_ALG_SHARED   0x0001
+#define IEEE80211_AUTH_ALG_FT       0x0002
+#define IEEE80211_AUTH_ALG_LEAP     0x0080
+
+enum {
+	IEEE80211_AUTH_OPEN_REQUEST = 1,
+	IEEE80211_AUTH_OPEN_RESPONSE = 2,
+};
+
+enum {
+	IEEE80211_AUTH_SHARED_REQUEST = 1,
+	IEEE80211_AUTH_SHARED_CHALLENGE = 2,
+	IEEE80211_AUTH_SHARED_RESPONSE = 3,
+	IEEE80211_AUTH_SHARED_PASS = 4,
+};
+
+/*
+ * Reason codes
+ *
+ * Unlisted codes are reserved
+ */
+
+enum {
+	IEEE80211_REASON_UNSPECIFIED = 1,
+	IEEE80211_REASON_AUTH_EXPIRE = 2,
+	IEEE80211_REASON_AUTH_LEAVE = 3,
+	IEEE80211_REASON_ASSOC_EXPIRE = 4,
+	IEEE80211_REASON_ASSOC_TOOMANY = 5,
+	IEEE80211_REASON_NOT_AUTHED = 6,
+	IEEE80211_REASON_NOT_ASSOCED = 7,
+	IEEE80211_REASON_ASSOC_LEAVE = 8,
+	IEEE80211_REASON_ASSOC_NOT_AUTHED = 9,
+
+	IEEE80211_REASON_RSN_REQUIRED = 11,
+	IEEE80211_REASON_RSN_INCONSISTENT = 12,
+	IEEE80211_REASON_IE_INVALID = 13,
+	IEEE80211_REASON_MIC_FAILURE = 14,
+
+	IEEE80211_REASON_QOS = 32,
+	IEEE80211_REASON_QOS_BANDWITDH = 33,
+	IEEE80211_REASON_QOS_CH_CONDITIONS = 34,
+	IEEE80211_REASON_QOS_TXOP = 35,
+	IEEE80211_REASON_QOS_LEAVE = 36,
+	IEEE80211_REASON_QOS_DECLINED = 37,
+	IEEE80211_REASON_QOS_SETUP_REQUIRED = 38,
+	IEEE80211_REASON_QOS_TIMEOUT = 39,
+	IEEE80211_REASON_QOS_CIPHER = 45,
+
+	IEEE80211_STATUS_SUCCESS = 0,
+	IEEE80211_STATUS_UNSPECIFIED = 1,
+	IEEE80211_STATUS_CAPINFO = 10,
+	IEEE80211_STATUS_NOT_ASSOCED = 11,
+	IEEE80211_STATUS_OTHER = 12,
+	IEEE80211_STATUS_ALG = 13,
+	IEEE80211_STATUS_SEQUENCE = 14,
+	IEEE80211_STATUS_CHALLENGE = 15,
+	IEEE80211_STATUS_TIMEOUT = 16,
+	IEEE80211_STATUS_TOOMANY = 17,
+	IEEE80211_STATUS_BASIC_RATE = 18,
+	IEEE80211_STATUS_SP_REQUIRED = 19,
+	IEEE80211_STATUS_PBCC_REQUIRED = 20,
+	IEEE80211_STATUS_CA_REQUIRED = 21,
+	IEEE80211_STATUS_TOO_MANY_STATIONS = 22,
+	IEEE80211_STATUS_RATES = 23,
+	IEEE80211_STATUS_SHORTSLOT_REQUIRED = 25,
+	IEEE80211_STATUS_DSSSOFDM_REQUIRED = 26,
+	IEEE80211_STATUS_NO_HT = 27,
+	IEEE80211_STATUS_REJECT_TEMP = 30,
+	IEEE80211_STATUS_MFP_VIOLATION = 31,
+	IEEE80211_STATUS_REFUSED = 37,
+	IEEE80211_STATUS_INVALID_PARAM = 38,
+
+	IEEE80211_STATUS_DLS_NOT_ALLOWED = 48,
+};
+
+/* private IEEE80211_STATUS */
+#define    IEEE80211_STATUS_CANCEL             -1
+#define    IEEE80211_STATUS_INVALID_IE         -2
+#define    IEEE80211_STATUS_INVALID_CHANNEL    -3
+
+#define IEEE80211_WEP_KEYLEN        5   /* 40bit */
+#define IEEE80211_WEP_IVLEN         3   /* 24bit */
+#define IEEE80211_WEP_KIDLEN        1   /* 1 octet */
+#define IEEE80211_WEP_CRCLEN        4   /* CRC-32 */
+#define IEEE80211_WEP_NKID          4   /* number of key ids */
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP.  For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+#define IEEE80211_WEP_EXTIV      0x20
+#define IEEE80211_WEP_EXTIVLEN      4   /* extended IV length */
+#define IEEE80211_WEP_MICLEN        8   /* trailing MIC */
+
+#define IEEE80211_CCMP_HEADERLEN    8
+#define IEEE80211_CCMP_MICLEN       8
+
+/*
+ * 802.11w defines a MMIE chunk to be attached at the end of
+ * any outgoing broadcast or multicast robust management frame.
+ * MMIE field is total 18 bytes in size. Following the diagram of MMIE
+ *
+ *        <------------ 18 Bytes MMIE ----------------------->
+ *        +--------+---------+---------+-----------+---------+
+ *        |Element | Length  | Key id  |   IPN     |  MIC    |
+ *        |  id    |         |         |           |         |
+ *        +--------+---------+---------+-----------+---------+
+ * bytes      1         1         2         6            8
+ *
+ */
+#define IEEE80211_MMIE_LEN          18
+#define IEEE80211_MMIE_ELEMENTIDLEN 1
+#define IEEE80211_MMIE_LENGTHLEN    1
+#define IEEE80211_MMIE_KEYIDLEN     2
+#define IEEE80211_MMIE_IPNLEN       6
+#define IEEE80211_MMIE_MICLEN       8
+
+#define IEEE80211_CRC_LEN           4
+
+#define IEEE80211_8021Q_HEADER_LEN  4
+/*
+ * Maximum acceptable MTU is:
+ *  IEEE80211_MAX_LEN - WEP overhead - CRC -
+ *      QoS overhead - RSN/WPA overhead
+ * Min is arbitrarily chosen > IEEE80211_MIN_LEN.  The default
+ * mtu is Ethernet-compatible; it's set by ether_ifattach.
+ */
+#define IEEE80211_MTU_MAX       2290
+#define IEEE80211_MTU_MIN       32
+
+/* Rather than using this default value, customer platforms can provide a custom value for this constant.
+   Customer platforms will use their own define value in that case. */
+#ifndef IEEE80211_MAX_MPDU_LEN
+#define IEEE80211_MAX_MPDU_LEN      (3840 + IEEE80211_CRC_LEN +	\
+				     (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN))
+#endif
+#define IEEE80211_ACK_LEN \
+	(sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN)
+#define IEEE80211_MIN_LEN \
+	(sizeof(struct ieee80211_frame_min) + IEEE80211_CRC_LEN)
+
+/* An 802.11 data frame can be one of three types:
+   1. An unaggregated frame: The maximum length of an unaggregated data frame is 2324 bytes + headers.
+   2. A data frame that is part of an AMPDU: The maximum length of an AMPDU may be up to 65535 bytes, but each data frame is limited to 2324 bytes + header.
+   3. An AMSDU: The maximum length of an AMSDU is either 3839 or 7935 bytes.
+   The maximum frame length supported by hardware is 4095 bytes.
+   A length of 3839 bytes is chosen here to support unaggregated data frames, any size AMPDUs and 3839 byte AMSDUs.
+ */
+#define IEEE80211N_MAX_FRAMELEN  3839
+#define IEEE80211N_MAX_LEN (IEEE80211N_MAX_FRAMELEN + IEEE80211_CRC_LEN + \
+			    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN))
+
+#define IEEE80211_TX_CHAINMASK_MIN  1
+#define IEEE80211_TX_CHAINMASK_MAX  7
+
+#define IEEE80211_RX_CHAINMASK_MIN  1
+#define IEEE80211_RX_CHAINMASK_MAX  7
+
+/*
+ * The 802.11 spec says at most 2007 stations may be
+ * associated at once.  For most AP's this is way more
+ * than is feasible so we use a default of 128.  This
+ * number may be overridden by the driver and/or by
+ * user configuration.
+ */
+#define IEEE80211_AID_MAX       2007
+#define IEEE80211_AID_DEF       128
+
+#define IEEE80211_AID(b)    ((b) &~0xc000)
+
+/*
+ * RTS frame length parameters.  The default is specified in
+ * the 802.11 spec.  The max may be wrong for jumbo frames.
+ */
+#define IEEE80211_RTS_DEFAULT       512
+#define IEEE80211_RTS_MIN           0
+#define IEEE80211_RTS_MAX           2347
+
+/*
+ * Fragmentation limits
+ */
+#define IEEE80211_FRAGMT_THRESHOLD_MIN        540       /* min frag threshold */
+#define IEEE80211_FRAGMT_THRESHOLD_MAX       2346       /* max frag threshold */
+
+/*
+ * Regulatory extension identifier for country IE.
+ */
+#define IEEE80211_REG_EXT_ID        201
+
+/*
+ * overlapping BSS
+ */
+#define IEEE80211_OBSS_SCAN_PASSIVE_DWELL_DEF  20
+#define IEEE80211_OBSS_SCAN_ACTIVE_DWELL_DEF   10
+#define IEEE80211_OBSS_SCAN_INTERVAL_DEF       300
+#define IEEE80211_OBSS_SCAN_PASSIVE_TOTAL_DEF  200
+#define IEEE80211_OBSS_SCAN_ACTIVE_TOTAL_DEF   20
+#define IEEE80211_OBSS_SCAN_THRESH_DEF   25
+#define IEEE80211_OBSS_SCAN_DELAY_DEF   5
+
+/*
+ * overlapping BSS scan ie
+ */
+/*
+ * Overlapping BSS Scan Parameters element body.
+ * Declared __packed so the struct mirrors the over-the-air IE layout;
+ * the dwell/interval defaults are the IEEE80211_OBSS_SCAN_*_DEF values above.
+ */
+struct ieee80211_ie_obss_scan {
+	uint8_t elem_id;        /* element id */
+	uint8_t elem_len;       /* length of the fields that follow */
+	uint16_t scan_passive_dwell;
+	uint16_t scan_active_dwell;
+	uint16_t scan_interval;
+	uint16_t scan_passive_total;
+	uint16_t scan_active_total;
+	uint16_t scan_delay;
+	uint16_t scan_thresh;
+} __packed;
+
+/*
+ * Extended capability ie
+ */
+/*
+ * Extended Capabilities element body (__packed, on-air layout).
+ * Flag bits live in the IEEE80211_EXTCAPIE_* defines that follow.
+ */
+struct ieee80211_ie_ext_cap {
+	uint8_t elem_id;        /* element id */
+	uint8_t elem_len;       /* length of the fields that follow */
+	uint32_t ext_capflags;  /* capability bits 0-31 */
+	uint32_t ext_capflags2; /* capability bits 32-63 */
+} __packed;
+
+/* Extended capability IE flags */
+#define IEEE80211_EXTCAPIE_2040COEXTMGMT        0x00000001
+#define IEEE80211_EXTCAPIE_TFS                  0x00010000
+#define IEEE80211_EXTCAPIE_FMS                  0x00000800
+#define IEEE80211_EXTCAPIE_WNMSLEEPMODE         0x00020000
+#define IEEE80211_EXTCAPIE_TIMBROADCAST         0x00040000
+#define IEEE80211_EXTCAPIE_PROXYARP             0x00001000
+#define IEEE80211_EXTCAPIE_BSSTRANSITION        0x00080000
+/* Tunneled Direct Link Setup (TDLS) extended capability bits */
+#define IEEE80211_EXTCAPIE_PEER_UAPSD_BUF_STA   0x10000000
+#define IEEE80211_EXTCAPIE_TDLS_PEER_PSM        0x20000000
+#define IEEE80211_EXTCAPIE_TDLS_CHAN_SX         0x40000000
+/* 2nd Extended capability IE flags bit32-bit63*/
+#define IEEE80211_EXTCAPIE_TDLSSUPPORT      0x00000020  /* bit-37 TDLS Support */
+#define IEEE80211_EXTCAPIE_TDLSPROHIBIT     0x00000040  /* bit-38 TDLS Prohibit Support */
+#define IEEE80211_EXTCAPIE_TDLSCHANSXPROHIBIT   0x00000080      /* bit-39 TDLS Channel Switch Prohibit */
+#define IEEE80211_EXTCAPIE_TDLS_WIDE_BAND   0x20000080  /* bit-61 TDLS Wide Bandwidth support */
+#define IEEE80211_EXTCAPIE_OP_MODE_NOTIFY   0x40000000  /* bit-62 Operating Mode notification */
+
+/*
+ * These caps are populated when we receive beacon/probe response
+ * This is used to maintain local TDLS cap bit masks
+ */
+
+#define IEEE80211_TDLS_PROHIBIT     0x00000001  /* bit-1 TDLS Prohibit Support */
+
+/*
+ * 20/40 BSS coexistence ie
+ */
+/*
+ * 20/40 BSS Coexistence element body (__packed).
+ * The single flags octet is declared twice so the in-memory bit order
+ * matches the on-air bit order on both big- and little-endian hosts;
+ * both branches name exactly the same bits, just in reverse order.
+ */
+struct ieee80211_ie_bss_coex {
+	uint8_t elem_id;
+	uint8_t elem_len;
+#if _BYTE_ORDER == _BIG_ENDIAN
+	uint8_t reserved1 : 1,
+		reserved2 : 1,
+		reserved3 : 1,
+		obss_exempt_grant : 1,
+		obss_exempt_req : 1,
+		ht20_width_req : 1, ht40_intolerant : 1, inf_request : 1;
+#else
+	uint8_t inf_request : 1,
+		ht40_intolerant : 1,
+		ht20_width_req : 1,
+		obss_exempt_req : 1,
+		obss_exempt_grant : 1, reserved3 : 1, reserved2 : 1, reserved1 : 1;
+#endif
+} __packed;
+
+/*
+ * 20/40 BSS intolerant channel report ie
+ */
+struct ieee80211_ie_intolerant_report {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint8_t reg_class;      /* regulatory class the channel list refers to */
+	uint8_t chan_list[1];   /* variable-length channel list */
+} __packed;
+
+/*
+ * 20/40 coext management action frame
+ * NOTE: chan_report ends in a variable-length channel list, so
+ * sizeof(this struct) is only a lower bound on the frame length.
+ */
+struct ieee80211_action_bss_coex_frame {
+	struct ieee80211_action ac_header;
+	struct ieee80211_ie_bss_coex coex;
+	struct ieee80211_ie_intolerant_report chan_report;
+} __packed;
+
+/* Interval types carried in the Timeout Interval element (TIE). */
+typedef enum ieee80211_tie_interval_type {
+	IEEE80211_TIE_INTERVAL_TYPE_RESERVED = 0,
+	IEEE80211_TIE_INTERVAL_TYPE_REASSOC_DEADLINE_INTERVAL = 1,
+	IEEE80211_TIE_INTERVAL_TYPE_KEY_LIFETIME_INTERVAL = 2,
+	IEEE80211_TIE_INTERVAL_TYPE_ASSOC_COMEBACK_TIME = 3,
+} ieee80211_tie_interval_type_t;
+
+/* Timeout Interval element body (__packed, on-air layout). */
+struct ieee80211_ie_timeout_interval {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint8_t interval_type;  /* one of ieee80211_tie_interval_type_t */
+	uint32_t value;         /* interval value; units depend on interval_type — see spec */
+} __packed;
+
+/* TODO -> Need to Check Redefinition Error used in only UMAC */
+#if 0
+/* Management MIC information element (IEEE 802.11w) */
+struct ieee80211_mmie {
+	uint8_t element_id;
+	uint8_t length;
+	uint16_t key_id;
+	uint8_t sequence_number[6];
+	uint8_t mic[8];
+} __packed;
+#endif
+
+/*
+ * 802.11n Secondary Channel Offset element
+ */
+#define IEEE80211_SEC_CHAN_OFFSET_SCN               0   /* no secondary channel */
+#define IEEE80211_SEC_CHAN_OFFSET_SCA               1   /* secondary channel above */
+#define IEEE80211_SEC_CHAN_OFFSET_SCB               3   /* secondary channel below */
+
+/*
+ * Secondary Channel Offset element body (__packed).
+ * sec_chan_offset takes one of the IEEE80211_SEC_CHAN_OFFSET_* values above.
+ */
+struct ieee80211_ie_sec_chan_offset {
+	uint8_t elem_id;
+	uint8_t len;
+	uint8_t sec_chan_offset;
+} __packed;
+
+/*
+ * 802.11ac Transmit Power Envelope element
+ */
+#define IEEE80211_VHT_TXPWR_IS_SUB_ELEMENT          1   /* It checks whether its  sub element */
+#define IEEE80211_VHT_TXPWR_MAX_POWER_COUNT         4   /* Max TX power elements valid */
+#define IEEE80211_VHT_TXPWR_NUM_POWER_SUPPORTED     3   /* Max TX power elements supported */
+#define IEEE80211_VHT_TXPWR_LCL_MAX_PWR_UNITS_SHFT  3   /* B3-B5 Local Max transmit power units */
+
+/* VHT Transmit Power Envelope element body (__packed, on-air layout). */
+struct ieee80211_ie_vht_txpwr_env {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint8_t txpwr_info;     /* Transmit Power Information */
+	uint8_t local_max_txpwr[4];     /* Local Max TxPower for 20,40,80,160MHz */
+} __packed;
+
+/*
+ * 802.11ac Wide Bandwidth Channel Switch Element
+ */
+
+#define IEEE80211_VHT_EXTCH_SWITCH             1        /* For extension channel switch */
+#define CHWIDTH_VHT20                          20       /* Channel width 20 */
+#define CHWIDTH_VHT40                          40       /* Channel width 40 */
+#define CHWIDTH_VHT80                          80       /* Channel width 80 */
+#define CHWIDTH_VHT160                         160      /* Channel width 160 */
+
+/* VHT Wide Bandwidth Channel Switch element body (__packed). */
+struct ieee80211_ie_wide_bw_switch {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint8_t new_ch_width;   /* New channel width */
+	uint8_t new_ch_freq_seg1;       /* Channel Center frequency 1 */
+	uint8_t new_ch_freq_seg2;       /* Channel Center frequency 2 */
+} __packed;
+
+#define IEEE80211_RSSI_RX       0x00000001
+#define IEEE80211_RSSI_TX       0x00000002
+#define IEEE80211_RSSI_EXTCHAN  0x00000004
+#define IEEE80211_RSSI_BEACON   0x00000008
+#define IEEE80211_RSSI_RXDATA   0x00000010
+
+#define IEEE80211_RATE_TX 0
+#define IEEE80211_RATE_RX 1
+#define IEEE80211_LASTRATE_TX 2
+#define IEEE80211_LASTRATE_RX 3
+#define IEEE80211_RATECODE_TX 4
+#define IEEE80211_RATECODE_RX 5
+
+#define IEEE80211_MAX_RATE_PER_CLIENT 8
+/* Define for the P2P Wildcard SSID */
+#define IEEE80211_P2P_WILDCARD_SSID         "DIRECT-"
+
+#define IEEE80211_P2P_WILDCARD_SSID_LEN     (sizeof(IEEE80211_P2P_WILDCARD_SSID) - 1)
+
+#endif /* CDS_COMMON_IEEE80211_H_ */

+ 1374 - 0
core/cds/inc/cds_ieee80211_defines.h

@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef CDS_IEEE80211_DEFINES_H_
+#define CDS_IEEE80211_DEFINES_H_
+
+#include "cds_ieee80211_common.h"
+
+/*
+ * Public defines for Atheros Upper MAC Layer
+ */
+
+/**
+ * @brief Opaque handle of 802.11 protocol layer.
+ */
+struct ieee80211com;
+typedef struct ieee80211com *wlan_dev_t;
+
+/**
+ * @brief Opaque handle to App IE module.
+ */
+struct wlan_mlme_app_ie;
+typedef struct wlan_mlme_app_ie *wlan_mlme_app_ie_t;
+
+/**
+ * @brief Opaque handle of network instance (vap) in 802.11 protocol layer.
+ */
+struct ieee80211vap;
+typedef struct ieee80211vap *wlan_if_t;
+
+struct ieee80211vapprofile;
+typedef struct ieee80211vapprofile *wlan_if_info_t;
+
+/**
+ * @brief Opaque handle of a node in the wifi network.
+ */
+struct ieee80211_node;
+typedef struct ieee80211_node *wlan_node_t;
+
+/**
+ * @brief Opaque handle of OS interface (ifp in the case of unix ).
+ */
+struct _os_if_t;
+typedef struct _os_if_t *os_if_t;
+
+/**
+ *
+ * @brief Opaque handle.
+ */
+typedef void *os_handle_t;
+
+/**
+ * @brief Opaque handle of a channel.
+ */
+struct ieee80211_channel;
+typedef struct ieee80211_channel *wlan_chan_t;
+
+/**
+ * @brief Opaque handle scan_entry.
+ */
+struct ieee80211_scan_entry;
+typedef struct ieee80211_scan_entry *wlan_scan_entry_t;
+
+/* AoW related defines */
+#define AOW_MAX_RECEIVER_COUNT  10
+
+#define IEEE80211_NWID_LEN                  32
+#define IEEE80211_ISO_COUNTRY_LENGTH        3   /* length of 11d ISO country string */
+
+/* SSID with explicit length (an SSID is not NUL-terminated). */
+typedef struct _ieee80211_ssid {
+	int len;                /* number of valid bytes in ssid[] */
+	uint8_t ssid[IEEE80211_NWID_LEN];
+} ieee80211_ssid;
+
+/* Per-frame transmit completion status reported to upper layers. */
+typedef struct ieee80211_tx_status {
+	int ts_flags;           /* bitwise OR of the IEEE80211_TX_* flags below */
+#define IEEE80211_TX_ERROR          0x01
+#define IEEE80211_TX_XRETRY         0x02
+
+	int ts_retries;         /* number of retries to successfully transmit this frame */
+#ifdef ATH_SUPPORT_TxBF
+	uint8_t ts_txbfstatus;  /* TxBF status, bits defined below */
+#define AR_BW_Mismatch      0x1
+#define AR_Stream_Miss      0x2
+#define AR_CV_Missed        0x4
+#define AR_Dest_Miss        0x8
+#define AR_Expired          0x10
+#define AR_TxBF_Valid_HW_Status    (AR_BW_Mismatch|AR_Stream_Miss|AR_CV_Missed|AR_Dest_Miss|AR_Expired)
+#define TxBF_STATUS_Sounding_Complete   0x20
+#define TxBF_STATUS_Sounding_Request    0x40
+#define TxBF_Valid_SW_Status  (TxBF_STATUS_Sounding_Complete | TxBF_STATUS_Sounding_Request)
+#define TxBF_Valid_Status  (AR_TxBF_Valid_HW_Status | TxBF_Valid_SW_Status)
+	uint32_t ts_tstamp;     /* tx time stamp */
+#endif
+#ifdef ATH_SUPPORT_FLOWMAC_MODULE
+	uint8_t ts_flowmac_flags;
+#define IEEE80211_TX_FLOWMAC_DONE           0x01
+#endif
+	uint32_t ts_rateKbps;   /* transmit rate in Kbps */
+} ieee80211_xmit_status;
+
+#ifndef EXTERNAL_USE_ONLY
+/* Per-frame receive status passed up with each received frame. */
+typedef struct ieee80211_rx_status {
+	int rs_numchains;       /* number of rx chains with valid data */
+	int rs_flags;           /* bitwise OR of the IEEE80211_RX_* flags below */
+#define IEEE80211_RX_FCS_ERROR      0x01
+#define IEEE80211_RX_MIC_ERROR      0x02
+#define IEEE80211_RX_DECRYPT_ERROR  0x04
+/* holes in flags here between, ATH_RX_XXXX to IEEE80211_RX_XXX */
+#define IEEE80211_RX_KEYMISS        0x200
+	int rs_rssi;            /* RSSI (noise floor adjusted) */
+	int rs_abs_rssi;        /* absolute RSSI */
+	int rs_datarate;        /* data rate received */
+	int rs_rateieee;
+	int rs_ratephy;
+
+#define IEEE80211_MAX_ANTENNA       3   /* Keep the same as ATH_MAX_ANTENNA */
+	uint8_t rs_rssictl[IEEE80211_MAX_ANTENNA];      /* RSSI (noise floor adjusted) */
+	uint8_t rs_rssiextn[IEEE80211_MAX_ANTENNA];     /* RSSI (noise floor adjusted) */
+	uint8_t rs_isvalidrssi; /* rs_rssi is valid or not */
+
+	enum ieee80211_phymode rs_phymode;
+	int rs_freq;
+
+	/* receive timestamp, accessible either as raw bytes or as a TSF value */
+	union {
+		uint8_t data[8];
+		uint64_t tsf;
+	} rs_tstamp;
+
+	/*
+	 * Detail channel structure of recv frame.
+	 * It could be NULL if not available
+	 */
+	struct ieee80211_channel *rs_full_chan;
+
+	uint8_t rs_isaggr;
+	uint8_t rs_isapsd;
+	int16_t rs_noisefloor;
+	uint16_t rs_channel;
+#ifdef ATH_SUPPORT_TxBF
+	uint32_t rs_rpttstamp;  /* txbf report time stamp */
+#endif
+
+	/* The following counts are meant to assist in stats calculation.
+	   These variables are incremented only in specific situations, and
+	   should not be relied upon for any purpose other than the original
+	   stats related purpose they have been introduced for. */
+
+	uint16_t rs_cryptodecapcount;   /* Crypto bytes decapped/demic'ed. */
+	uint8_t rs_padspace;    /* No. of padding bytes present after header
+	                           in wbuf. */
+	uint8_t rs_qosdecapcount;       /* QoS/HTC bytes decapped. */
+
+	/* End of stats calculation related counts. */
+
+	uint8_t rs_lsig[IEEE80211_LSIG_LEN];
+	uint8_t rs_htsig[IEEE80211_HTSIG_LEN];
+	uint8_t rs_servicebytes[IEEE80211_SB_LEN];
+
+} ieee80211_recv_status;
+#endif /* EXTERNAL_USE_ONLY */
+
+/*
+ * flags to be passed to ieee80211_vap_create function .
+ */
+#define IEEE80211_CLONE_BSSID           0x0001  /* allocate unique mac/bssid */
+#define IEEE80211_CLONE_NOBEACONS       0x0002  /* don't setup beacon timers */
+#define IEEE80211_CLONE_WDS             0x0004  /* enable WDS processing */
+#define IEEE80211_CLONE_WDSLEGACY       0x0008  /* legacy WDS operation */
+#define IEEE80211_PRIMARY_VAP           0x0010  /* primary vap */
+#define IEEE80211_P2PDEV_VAP            0x0020  /* p2pdev vap */
+#define IEEE80211_P2PGO_VAP             0x0040  /* p2p-go vap */
+#define IEEE80211_P2PCLI_VAP            0x0080  /* p2p-client vap */
+#define IEEE80211_CLONE_MACADDR         0x0100  /* create vap w/ specified mac/bssid */
+#define IEEE80211_CLONE_MATADDR         0x0200  /* create vap w/ specified MAT addr */
+#define IEEE80211_WRAP_VAP              0x0400  /* wireless repeater ap vap */
+
+/*
+ * For the new multi-vap scan feature, there is a set of default priority tables
+ * for each OpMode.
+ * The following are the default list of the VAP Scan Priority Mapping based on OpModes.
+ * NOTE: the following are only used when "#if ATH_SUPPORT_MULTIPLE_SCANS" is true.
+ */
+/* For IBSS opmode */
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_IBSS_BASE               0
+/* For STA opmode */
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_STA_BASE                0
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_STA_P2P_CLIENT          1
+/* For HostAp opmode */
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_AP_BASE                 0
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_AP_P2P_GO               1
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_AP_P2P_DEVICE           2
+/* For BTAmp opmode */
+#define DEF_VAP_SCAN_PRI_MAP_OPMODE_BTAMP_BASE              0
+
+/* VAP lifecycle events delivered through wlan_dev_event_handler_table. */
+typedef enum _ieee80211_dev_vap_event {
+	IEEE80211_VAP_CREATED = 1,
+	IEEE80211_VAP_STOPPED,
+	IEEE80211_VAP_DELETED
+} ieee80211_dev_vap_event;
+
+/* Callback table a client registers to observe device-level events. */
+typedef struct _wlan_dev_event_handler_table {
+	void (*wlan_dev_vap_event)(void *event_arg, wlan_dev_t, os_if_t, ieee80211_dev_vap_event);      /* callback to receive vap events */
+#ifdef ATH_SUPPORT_SPECTRAL
+	void (*wlan_dev_spectral_indicate)(void *, void *, uint32_t);
+#endif
+} wlan_dev_event_handler_table;
+
+/* Reason an AP VAP was stopped. */
+typedef enum _ieee80211_ap_stopped_reason {
+	IEEE80211_AP_STOPPED_REASON_DUMMY = 0,  /* Dummy placeholder. Should not use */
+	IEEE80211_AP_STOPPED_REASON_CHANNEL_DFS = 1,
+} ieee80211_ap_stopped_reason;
+
+/* Plain-int aliases for 802.11 reason/status codes. */
+typedef int IEEE80211_REASON_CODE;
+typedef int IEEE80211_STATUS;
+
+/*
+ * scan API related structs.
+ */
+/* Kind of scan being requested (foreground, background, etc.). */
+typedef enum _ieee80211_scan_type {
+	IEEE80211_SCAN_BACKGROUND,
+	IEEE80211_SCAN_FOREGROUND,
+	IEEE80211_SCAN_SPECTRAL,
+	IEEE80211_SCAN_REPEATER_BACKGROUND,
+	IEEE80211_SCAN_REPEATER_EXT_BACKGROUND,
+	IEEE80211_SCAN_RADIO_MEASUREMENTS,
+} ieee80211_scan_type;
+
+/*
+ * Priority numbers must be sequential, starting with 0.
+ */
+typedef enum ieee80211_scan_priority_t {
+	IEEE80211_SCAN_PRIORITY_VERY_LOW = 0,
+	IEEE80211_SCAN_PRIORITY_LOW,
+	IEEE80211_SCAN_PRIORITY_MEDIUM,
+	IEEE80211_SCAN_PRIORITY_HIGH,
+	IEEE80211_SCAN_PRIORITY_VERY_HIGH,
+
+	IEEE80211_SCAN_PRIORITY_COUNT   /* number of priorities supported */
+} IEEE80211_SCAN_PRIORITY;
+
+/* Identifies who requested a scan and which scan instance it is. */
+typedef uint16_t IEEE80211_SCAN_REQUESTOR;
+typedef uint32_t IEEE80211_SCAN_ID;
+
+#define IEEE80211_SCAN_ID_NONE                    0
+
+/* All P2P scans currently use medium priority */
+#define IEEE80211_P2P_DEFAULT_SCAN_PRIORITY       IEEE80211_SCAN_PRIORITY_MEDIUM
+#define IEEE80211_P2P_SCAN_PRIORITY_HIGH          IEEE80211_SCAN_PRIORITY_HIGH
+
+/* Masks identifying types/ID of scans */
+#define IEEE80211_SPECIFIC_SCAN       0x00000000
+#define IEEE80211_VAP_SCAN            0x01000000
+#define IEEE80211_ALL_SCANS           0x04000000
+
+/**
+ * host scan bit. only relevant for host/target architecture.
+ * do not reuse this bit definition. target uses this .
+ *
+ */
+#define IEEE80211_HOST_SCAN           0x80000000
+#define IEEE80211_SCAN_CLASS_MASK     0xFF000000
+
+#define IEEE80211_SCAN_PASSIVE            0x0001        /* passively scan all the channels */
+#define IEEE80211_SCAN_ACTIVE             0x0002        /* actively  scan all the channels (regdomain rules still apply) */
+#define IEEE80211_SCAN_2GHZ               0x0004        /* scan 2GHz band */
+#define IEEE80211_SCAN_5GHZ               0x0008        /* scan 5GHz band */
+#define IEEE80211_SCAN_ALLBANDS           (IEEE80211_SCAN_5GHZ | IEEE80211_SCAN_2GHZ)
+#define IEEE80211_SCAN_CONTINUOUS         0x0010        /* keep scanning until maxscantime expires */
+#define IEEE80211_SCAN_FORCED             0x0020        /* forced scan (OS request) - should proceed even in the presence of data traffic */
+#define IEEE80211_SCAN_NOW                0x0040        /* scan now (User request)  - should proceed even in the presence of data traffic */
+#define IEEE80211_SCAN_ADD_BCAST_PROBE    0x0080        /* add wildcard ssid and broadcast probe request if there is none */
+#define IEEE80211_SCAN_EXTERNAL           0x0100        /* scan requested by OS */
+#define IEEE80211_SCAN_BURST              0x0200        /* scan multiple channels before returning to BSS channel */
+#define IEEE80211_SCAN_CHAN_EVENT         0x0400        /* scan chan event for  offload architectures */
+#define IEEE80211_SCAN_FILTER_PROBE_REQ   0x0800        /* Filter probe requests- applicable only for offload architectures */
+
+#define IEEE80211_SCAN_PARAMS_MAX_SSID     10
+#define IEEE80211_SCAN_PARAMS_MAX_BSSID    10
+
+/* flag definitions passed to scan_cancel API */
+
+#define IEEE80211_SCAN_CANCEL_ASYNC 0x0 /* asynchronouly wait for scan SM to complete cancel */
+#define IEEE80211_SCAN_CANCEL_WAIT  0x1 /* wait for scan SM to complete cancel */
+#define IEEE80211_SCAN_CANCEL_SYNC  0x2 /* synchronously execute cancel scan */
+
+#ifndef EXTERNAL_USE_ONLY
+/* Caller-supplied predicate; returning true terminates the scan early. */
+typedef bool (*ieee80211_scan_termination_check)(void *arg);
+
+/* Parameters describing one scan request (see the notes on idle_time,
+ * max_scan_time and chan_list in the comment blocks further below). */
+typedef struct _ieee80211_scan_params {
+	ieee80211_scan_type type;
+	int min_dwell_time_active;      /* min time in msec on active channels */
+	int max_dwell_time_active;      /* max time in msec on active channels (if no response) */
+	int min_dwell_time_passive;     /* min time in msec on passive channels */
+	int max_dwell_time_passive;     /* max time in msec on passive channels (if no response) */
+	int min_rest_time;      /* min time in msec on the BSS channel, only valid for BG scan */
+	int max_rest_time;      /* max time in msec on the BSS channel, only valid for BG scan */
+	int max_offchannel_time;        /* max time away from BSS channel, in ms */
+	int repeat_probe_time;  /* time before sending second probe request */
+	int idle_time;          /* time in msec on bss channel before switching channel */
+	int max_scan_time;      /* maximum time in msec allowed for scan  */
+	int probe_delay;        /* delay in msec before sending probe request */
+	int offchan_retry_delay;        /* delay in msec before retrying off-channel switch */
+	int min_beacon_count;   /* number of home AP beacons to receive before leaving the home channel */
+	int max_offchan_retries;        /* maximum number of times to retry off-channel switch */
+	int beacon_timeout;     /* maximum time to wait for beacons */
+	int flags;              /* scan flags */
+	int num_channels;       /* number of channels to scan */
+	bool multiple_ports_active;     /* driver has multiple ports active in the home channel */
+	bool restricted_scan;   /* Perform restricted scan */
+	bool chan_list_allocated;       /* NOTE(review): presumably marks chan_list as heap-allocated — confirm owner/free path */
+	IEEE80211_SCAN_PRIORITY p2p_scan_priority;      /* indicates the scan priority if this is a P2P-related scan */
+	uint32_t *chan_list;    /* array of ieee channels (or) frequencies to scan */
+	int num_ssid;           /* number of desired ssids */
+	ieee80211_ssid ssid_list[IEEE80211_SCAN_PARAMS_MAX_SSID];
+	int num_bssid;          /* number of desired bssids */
+	uint8_t bssid_list[IEEE80211_SCAN_PARAMS_MAX_BSSID][IEEE80211_ADDR_LEN];
+	struct ieee80211_node *bss_node;        /* BSS node */
+	int ie_len;             /* length of the ie data to be added to probe req */
+	uint8_t *ie_data;       /* pointer to ie data */
+	ieee80211_scan_termination_check check_termination_function;    /* function checking for termination condition */
+	void *check_termination_context;        /* context passed to function above */
+} ieee80211_scan_params;
+
+/* Data types used to specify scan priorities */
+typedef uint32_t IEEE80211_PRIORITY_MAPPING[IEEE80211_SCAN_PRIORITY_COUNT];
+
+/**************************************
+ * Called before attempting to roam.  Modifies the rssiAdder of a BSS
+ * based on the preferred status of a BSS.
+ *
+ * According to CCX spec, AP in the neighbor list is not meant for giving extra
+ * weightage in roaming. By doing so, roaming becomes sticky. See bug 21220.
+ * Change the weightage to 0. Cisco may ask in future for a user control of
+ * this weightage.
+ */
+#define PREFERRED_BSS_RANK                20
+#define NEIGHBOR_BSS_RANK                  0    /* must be less than preferred BSS rank */
+
+/*
+ * The utility of the BSS is the metric used in the selection
+ * of a BSS. The Utility of the BSS is reduced if we just left the BSS.
+ * The Utility of the BSS is not reduced if we have left the
+ * BSS for 8 seconds (8000ms) or more.
+ * 2^13 milliseconds is a close approximation to avoid expensive division
+ */
+#define LAST_ASSOC_TIME_DELTA_REQUIREMENT (1 << 13)     /* 8192 */
+
+#define QBSS_SCALE_MAX                   255    /* Qbss channel load Max value */
+#define QBSS_SCALE_DOWN_FACTOR             2    /* scale factor to reduce Qbss channel load */
+#define QBSS_HYST_ADJ                     60    /* Qbss Weightage factor for the current AP */
+
+/*
+ * Flags used to set field APState
+ */
+#define AP_STATE_GOOD    0x00
+#define AP_STATE_BAD     0x01
+#define AP_STATE_RETRY   0x10
+#define BAD_AP_TIMEOUT   6000   /* In milli seconds */
+/*
+ * To disable BAD_AP status check on any scan entry
+ */
+#define BAD_AP_TIMEOUT_DISABLED             0
+
+/*
+ * BAD_AP timeout specified in seconds
+ */
+#define BAD_AP_TIMEOUT_IN_SECONDS           10
+
+/*
+ * State values used to represent our assoc_state with ap (discrete, not bitmasks)
+ */
+#define AP_ASSOC_STATE_NONE     0
+#define AP_ASSOC_STATE_AUTH     1
+#define AP_ASSOC_STATE_ASSOC    2
+
+/*
+ * Entries in the scan list are considered obsolete after 75 seconds.
+ */
+#define IEEE80211_SCAN_ENTRY_EXPIRE_TIME           75000
+
+/*
+ * idle time is only valid for scan type IEEE80211_SCAN_BACKGROUND.
+ * if idle time is set then the scanner would change channel from BSS
+ * channel to foreign channel only if both resttime is expired and
+ * there was no traffic for idletime msec on the bss channel.
+ * value of 0 for idletime would cause the channel to switch from BSS
+ * channel to foreign channel as soon as the resttime is expired.
+ *
+ * if maxscantime is nonzero and if the scanner can not complete the
+ * scan in maxscantime msec then the scanner will cancel the scan and
+ * post IEEE80211_SCAN_COMPLETED event with reason SCAN_TIMEDOUT.
+ *
+ */
+
+/*
+ * chanlist can be either ieee channels (or) frequencies.
+ * if a value is less than 1000 implementation assumes it
+ * as ieee channel # otherwise implementation assumes it
+ * as frequency in Mhz.
+ */
+
+/* Events delivered to registered ieee80211_scan_event_handler callbacks. */
+typedef enum _ieee80211_scan_event_type {
+	IEEE80211_SCAN_STARTED,
+	IEEE80211_SCAN_COMPLETED,
+	IEEE80211_SCAN_RADIO_MEASUREMENT_START,
+	IEEE80211_SCAN_RADIO_MEASUREMENT_END,
+	IEEE80211_SCAN_RESTARTED,
+	IEEE80211_SCAN_HOME_CHANNEL,
+	IEEE80211_SCAN_FOREIGN_CHANNEL,
+	IEEE80211_SCAN_BSSID_MATCH,
+	IEEE80211_SCAN_FOREIGN_CHANNEL_GET_NF,
+	IEEE80211_SCAN_DEQUEUED,
+	IEEE80211_SCAN_PREEMPTED,
+
+	IEEE80211_SCAN_EVENT_COUNT      /* number of event types */
+} ieee80211_scan_event_type;
+
+/* Why a scan finished; reported with IEEE80211_SCAN_COMPLETED. */
+typedef enum ieee80211_scan_completion_reason {
+	IEEE80211_REASON_NONE,
+	IEEE80211_REASON_COMPLETED,
+	IEEE80211_REASON_CANCELLED,
+	IEEE80211_REASON_TIMEDOUT,
+	IEEE80211_REASON_TERMINATION_FUNCTION,
+	IEEE80211_REASON_MAX_OFFCHAN_RETRIES,
+	IEEE80211_REASON_PREEMPTED,
+	IEEE80211_REASON_RUN_FAILED,
+	IEEE80211_REASON_INTERNAL_STOP,
+
+	IEEE80211_REASON_COUNT  /* number of completion reasons */
+} ieee80211_scan_completion_reason;
+
+/* Payload handed to scan event handlers for every scan event. */
+typedef struct _ieee80211_scan_event {
+	ieee80211_scan_event_type type;
+	ieee80211_scan_completion_reason reason;        /* valid for completion events */
+	wlan_chan_t chan;       /* channel the event relates to */
+	IEEE80211_SCAN_REQUESTOR requestor;     /* Requestor ID passed to the scan_start function */
+	IEEE80211_SCAN_ID scan_id;      /* Specific ID of the scan reporting the event */
+} ieee80211_scan_event;
+
+/* Scheduling state of a scan request in the scan scheduler. */
+typedef enum _ieee80211_scan_request_status {
+	IEEE80211_SCAN_STATUS_QUEUED,
+	IEEE80211_SCAN_STATUS_RUNNING,
+	IEEE80211_SCAN_STATUS_PREEMPTED,
+	IEEE80211_SCAN_STATUS_COMPLETED
+} ieee80211_scan_request_status;
+
+/*
+ * the sentry field of the ieee80211_scan_event is only valid if the
+ * event type is IEEE80211_SCAN_BSSID_MATCH.
+ */
+
+/* Callback invoked for each scan event on the given vap. */
+typedef void (*ieee80211_scan_event_handler)(wlan_if_t vaphandle,
+					     ieee80211_scan_event *event,
+					     void *arg);
+
+/* Snapshot of a scan's parameters and current progress/state. */
+typedef struct _ieee80211_scan_info {
+	ieee80211_scan_type type;
+	IEEE80211_SCAN_REQUESTOR requestor;     /* Originator ID passed to the scan_start function */
+	IEEE80211_SCAN_ID scan_id;      /* Specific ID of the scan reporting the event */
+	IEEE80211_SCAN_PRIORITY priority;       /* Requested priority level (low/medium/high) */
+	ieee80211_scan_request_status scheduling_status;        /* Queued/running/preempted/completed */
+	int min_dwell_time_active;      /* min time in msec on active channels */
+	int max_dwell_time_active;      /* max time in msec on active channel (if no response) */
+	int min_dwell_time_passive;     /* min time in msec on passive channels */
+	int max_dwell_time_passive;     /* max time in msec on passive channel */
+	int min_rest_time;      /* min time in msec on the BSS channel, only valid for BG scan */
+	int max_rest_time;      /* max time in msec on the BSS channel, only valid for BG scan */
+	int max_offchannel_time;        /* max time away from BSS channel, in ms */
+	int repeat_probe_time;  /* time before sending second probe request */
+	int min_beacon_count;   /* number of home AP beacons to receive before leaving the home channel */
+	int flags;              /* scan flags */
+	systime_t scan_start_time;      /* system time when the last scan started */
+	int scanned_channels;   /* number of scanned channels */
+	int default_channel_list_length;        /* number of channels in the default channel list */
+	int channel_list_length;        /* number of channels in the channel list used for the current scan */
+	uint8_t in_progress : 1,  /* if the scan is in progress */
+		cancelled : 1,  /* if the scan is cancelled */
+		preempted : 1,  /* if the scan is preempted */
+		restricted : 1; /* if the scan is restricted */
+} ieee80211_scan_info;
+
+/* Bookkeeping record for one queued/running scan request. */
+typedef struct _ieee80211_scan_request_info {
+	wlan_if_t vaphandle;    /* vap that issued the request */
+	IEEE80211_SCAN_REQUESTOR requestor;     /* originator ID */
+	IEEE80211_SCAN_PRIORITY requested_priority;     /* priority asked for by the caller */
+	IEEE80211_SCAN_PRIORITY absolute_priority;      /* priority after mapping/adjustment */
+	IEEE80211_SCAN_ID scan_id;
+	ieee80211_scan_request_status scheduling_status;
+	ieee80211_scan_params params;   /* full copy of the request parameters */
+	systime_t request_timestamp;    /* when the request was submitted */
+	uint32_t maximum_duration;
+} ieee80211_scan_request_info;
+
+#endif /* EXTERNAL_USE_ONLY */
+
+#ifndef EXTERNAL_USE_ONLY
+typedef void (*ieee80211_acs_event_handler)(void *arg, wlan_chan_t channel);
+#endif /* EXTERNAL_USE_ONLY */
+
+#define MAX_CHAINS 3
+
+/* Per-chain RSSI report; valid_mask flags which array entries are set. */
+typedef struct _wlan_rssi_info {
+	int8_t avg_rssi;        /* average rssi */
+	uint8_t valid_mask;     /* bitmap of valid elements in rssi_ctrl/ext array */
+	int8_t rssi_ctrl[MAX_CHAINS];   /* per-chain rssi, control channel */
+	int8_t rssi_ext[MAX_CHAINS];    /* per-chain rssi, extension channel */
+} wlan_rssi_info;
+
+/* Which RSSI measurement a query refers to. */
+typedef enum _wlan_rssi_type {
+	WLAN_RSSI_TX,
+	WLAN_RSSI_RX,
+	WLAN_RSSI_BEACON,       /* rssi of the beacon, only valid for STA/IBSS vap */
+	WLAN_RSSI_RX_DATA
+} wlan_rssi_type;
+
+/* Whether a reported rate is a legacy rate or an HT MCS index. */
+typedef enum _ieee80211_rate_type {
+	IEEE80211_RATE_TYPE_LEGACY,
+	IEEE80211_RATE_TYPE_MCS,
+} ieee80211_rate_type;
+
+/* Rate statistics for a node. */
+typedef struct _ieee80211_rate_info {
+	ieee80211_rate_type type;
+	uint32_t rate;          /* average rate in kbps */
+	uint32_t lastrate;      /* last packet rate in kbps */
+	uint8_t mcs;            /* mcs index . is valid if rate type is MCS20 or MCS40 */
+	uint8_t maxrate_per_client;
+} ieee80211_rate_info;
+
+/* Selector for per-node parameter get/set operations. */
+typedef enum _ieee80211_node_param_type {
+	IEEE80211_NODE_PARAM_TX_POWER,
+	IEEE80211_NODE_PARAM_ASSOCID,
+	IEEE80211_NODE_PARAM_INACT,     /* inactivity timer value */
+	IEEE80211_NODE_PARAM_AUTH_MODE, /* auth mode */
+	IEEE80211_NODE_PARAM_CAP_INFO,  /* capability info (comment previously duplicated "auth mode") */
+} ieee80211_node_param_type;
+
+/*
+ * Per/node (station) statistics available when operating as an AP.
+ */
+struct ieee80211_nodestats {
+	uint32_t ns_rx_data;    /* rx data frames */
+	uint32_t ns_rx_mgmt;    /* rx management frames */
+	uint32_t ns_rx_ctrl;    /* rx control frames */
+	uint32_t ns_rx_ucast;   /* rx unicast frames */
+	uint32_t ns_rx_mcast;   /* rx multi/broadcast frames */
+	uint64_t ns_rx_bytes;   /* rx data count (bytes) */
+	uint64_t ns_rx_beacons; /* rx beacon frames */
+	uint32_t ns_rx_proberesp;       /* rx probe response frames */
+
+	uint32_t ns_rx_dup;     /* rx discard 'cuz dup */
+	uint32_t ns_rx_noprivacy;       /* rx w/ wep but privacy off */
+	uint32_t ns_rx_wepfail; /* rx wep processing failed */
+	uint32_t ns_rx_demicfail;       /* rx demic failed */
+
+	/* We log MIC and decryption failures against Transmitter STA stats.
+	   Though the frames may not actually be sent by STAs corresponding
+	   to TA, the stats are still valuable for some customers as a sort
+	   of rough indication.
+	   Also note that the mapping from TA to STA may fail sometimes. */
+	uint32_t ns_rx_tkipmic; /* rx TKIP MIC failure */
+	uint32_t ns_rx_ccmpmic; /* rx CCMP MIC failure */
+	uint32_t ns_rx_wpimic;  /* rx WAPI MIC failure */
+	uint32_t ns_rx_tkipicv; /* rx ICV check failed (TKIP) */
+	uint32_t ns_rx_decap;   /* rx decapsulation failed */
+	uint32_t ns_rx_defrag;  /* rx defragmentation failed */
+	uint32_t ns_rx_disassoc;        /* rx disassociation */
+	uint32_t ns_rx_deauth;  /* rx deauthentication */
+	uint32_t ns_rx_action;  /* rx action */
+	uint32_t ns_rx_decryptcrc;      /* rx decrypt failed on crc */
+	uint32_t ns_rx_unauth;  /* rx on unauthorized port */
+	uint32_t ns_rx_unencrypted;     /* rx unencrypted w/ privacy */
+
+	uint32_t ns_tx_data;    /* tx data frames */
+	uint32_t ns_tx_data_success;    /* tx data frames successfully
+	                                   transmitted (unicast only) */
+	uint32_t ns_tx_mgmt;    /* tx management frames */
+	uint32_t ns_tx_ucast;   /* tx unicast frames */
+	uint32_t ns_tx_mcast;   /* tx multi/broadcast frames */
+	uint64_t ns_tx_bytes;   /* tx data count (bytes) */
+	uint64_t ns_tx_bytes_success;   /* tx success data count - unicast only
+	                                   (bytes) */
+	uint32_t ns_tx_probereq;        /* tx probe request frames */
+	uint32_t ns_tx_uapsd;   /* tx on uapsd queue */
+	uint32_t ns_tx_discard; /* tx dropped by NIC */
+
+	uint32_t ns_tx_novlantag;       /* tx discard 'cuz no tag */
+	uint32_t ns_tx_vlanmismatch;    /* tx discard 'cuz bad tag */
+
+	uint32_t ns_tx_eosplost;        /* uapsd EOSP retried out */
+
+	uint32_t ns_ps_discard; /* ps discard 'cuz of age */
+
+	uint32_t ns_uapsd_triggers;     /* uapsd triggers */
+	uint32_t ns_uapsd_duptriggers;  /* uapsd duplicate triggers */
+	uint32_t ns_uapsd_ignoretriggers;       /* uapsd ignored triggers */
+	uint32_t ns_uapsd_active;       /* uapsd active */
+	uint32_t ns_uapsd_triggerenabled;       /* uapsd trigger enabled */
+
+	/* MIB-related state */
+	uint32_t ns_tx_assoc;   /* [re]associations */
+	uint32_t ns_tx_assoc_fail;      /* [re]association failures */
+	uint32_t ns_tx_auth;    /* [re]authentications */
+	uint32_t ns_tx_auth_fail;       /* [re]authentication failures */
+	uint32_t ns_tx_deauth;  /* deauthentications */
+	uint32_t ns_tx_deauth_code;     /* last deauth reason */
+	uint32_t ns_tx_disassoc;        /* disassociations */
+	uint32_t ns_tx_disassoc_code;   /* last disassociation reason */
+	uint32_t ns_psq_drops;  /* power save queue drops */
+};
+
+/*
+ * station power save mode.
+ */
+typedef enum ieee80211_psmode {
+	IEEE80211_PWRSAVE_NONE = 0,     /* no power save */
+	IEEE80211_PWRSAVE_LOW,
+	IEEE80211_PWRSAVE_NORMAL,
+	IEEE80211_PWRSAVE_MAXIMUM,
+	IEEE80211_PWRSAVE_WNM   /* WNM-Sleep Mode */
+} ieee80211_pwrsave_mode;
+
+/* station power save pspoll handling */
+typedef enum {
+	IEEE80211_CONTINUE_PSPOLL_FOR_MORE_DATA,
+	IEEE80211_WAKEUP_FOR_MORE_DATA,
+} ieee80211_pspoll_moredata_handling;
+
+/*
+ * apps power save state.
+ */
+typedef enum {
+	APPS_AWAKE = 0,
+	APPS_PENDING_SLEEP,
+	APPS_SLEEP,
+	APPS_FAKE_SLEEP,        /* Pending blocking sleep */
+	APPS_FAKING_SLEEP,      /* Blocking sleep */
+	APPS_UNKNOWN_PWRSAVE,
+} ieee80211_apps_pwrsave_state;
+
+typedef enum _iee80211_mimo_powersave_mode {
+	IEEE80211_MIMO_POWERSAVE_NONE,  /* no mimo power save */
+	IEEE80211_MIMO_POWERSAVE_STATIC,        /* static mimo power save */
+	IEEE80211_MIMO_POWERSAVE_DYNAMIC        /* dynamic mimo powersave */
+} ieee80211_mimo_powersave_mode;
+
+#ifdef ATH_COALESCING
+typedef enum _ieee80211_coalescing_state {
+	IEEE80211_COALESCING_DISABLED = 0,      /* Coalescing is disabled */
+	IEEE80211_COALESCING_DYNAMIC = 1,       /* Dynamically move to Enabled state based on Uruns */
+	IEEE80211_COALESCING_ENABLED = 2,       /* Coalescing is enabled */
+} ieee80211_coalescing_state;
+
+#define IEEE80211_TX_COALESCING_THRESHOLD     5 /* Number of underrun errors to trigger coalescing */
+#endif
+
+typedef enum _ieee80211_cap {
+	IEEE80211_CAP_SHSLOT,   /* CAPABILITY: short slot */
+	IEEE80211_CAP_SHPREAMBLE,       /* CAPABILITY: short preamble */
+	IEEE80211_CAP_MULTI_DOMAIN,     /* CAPABILITY: multiple domain */
+	IEEE80211_CAP_WMM,      /* CAPABILITY: WMM */
+	IEEE80211_CAP_HT,       /* CAPABILITY: HT */
+	IEEE80211_CAP_PERF_PWR_OFLD,    /* CAPABILITY: power performance offload support */
+	IEEE80211_CAP_11AC,     /* CAPABILITY: 11ac support */
+} ieee80211_cap;
+
+typedef enum _ieee80211_device_param {
+	IEEE80211_DEVICE_RSSI_CTL,
+	IEEE80211_DEVICE_NUM_TX_CHAIN,
+	IEEE80211_DEVICE_NUM_RX_CHAIN,
+	IEEE80211_DEVICE_TX_CHAIN_MASK,
+	IEEE80211_DEVICE_RX_CHAIN_MASK,
+	IEEE80211_DEVICE_TX_CHAIN_MASK_LEGACY,
+	IEEE80211_DEVICE_RX_CHAIN_MASK_LEGACY,
+	IEEE80211_DEVICE_BMISS_LIMIT,   /* # of beacon misses for HW to generate BMISS intr */
+	IEEE80211_DEVICE_PROTECTION_MODE,       /* protection mode */
+	IEEE80211_DEVICE_BLKDFSCHAN,    /* block the use of DFS channels */
+	IEEE80211_DEVICE_GREEN_AP_PS_ENABLE,
+	IEEE80211_DEVICE_GREEN_AP_PS_TIMEOUT,
+	IEEE80211_DEVICE_GREEN_AP_PS_ON_TIME,
+	IEEE80211_DEVICE_CWM_EXTPROTMODE,
+	IEEE80211_DEVICE_CWM_EXTPROTSPACING,
+	IEEE80211_DEVICE_CWM_ENABLE,
+	IEEE80211_DEVICE_CWM_EXTBUSYTHRESHOLD,
+	IEEE80211_DEVICE_DOTH,
+	IEEE80211_DEVICE_ADDBA_MODE,
+	IEEE80211_DEVICE_COUNTRYCODE,
+	IEEE80211_DEVICE_MULTI_CHANNEL, /* turn on/off off channel support */
+	IEEE80211_DEVICE_MAX_AMSDU_SIZE,        /* Size of AMSDU to be sent on the air */
+	IEEE80211_DEVICE_P2P,   /* Enable or Disable P2P */
+	IEEE80211_DEVICE_OVERRIDE_SCAN_PROBERESPONSE_IE,        /* Override scan Probe response IE, 0: Don't over-ride */
+	IEEE80211_DEVICE_2G_CSA,
+	IEEE80211_DEVICE_PWRTARGET,
+	IEEE80211_DEVICE_OFF_CHANNEL_SUPPORT,
+} ieee80211_device_param;
+
+typedef enum _ieee80211_param {
+	IEEE80211_BEACON_INTVAL,        /* in TUs */
+	IEEE80211_LISTEN_INTVAL,        /* number of beacons */
+	IEEE80211_DTIM_INTVAL,  /* number of beacons */
+	IEEE80211_BMISS_COUNT_RESET,    /* number of beacon miss intrs before reset */
+	IEEE80211_BMISS_COUNT_MAX,      /* number of beacon miss intrs for bmiss notifications */
+	IEEE80211_ATIM_WINDOW,  /* ATIM window */
+	IEEE80211_SHORT_SLOT,   /* short slot on/off */
+	IEEE80211_SHORT_PREAMBLE,       /* short preamble on/off */
+	IEEE80211_RTS_THRESHOLD,        /* rts threshold, 0 means no rts threshold  */
+	IEEE80211_FRAG_THRESHOLD,       /* fragmentation threshold, 0 means no rts threshold  */
+	IEEE80211_FIXED_RATE,   /*
+	                         * rate code series(0: auto rate, 32 bit value:  rate
+	                         * codes for 4 rate series. each byte for one rate series)
+	                         */
+	IEEE80211_MCAST_RATE,   /* rate in Kbps */
+	IEEE80211_TXPOWER,      /* in 0.5db units */
+	IEEE80211_AMPDU_DENCITY,        /* AMPDU density */
+	IEEE80211_AMPDU_LIMIT,  /* AMPDU limit */
+	IEEE80211_MAX_AMPDU,    /* Max AMPDU Exp */
+	IEEE80211_VHT_MAX_AMPDU,        /* VHT Max AMPDU Exp */
+	IEEE80211_WPS_MODE,     /* WPS mode */
+	IEEE80211_TSN_MODE,     /* TSN mode */
+	IEEE80211_MULTI_DOMAIN, /* Multiple domain */
+	IEEE80211_SAFE_MODE,    /* Safe mode */
+	IEEE80211_NOBRIDGE_MODE,        /* No bridging done, all frames sent up the stack */
+	IEEE80211_PERSTA_KEYTABLE_SIZE, /* IBSS-only, read-only: persta key table size */
+	IEEE80211_RECEIVE_80211,        /* deliver std 802.11 frames 802.11 instead of ethernet frames on the rx */
+	IEEE80211_SEND_80211,   /* OS sends std 802.11 frames 802.11 instead of ethernet frames on tx side */
+	IEEE80211_MIN_BEACON_COUNT,     /* minimum number of beacons to tx/rx before vap can pause */
+	IEEE80211_IDLE_TIME,    /* minimum no-activity time before vap can pause */
+	IEEE80211_MIN_FRAMESIZE,        /* smallest frame size we are allowed to receive */
+	/* features. 0:feature is off. 1:feature is on. */
+	IEEE80211_FEATURE_WMM,  /* WMM */
+	IEEE80211_FEATURE_WMM_PWRSAVE,  /* WMM Power Save */
+	IEEE80211_FEATURE_UAPSD,        /* UAPSD setting (BE/BK/VI/VO) */
+	IEEE80211_FEATURE_WDS,  /* dynamic WDS feature */
+	IEEE80211_FEATURE_PRIVACY,      /* encryption */
+	IEEE80211_FEATURE_DROP_UNENC,   /* drop un encrypted frames */
+	IEEE80211_FEATURE_COUNTER_MEASURES,     /* turn on counter measures */
+	IEEE80211_FEATURE_HIDE_SSID,    /* turn on hide ssid feature */
+	IEEE80211_FEATURE_APBRIDGE,     /* turn on internal mcast traffic bridging for AP */
+	IEEE80211_FEATURE_PUREB,        /* turn on pure B mode for AP */
+	IEEE80211_FEATURE_PUREG,        /* turn on pure G mode for AP */
+	IEEE80211_FEATURE_REGCLASS,     /* add regulatory class IE in AP */
+	IEEE80211_FEATURE_COUNTRY_IE,   /* add country IE for vap in AP */
+	IEEE80211_FEATURE_IC_COUNTRY_IE,        /* add country IE for ic in AP */
+	IEEE80211_FEATURE_DOTH, /* enable 802.11h */
+	IEEE80211_FEATURE_PURE11N,      /* enable pure 11n  mode */
+	IEEE80211_FEATURE_PRIVATE_RSNIE,        /* enable OS shim to setup RSN IE */
+	IEEE80211_FEATURE_COPY_BEACON,  /* keep a copy of beacon */
+	IEEE80211_FEATURE_PSPOLL,       /* enable/disable pspoll mode in power save SM */
+	IEEE80211_FEATURE_CONTINUE_PSPOLL_FOR_MOREDATA, /* enable/disable option to continue sending ps polls when there is more data */
+	IEEE80211_FEATURE_AMPDU,        /* Enable or Disable Aggregation */
+#ifdef ATH_COALESCING
+	IEEE80211_FEATURE_TX_COALESCING,        /* enable tx coalescing */
+#endif
+	IEEE80211_FEATURE_VAP_IND,      /* Repeater independent VAP */
+	IEEE80211_FIXED_RETRIES,        /* fixed retries  0-4 */
+	IEEE80211_SHORT_GI,     /* short gi on/off */
+	IEEE80211_HT40_INTOLERANT,
+	IEEE80211_CHWIDTH,
+	IEEE80211_CHEXTOFFSET,
+	IEEE80211_DISABLE_2040COEXIST,
+	IEEE80211_DISABLE_HTPROTECTION,
+	IEEE80211_STA_QUICKKICKOUT,
+	IEEE80211_CHSCANINIT,
+	IEEE80211_FEATURE_STAFWD,       /* dynamic AP Client  feature */
+	IEEE80211_DRIVER_CAPS,
+	IEEE80211_UAPSD_MAXSP,  /* UAPSD service period setting (0:unlimited, 2,4,6) */
+	IEEE80211_WEP_MBSSID,
+	IEEE80211_MGMT_RATE,    /* ieee rate to be used for management */
+	IEEE80211_RESMGR_VAP_AIR_TIME_LIMIT,    /* When multi-channel enabled, restrict air-time allocated to a VAP */
+	IEEE80211_TDLS_MACADDR1,        /* Upper 4 bytes of device's MAC address */
+	IEEE80211_TDLS_MACADDR2,        /* Lower 2 bytes of device's MAC address */
+	IEEE80211_TDLS_ACTION,  /* TDLS action requested                 */
+	IEEE80211_AUTO_ASSOC,
+	IEEE80211_PROTECTION_MODE,      /* per VAP protection mode */
+	IEEE80211_AUTH_INACT_TIMEOUT,   /* inactivity time while waiting for 802.11x auth to complete */
+	IEEE80211_INIT_INACT_TIMEOUT,   /* inactivity time while waiting for 802.11 auth/assoc to complete */
+	IEEE80211_RUN_INACT_TIMEOUT,    /* inactivity time when fully authed */
+	IEEE80211_PROBE_INACT_TIMEOUT,  /* inactivity counter value below which starts probing */
+	IEEE80211_QBSS_LOAD,
+	IEEE80211_WNM_CAP,
+	IEEE80211_WNM_BSS_CAP,
+	IEEE80211_WNM_TFS_CAP,
+	IEEE80211_WNM_TIM_CAP,
+	IEEE80211_WNM_SLEEP_CAP,
+	IEEE80211_WNM_FMS_CAP,
+	IEEE80211_AP_REJECT_DFS_CHAN,   /* AP to reject resuming on DFS Channel */
+	IEEE80211_ABOLT,
+	IEEE80211_COMP,
+	IEEE80211_FF,
+	IEEE80211_TURBO,
+	IEEE80211_BURST,
+	IEEE80211_AR,
+	IEEE80211_SLEEP,
+	IEEE80211_EOSPDROP,
+	IEEE80211_MARKDFS,
+	IEEE80211_WDS_AUTODETECT,
+	IEEE80211_WEP_TKIP_HT,
+	IEEE80211_ATH_RADIO,
+	IEEE80211_IGNORE_11DBEACON,
+	/* Video debug feature */
+	IEEE80211_VI_DBG_CFG,   /* Video debug configuration - Bit0- enable dbg, Bit1 - enable stats log */
+	IEEE80211_VI_DBG_NUM_STREAMS,   /* Total number of receive streams */
+	IEEE80211_VI_STREAM_NUM,        /* the stream number whose marker parameters are being set */
+	IEEE80211_VI_DBG_NUM_MARKERS,   /* total number of markers used to filter pkts */
+	IEEE80211_VI_MARKER_NUM,        /* the marker number whose parameters (offset, size & match) are being set */
+	IEEE80211_VI_MARKER_OFFSET_SIZE,        /* byte offset from skb start (upper 16 bits) & size in bytes(lower 16 bits) */
+	IEEE80211_VI_MARKER_MATCH,      /* marker pattern match used in filtering */
+	IEEE80211_VI_RXSEQ_OFFSET_SIZE, /* Rx Seq num offset skb start (upper 16 bits) & size in bytes(lower 16 bits) */
+	IEEE80211_VI_RX_SEQ_RSHIFT,     /* right-shift value in case field is not word aligned */
+	IEEE80211_VI_RX_SEQ_MAX,        /* maximum Rx Seq number (to check wrap around) */
+	IEEE80211_VI_RX_SEQ_DROP,       /* Indicator to the debug app that a particular seq num has been dropped */
+	IEEE80211_VI_TIME_OFFSET_SIZE,  /* Timestamp offset skb start (upper 16 bits) & size in bytes(lower 16 bits) */
+	IEEE80211_VI_RESTART,   /* If set to 1 resets all internal variables/counters & restarts debug tool */
+	IEEE80211_VI_RXDROP_STATUS,     /* Total RX drops in wireless */
+	IEEE80211_TRIGGER_MLME_RESP,    /* Option for App to trigger mlme response */
+#ifdef ATH_SUPPORT_TxBF
+	IEEE80211_TXBF_AUTO_CVUPDATE,   /* auto CV update enable */
+	IEEE80211_TXBF_CVUPDATE_PER,    /* per threshold to initial CV update */
+#endif
+	IEEE80211_MAX_CLIENT_NUMBERS,
+	IEEE80211_SMARTNET,
+	IEEE80211_FEATURE_MFP_TEST,     /* MFP test */
+	IEEE80211_WEATHER_RADAR,        /* weather radar channel skip */
+	IEEE80211_WEP_KEYCACHE, /* WEP KEYCACHE is enable */
+	IEEE80211_SEND_DEAUTH,  /* send deauth instead of disassoc while doing interface down  */
+	IEEE80211_SET_TXPWRADJUST,
+	IEEE80211_RRM_CAP,
+	IEEE80211_RRM_DEBUG,
+	IEEE80211_RRM_STATS,
+	IEEE80211_RRM_SLWINDOW,
+	IEEE80211_FEATURE_OFF_CHANNEL_SUPPORT,
+	IEEE80211_FIXED_VHT_MCS,        /* VHT mcs index */
+	IEEE80211_FIXED_NSS,    /* Spatial Streams count */
+	IEEE80211_SUPPORT_LDPC, /* LDPC Support */
+	IEEE80211_SUPPORT_TX_STBC,      /* TX STBC enable/disable */
+	IEEE80211_SUPPORT_RX_STBC,      /* RX STBC enable/disable */
+	IEEE80211_DEFAULT_KEYID,        /* XMIT default key */
+	IEEE80211_OPMODE_NOTIFY_ENABLE, /* Op mode notification enable/disable */
+	IEEE80211_ENABLE_RTSCTS,        /* Enable/Disable RTS-CTS */
+	IEEE80211_VHT_MCSMAP,   /* VHT MCS Map */
+	IEEE80211_GET_ACS_STATE,        /* get acs state */
+	IEEE80211_GET_CAC_STATE,        /* get cac state */
+} ieee80211_param;
+
+#define  IEEE80211_PROTECTION_NONE         0
+#define  IEEE80211_PROTECTION_CTSTOSELF    1
+#define  IEEE80211_PROTECTION_RTS_CTS      2
+
+typedef enum _ieee80211_privacy_filter {
+	IEEE80211_PRIVACY_FILTER_ALLWAYS,
+	IEEE80211_PRIVACY_FILTER_KEY_UNAVAILABLE,
+} ieee80211_privacy_filter;
+
+typedef enum _ieee80211_privacy_filter_packet_type {
+	IEEE80211_PRIVACY_FILTER_PACKET_UNICAST,
+	IEEE80211_PRIVACY_FILTER_PACKET_MULTICAST,
+	IEEE80211_PRIVACY_FILTER_PACKET_BOTH
+} ieee80211_privacy_filter_packet_type;
+
+typedef struct _ieee80211_privacy_excemption_filter {
+	uint16_t ether_type;    /* type of ethernet to apply this filter, in host byte order */
+	ieee80211_privacy_filter filter_type;
+	ieee80211_privacy_filter_packet_type packet_type;
+} ieee80211_privacy_exemption;
+
+/*
+ * Authentication mode.
+ * NB: the usage of auth modes NONE, AUTO are deprecated,
+ * they are implemented through combinations of other auth modes
+ * and cipher types. The deprecated values are preserved here to
+ * maintain binary compatibility with applications like
+ * wpa_supplicant and hostapd.
+ */
+typedef enum _ieee80211_auth_mode {
+	IEEE80211_AUTH_NONE = 0,        /* deprecated */
+	IEEE80211_AUTH_OPEN = 1,        /* open */
+	IEEE80211_AUTH_SHARED = 2,      /* shared-key */
+	IEEE80211_AUTH_8021X = 3,       /* 802.1x */
+	IEEE80211_AUTH_AUTO = 4,        /* deprecated */
+	IEEE80211_AUTH_WPA = 5, /* WPA */
+	IEEE80211_AUTH_RSNA = 6,        /* WPA2/RSNA */
+	IEEE80211_AUTH_CCKM = 7,        /* CCKM (Cisco Centralized Key Management) */
+	IEEE80211_AUTH_WAPI = 8,        /* WAPI */
+} ieee80211_auth_mode;
+
+#define IEEE80211_AUTH_MAX      (IEEE80211_AUTH_WAPI+1)
+
+/*
+ * Cipher types.
+ * NB: The values are preserved here to maintain binary compatibility
+ * with applications like wpa_supplicant and hostapd.
+ */
+typedef enum _ieee80211_cipher_type {
+	IEEE80211_CIPHER_WEP = 0,
+	IEEE80211_CIPHER_TKIP = 1,
+	IEEE80211_CIPHER_AES_OCB = 2,
+	IEEE80211_CIPHER_AES_CCM = 3,
+	IEEE80211_CIPHER_WAPI = 4,
+	IEEE80211_CIPHER_CKIP = 5,
+	IEEE80211_CIPHER_AES_CMAC = 6,
+	IEEE80211_CIPHER_NONE = 7,
+} ieee80211_cipher_type;
+
+#define IEEE80211_CIPHER_MAX    (IEEE80211_CIPHER_NONE+1)
+
+/* key direction */
+typedef enum _ieee80211_key_direction {
+	IEEE80211_KEY_DIR_TX,   /* key used on the transmit path only */
+	IEEE80211_KEY_DIR_RX,   /* key used on the receive path only */
+	IEEE80211_KEY_DIR_BOTH  /* key used for both transmit and receive */
+} ieee80211_key_direction;
+
+#define IEEE80211_KEYIX_NONE    ((uint16_t) -1)
+
+typedef struct _ieee80211_keyval {
+	ieee80211_cipher_type keytype;
+	ieee80211_key_direction keydir;
+	u_int persistent : 1,     /* persistent key */
+	      mfp : 1;          /* management frame protection */
+	uint16_t keylen;        /* length of the key data fields */
+	uint8_t *macaddr;       /* mac address of length IEEE80211_ADDR_LEN . all bytes are 0xff for multicast key */
+	uint64_t keyrsc;
+	uint64_t keytsc;
+	uint16_t txmic_offset;  /* TKIP/SMS4 only: offset to tx mic key */
+	uint16_t rxmic_offset;  /* TKIP/SMS4 only: offset to rx mic key */
+	uint8_t *keydata;
+#ifdef ATH_SUPPORT_WAPI
+	uint8_t key_used;       /*index for WAPI rekey labeling */
+#endif
+} ieee80211_keyval;
+
+#define IEEE80211_AES_CMAC_LEN     128
+typedef enum _ieee80211_rsn_param {
+	IEEE80211_UCAST_CIPHER_LEN,
+	IEEE80211_MCAST_CIPHER_LEN,
+	IEEE80211_MCASTMGMT_CIPHER_LEN,
+	IEEE80211_KEYMGT_ALGS,
+	IEEE80211_RSN_CAPS
+} ieee80211_rsn_param;
+
+#define IEEE80211_PMKID_LEN     16
+
+typedef struct _ieee80211_pmkid_entry {
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint8_t pmkid[IEEE80211_PMKID_LEN];
+} ieee80211_pmkid_entry;
+
+typedef enum _wlan_wme_param {
+	WLAN_WME_CWMIN,
+	WLAN_WME_CWMAX,
+	WLAN_WME_AIFS,
+	WLAN_WME_TXOPLIMIT,
+	WLAN_WME_ACM,           /*bss only */
+	WLAN_WME_ACKPOLICY      /*bss only */
+} wlan_wme_param;
+
+typedef enum _ieee80211_frame_type {
+	IEEE80211_FRAME_TYPE_PROBEREQ,
+	IEEE80211_FRAME_TYPE_BEACON,
+	IEEE80211_FRAME_TYPE_PROBERESP,
+	IEEE80211_FRAME_TYPE_ASSOCREQ,
+	IEEE80211_FRAME_TYPE_ASSOCRESP,
+	IEEE80211_FRAME_TYPE_AUTH
+} ieee80211_frame_type;
+
+#define IEEE80211_FRAME_TYPE_MAX    (IEEE80211_FRAME_TYPE_AUTH+1)
+
+typedef enum _ieee80211_ampdu_mode {
+	IEEE80211_AMPDU_MODE_OFF,       /* disable AMPDU */
+	IEEE80211_AMPDU_MODE_ON,        /* enable AMPDU */
+	IEEE80211_AMPDU_MODE_WDSVAR     /* enable AMPDU with 4addr WAR */
+} ieee80211_ampdu_mode;
+
+typedef enum _ieee80211_reset_type {
+	IEEE80211_RESET_TYPE_DEVICE = 0,        /* device reset on error: tx timeout and etc. */
+	IEEE80211_RESET_TYPE_DOT11_INTF,        /* dot11 reset: only reset one network interface (vap) */
+	IEEE80211_RESET_TYPE_INTERNAL,  /* internal reset */
+} ieee80211_reset_type;
+
+typedef struct _ieee80211_reset_request {
+	ieee80211_reset_type type;
+
+	u_int reset_hw : 1,       /* reset the actual H/W */
+	/*
+	 * The following fields are only valid for DOT11 reset, i.e.,
+	 * IEEE80211_RESET_TYPE_DOT11_INTF
+	 */
+	      reset_phy : 1,    /* reset PHY */
+	      reset_mac : 1,    /* reset MAC */
+	      set_default_mib : 1, /* set default MIB variables */
+	      no_flush : 1;
+	uint8_t macaddr[IEEE80211_ADDR_LEN];
+	enum ieee80211_phymode phy_mode;
+} ieee80211_reset_request;
+
+#define IEEE80211_MSG_MAX 63
+#define IEEE80211_MSG_SMARTANT 7        /* Bit 7 (0x80)for Smart Antenna debug */
+enum {
+	/* IEEE80211_PARAM_DBG_LVL */
+	IEEE80211_MSG_TDLS = 0, /* TDLS */
+	IEEE80211_MSG_ACS,      /* auto channel selection */
+	IEEE80211_MSG_SCAN_SM,  /* scan state machine */
+	IEEE80211_MSG_SCANENTRY,        /* scan entry */
+	IEEE80211_MSG_WDS,      /* WDS handling */
+	IEEE80211_MSG_ACTION,   /* action management frames */
+	IEEE80211_MSG_ROAM,     /* sta-mode roaming */
+	IEEE80211_MSG_INACT,    /* inactivity handling */
+	IEEE80211_MSG_DOTH = 8, /* 11.h */
+	IEEE80211_MSG_IQUE,     /* IQUE features */
+	IEEE80211_MSG_WME,      /* WME protocol */
+	IEEE80211_MSG_ACL,      /* ACL handling */
+	IEEE80211_MSG_WPA,      /* WPA/RSN protocol */
+	IEEE80211_MSG_RADKEYS,  /* dump 802.1x keys */
+	IEEE80211_MSG_RADDUMP,  /* dump 802.1x radius packets */
+	IEEE80211_MSG_RADIUS,   /* 802.1x radius client */
+	IEEE80211_MSG_DOT1XSM = 16,     /* 802.1x state machine */
+	IEEE80211_MSG_DOT1X,    /* 802.1x authenticator */
+	IEEE80211_MSG_POWER,    /* power save handling */
+	IEEE80211_MSG_STATE,    /* state machine */
+	IEEE80211_MSG_OUTPUT,   /* output handling */
+	IEEE80211_MSG_SCAN,     /* scanning */
+	IEEE80211_MSG_AUTH,     /* authentication handling */
+	IEEE80211_MSG_ASSOC,    /* association handling */
+	IEEE80211_MSG_NODE = 24,        /* node handling */
+	IEEE80211_MSG_ELEMID,   /* element id parsing */
+	IEEE80211_MSG_XRATE,    /* rate set handling */
+	IEEE80211_MSG_INPUT,    /* input handling */
+	IEEE80211_MSG_CRYPTO,   /* crypto work */
+	IEEE80211_MSG_DUMPPKTS, /* IFF_LINK2 equivalent */
+	IEEE80211_MSG_DEBUG,    /* IFF_DEBUG equivalent */
+	IEEE80211_MSG_MLME,     /* MLME */
+	/* IEEE80211_PARAM_DBG_LVL_HIGH */
+	IEEE80211_MSG_RRM = 32, /* Radio resource measurement */
+	IEEE80211_MSG_WNM,      /* Wireless Network Management */
+	IEEE80211_MSG_P2P_PROT, /* P2P Protocol driver */
+	IEEE80211_MSG_PROXYARP, /* 11v Proxy ARP */
+	IEEE80211_MSG_L2TIF,    /* Hotspot 2.0 L2 TIF */
+	IEEE80211_MSG_WIFIPOS,  /* WifiPositioning Feature */
+	IEEE80211_MSG_WRAP,     /* WRAP or Wireless ProxySTA */
+	IEEE80211_MSG_DFS,      /* DFS debug mesg */
+
+	IEEE80211_MSG_NUM_CATEGORIES,   /* total ieee80211 messages */
+	IEEE80211_MSG_UNMASKABLE = IEEE80211_MSG_MAX,   /* anything */
+	IEEE80211_MSG_ANY = IEEE80211_MSG_MAX,  /* anything */
+};
+
+/* verbosity levels */
+#define     IEEE80211_VERBOSE_OFF                  100
+#define     IEEE80211_VERBOSE_FORCE               1
+#define     IEEE80211_VERBOSE_SERIOUS             2
+#define     IEEE80211_VERBOSE_NORMAL              3
+#define     IEEE80211_VERBOSE_LOUD                4
+#define     IEEE80211_VERBOSE_DETAILED            5
+#define     IEEE80211_VERBOSE_COMPLEX             6
+#define     IEEE80211_VERBOSE_FUNCTION            7
+#define     IEEE80211_VERBOSE_TRACE               8
+
+#define IEEE80211_DEBUG_DEFAULT IEEE80211_MSG_DEBUG
+
+/*
+ * the lower 4 bits of the msg flags are used for extending the
+ * debug flags.
+ */
+
+/*
+ * flag definitions for wlan_mlme_stop_bss(vap) API.
+ */
+#define WLAN_MLME_STOP_BSS_F_SEND_DEAUTH                0x01
+#define WLAN_MLME_STOP_BSS_F_CLEAR_ASSOC_STATE          0x02
+#define WLAN_MLME_STOP_BSS_F_FORCE_STOP_RESET           0x04
+#define WLAN_MLME_STOP_BSS_F_WAIT_RX_DONE               0x08
+#define WLAN_MLME_STOP_BSS_F_NO_RESET                   0x10
+#define WLAN_MLME_STOP_BSS_F_STANDBY                    0x20
+
+/*
+ * WAPI commands to authenticator
+ */
+#define WAPI_WAI_REQUEST            (uint16_t)0x00F1
+#define WAPI_UNICAST_REKEY          (uint16_t)0x00F2
+#define WAPI_STA_AGING              (uint16_t)0x00F3
+#define WAPI_MULTI_REKEY            (uint16_t)0x00F4
+#define WAPI_STA_STATS              (uint16_t)0x00F5
+
+/*
+ * IEEE80211 PHY Statistics.
+ */
+struct ieee80211_phy_stats {
+	uint64_t ips_tx_packets;        /* frames successfully transmitted */
+	uint64_t ips_tx_multicast;      /* multicast/broadcast frames successfully transmitted */
+	uint64_t ips_tx_fragments;      /* fragments successfully transmitted */
+	uint64_t ips_tx_xretries;       /* frames that are xretried. NB: not number of retries */
+	uint64_t ips_tx_retries;        /* frames transmitted after retries. NB: not number of retries */
+	uint64_t ips_tx_multiretries;   /* frames transmitted after more than one retry. */
+	uint64_t ips_tx_timeout;        /* frames that expire the dot11MaxTransmitMSDULifetime */
+	uint64_t ips_rx_packets;        /* frames successfully received */
+	uint64_t ips_rx_multicast;      /* multicast/broadcast frames successfully received */
+	uint64_t ips_rx_fragments;      /* fragments successfully received */
+	uint64_t ips_rx_timeout;        /* frames that expired the dot11MaxReceiveLifetime */
+	uint64_t ips_rx_dup;    /* duplicated fragments */
+	uint64_t ips_rx_mdup;   /* multiple duplicated fragments */
+	uint64_t ips_rx_promiscuous;    /* frames that are received only because promiscuous filter is on */
+	uint64_t ips_rx_promiscuous_fragments;  /* fragments that are received only because promiscuous filter is on */
+	uint64_t ips_tx_rts;    /* RTS success count */
+	uint64_t ips_tx_shortretry;     /* tx on-chip retries (short). RTSFailCnt */
+	uint64_t ips_tx_longretry;      /* tx on-chip retries (long). DataFailCnt */
+	uint64_t ips_rx_crcerr; /* rx failed 'cuz of bad CRC */
+	uint64_t ips_rx_fifoerr;        /* rx failed 'cuz of FIFO overrun */
+	uint64_t ips_rx_decrypterr;     /* rx decryption error */
+};
+
+struct ieee80211_chan_stats {
+	uint32_t chan_clr_cnt;
+	uint32_t cycle_cnt;
+	uint32_t phy_err_cnt;
+};
+
+struct ieee80211_mac_stats {
+	uint64_t ims_tx_packets;        /* frames successfully transmitted */
+	uint64_t ims_rx_packets;        /* frames successfully received */
+	uint64_t ims_tx_bytes;  /* bytes successfully transmitted */
+	uint64_t ims_rx_bytes;  /* bytes successfully received */
+
+	/* TODO: For the byte counts below, we need to handle some scenarios
+	   such as encryption related decaps, etc */
+	uint64_t ims_tx_data_packets;   /* data frames successfully transmitted */
+	uint64_t ims_rx_data_packets;   /* data frames successfully received */
+	uint64_t ims_tx_data_bytes;     /* data bytes successfully transmitted,
+	                                   inclusive of FCS. */
+	uint64_t ims_rx_data_bytes;     /* data bytes successfully received,
+	                                   inclusive of FCS. */
+
+	uint64_t ims_tx_datapyld_bytes; /* data payload bytes successfully
+	                                   transmitted */
+	uint64_t ims_rx_datapyld_bytes; /* data payload successfully
+	                                   received */
+
+	/* Decryption errors */
+	uint64_t ims_rx_unencrypted;    /* rx w/o wep and privacy on */
+	uint64_t ims_rx_badkeyid;       /* rx w/ incorrect keyid */
+	uint64_t ims_rx_decryptok;      /* rx decrypt okay */
+	uint64_t ims_rx_decryptcrc;     /* rx decrypt failed on crc */
+	uint64_t ims_rx_wepfail;        /* rx wep processing failed */
+	uint64_t ims_rx_tkipreplay;     /* rx seq# violation (TKIP) */
+	uint64_t ims_rx_tkipformat;     /* rx format bad (TKIP) */
+	uint64_t ims_rx_tkipmic;        /* rx MIC check failed (TKIP) */
+	uint64_t ims_rx_tkipicv;        /* rx ICV check failed (TKIP) */
+	uint64_t ims_rx_ccmpreplay;     /* rx seq# violation (CCMP) */
+	uint64_t ims_rx_ccmpformat;     /* rx format bad (CCMP) */
+	uint64_t ims_rx_ccmpmic;        /* rx MIC check failed (CCMP) */
+/*this file can be included by applications as 80211stats that has no such MACRO definition*/
+/* #if ATH_SUPPORT_WAPI */
+	uint64_t ims_rx_wpireplay;      /* rx seq# violation (WPI) */
+	uint64_t ims_rx_wpimic; /* rx MIC check failed (WPI) */
+/* #endif */
+	/* Other Tx/Rx errors */
+	uint64_t ims_tx_discard;        /* tx dropped by NIC */
+	uint64_t ims_rx_discard;        /* rx dropped by NIC */
+
+	uint64_t ims_rx_countermeasure; /* rx TKIP countermeasure activation count */
+};
+
+/*
+ * Summary statistics.
+ */
+struct ieee80211_stats {
+	uint32_t is_rx_badversion;      /* rx frame with bad version */
+	uint32_t is_rx_tooshort;        /* rx frame too short */
+	uint32_t is_rx_wrongbss;        /* rx from wrong bssid */
+	uint32_t is_rx_wrongdir;        /* rx w/ wrong direction */
+	uint32_t is_rx_mcastecho;       /* rx discard 'cuz mcast echo */
+	uint32_t is_rx_notassoc;        /* rx discard 'cuz sta !assoc */
+	uint32_t is_rx_noprivacy;       /* rx w/ wep but privacy off */
+	uint32_t is_rx_decap;   /* rx decapsulation failed */
+	uint32_t is_rx_mgtdiscard;      /* rx discard mgt frames */
+	uint32_t is_rx_ctl;     /* rx discard ctrl frames */
+	uint32_t is_rx_beacon;  /* rx beacon frames */
+	uint32_t is_rx_rstoobig;        /* rx rate set truncated */
+	uint32_t is_rx_elem_missing;    /* rx required element missing */
+	uint32_t is_rx_elem_toobig;     /* rx element too big */
+	uint32_t is_rx_elem_toosmall;   /* rx element too small */
+	uint32_t is_rx_elem_unknown;    /* rx element unknown */
+	uint32_t is_rx_badchan; /* rx frame w/ invalid chan */
+	uint32_t is_rx_chanmismatch;    /* rx frame chan mismatch */
+	uint32_t is_rx_nodealloc;       /* rx frame dropped */
+	uint32_t is_rx_ssidmismatch;    /* rx frame ssid mismatch  */
+	uint32_t is_rx_auth_unsupported;        /* rx w/ unsupported auth alg */
+	uint32_t is_rx_auth_fail;       /* rx sta auth failure */
+	uint32_t is_rx_auth_countermeasures;    /* rx auth discard 'cuz CM */
+	uint32_t is_rx_assoc_bss;       /* rx assoc from wrong bssid */
+	uint32_t is_rx_assoc_notauth;   /* rx assoc w/o auth */
+	uint32_t is_rx_assoc_capmismatch;       /* rx assoc w/ cap mismatch */
+	uint32_t is_rx_assoc_norate;    /* rx assoc w/ no rate match */
+	uint32_t is_rx_assoc_badwpaie;  /* rx assoc w/ bad WPA IE */
+	uint32_t is_rx_deauth;  /* rx deauthentication */
+	uint32_t is_rx_disassoc;        /* rx disassociation */
+	uint32_t is_rx_action;  /* rx action mgt */
+	uint32_t is_rx_badsubtype;      /* rx frame w/ unknown subtype */
+	uint32_t is_rx_nobuf;   /* rx failed for lack of buf */
+	uint32_t is_rx_ahdemo_mgt;      /* rx discard ahdemo mgt frame */
+	uint32_t is_rx_bad_auth;        /* rx bad auth request */
+	uint32_t is_rx_unauth;  /* rx on unauthorized port */
+	uint32_t is_rx_badcipher;       /* rx failed 'cuz key type */
+	uint32_t is_tx_nodefkey;        /* tx failed 'cuz no defkey */
+	uint32_t is_tx_noheadroom;      /* tx failed 'cuz no space */
+	uint32_t is_rx_nocipherctx;     /* rx failed 'cuz key !setup */
+	uint32_t is_rx_acl;     /* rx discard 'cuz acl policy */
+	uint32_t is_rx_ffcnt;   /* rx fast frames */
+	uint32_t is_rx_badathtnl;       /* driver key alloc failed */
+	uint32_t is_rx_nowds;   /* 4-addr packets received with no wds enabled */
+	uint32_t is_tx_nobuf;   /* tx failed for lack of buf */
+	uint32_t is_tx_nonode;  /* tx failed for no node */
+	uint32_t is_tx_unknownmgt;      /* tx of unknown mgt frame */
+	uint32_t is_tx_badcipher;       /* tx failed 'cuz key type */
+	uint32_t is_tx_ffokcnt; /* tx fast frames sent success */
+	uint32_t is_tx_fferrcnt;        /* tx fast frames that failed to send */
+	uint32_t is_scan_active;        /* active scans started */
+	uint32_t is_scan_passive;       /* passive scans started */
+	uint32_t is_node_timeout;       /* nodes timed out inactivity */
+	uint32_t is_crypto_nomem;       /* no memory for crypto ctx */
+	uint32_t is_crypto_tkip;        /* tkip crypto done in s/w */
+	uint32_t is_crypto_tkipenmic;   /* tkip en-MIC done in s/w */
+	uint32_t is_crypto_tkipdemic;   /* tkip de-MIC done in s/w */
+	uint32_t is_crypto_tkipcm;      /* tkip counter measures */
+	uint32_t is_crypto_ccmp;        /* ccmp crypto done in s/w */
+	uint32_t is_crypto_wep; /* wep crypto done in s/w */
+	uint32_t is_crypto_setkey_cipher;       /* cipher rejected key */
+	uint32_t is_crypto_setkey_nokey;        /* no key index for setkey */
+	uint32_t is_crypto_delkey;      /* driver key delete failed */
+	uint32_t is_crypto_badcipher;   /* unknown cipher */
+	uint32_t is_crypto_nocipher;    /* cipher not available */
+	uint32_t is_crypto_attachfail;  /* cipher attach failed */
+	uint32_t is_crypto_swfallback;  /* cipher fallback to s/w */
+	uint32_t is_crypto_keyfail;     /* driver key alloc failed */
+	uint32_t is_crypto_enmicfail;   /* en-MIC failed */
+	uint32_t is_ibss_capmismatch;   /* merge failed-cap mismatch */
+	uint32_t is_ibss_norate;        /* merge failed-rate mismatch */
+	uint32_t is_ps_unassoc; /* ps-poll for unassoc. sta */
+	uint32_t is_ps_badaid;  /* ps-poll w/ incorrect aid */
+	uint32_t is_ps_qempty;  /* ps-poll w/ nothing to send */
+};
+
+typedef enum _ieee80211_send_frame_type {
+	IEEE80211_SEND_NULL,    /* send a null data frame */
+	IEEE80211_SEND_QOSNULL, /* send a QoS null data frame */
+} ieee80211_send_frame_type;
+
+typedef struct _ieee80211_tspec_info {
+	uint8_t traffic_type;
+	uint8_t direction;
+	uint8_t dot1Dtag;
+	uint8_t tid;
+	uint8_t acc_policy_edca;
+	uint8_t acc_policy_hcca;
+	uint8_t aggregation;
+	uint8_t psb;
+	uint8_t ack_policy;
+	uint16_t norminal_msdu_size;
+	uint16_t max_msdu_size;
+	uint32_t min_srv_interval;
+	uint32_t max_srv_interval;
+	uint32_t inactivity_interval;
+	uint32_t suspension_interval;
+	uint32_t srv_start_time;
+	uint32_t min_data_rate;
+	uint32_t mean_data_rate;
+	uint32_t peak_data_rate;
+	uint32_t max_burst_size;
+	uint32_t delay_bound;
+	uint32_t min_phy_rate;
+	uint16_t surplus_bw;
+	uint16_t medium_time;
+} ieee80211_tspec_info;
+
+#ifndef EXTERNAL_USE_ONLY
+/*
+ * Manual ADDBA support
+ */
+enum {
+	ADDBA_SEND = 0,
+	ADDBA_STATUS = 1,
+	DELBA_SEND = 2,
+	ADDBA_RESP = 3,
+	ADDBA_CLR_RESP = 4,
+	SINGLE_AMSDU = 5,
+};
+
+enum {
+	ADDBA_MODE_AUTO = 0,
+	ADDBA_MODE_MANUAL = 1,
+};
+
+struct ieee80211_addba_delba_request {
+	wlan_dev_t ic;
+	uint8_t action;
+	uint8_t tid;
+	uint16_t status;
+	uint16_t aid;
+	uint32_t arg1;
+	uint32_t arg2;
+};
+#endif /* EXTERNAL_USE_ONLY */
+
+#ifdef ATH_BT_COEX
+typedef enum _ieee80211_bt_coex_info_type {
+	IEEE80211_BT_COEX_INFO_SCHEME = 0,
+	IEEE80211_BT_COEX_INFO_BTBUSY = 1,
+} ieee80211_bt_coex_info_type;
+#endif
+
+struct tkip_countermeasure {
+	uint16_t mic_count_in_60s;      /* TKIP MIC failures counted within a 60 second window */
+	uint32_t timestamp;     /* NOTE(review): presumably when the current window started -- units not visible here, confirm against users */
+};
+
+enum _ieee80211_qos_frame_direction {
+	IEEE80211_RX_QOS_FRAME = 0,
+	IEEE80211_TX_QOS_FRAME = 1,
+	IEEE80211_TX_COMPLETE_QOS_FRAME = 2
+};
+
+typedef struct ieee80211_vap_opmode_count {
+	int total_vaps;
+	int ibss_count;
+	int sta_count;
+	int wds_count;
+	int ahdemo_count;
+	int ap_count;
+	int monitor_count;
+	int btamp_count;
+	int unknown_count;
+} ieee80211_vap_opmode_count;
+
+struct ieee80211_app_ie_t {
+	uint32_t length;        /* length in bytes of the buffer pointed to by 'ie' */
+	uint8_t *ie;            /* application-supplied information element data */
+};
+
+/*
+ * MAC ACL operations.
+ */
+enum {
+	IEEE80211_MACCMD_POLICY_OPEN = 0,       /* set policy: no ACL's */
+	IEEE80211_MACCMD_POLICY_ALLOW = 1,      /* set policy: allow traffic */
+	IEEE80211_MACCMD_POLICY_DENY = 2,       /* set policy: deny traffic */
+	IEEE80211_MACCMD_FLUSH = 3,     /* flush ACL database */
+	IEEE80211_MACCMD_DETACH = 4,    /* detach ACL policy */
+	IEEE80211_MACCMD_POLICY_RADIUS = 5,     /* set policy: RADIUS managed ACLs */
+};
+
+#endif

+ 238 - 0
core/cds/inc/cds_if_upperproto.h

@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/* #ifndef _NET_IF_ETHERSUBR_H_ */
+/* #define _NET_IF_ETHERSUBR_H_ */
+#ifndef _NET_IF_UPPERPROTO_H_
+#define _NET_IF_UPPERPROTO_H_
+
+#define ETHER_ADDR_LEN    6     /* length of an Ethernet address */
+#define ETHER_TYPE_LEN    2     /* length of the Ethernet type field */
+#define ETHER_CRC_LEN     4     /* length of the Ethernet CRC */
+#define ETHER_HDR_LEN     (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN)
+#define ETHER_MAX_LEN     1518
+
+#define ETHERMTU          (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+#ifndef _NET_ETHERNET_H_
+struct ether_header {
+	uint8_t ether_dhost[ETHER_ADDR_LEN];
+	uint8_t ether_shost[ETHER_ADDR_LEN];
+	uint16_t ether_type;
+} __packed;
+#endif
+
+#ifndef ETHERTYPE_PAE
+#define ETHERTYPE_PAE    0x888e /* EAPOL PAE/802.1x */
+#endif
+#ifndef ETHERTYPE_IP
+#define ETHERTYPE_IP     0x0800 /* IP protocol */
+#endif
+#ifndef ETHERTYPE_AARP
+#define ETHERTYPE_AARP  0x80f3  /* Appletalk AARP protocol */
+#endif
+#ifndef ETHERTYPE_IPX
+#define ETHERTYPE_IPX    0x8137 /* IPX over DIX protocol */
+#endif
+#ifndef ETHERTYPE_ARP
+#define ETHERTYPE_ARP    0x0806 /* ARP protocol */
+#endif
+#ifndef ETHERTYPE_IPV6
+#define ETHERTYPE_IPV6   0x86dd /* IPv6 */
+#endif
+#ifndef ETHERTYPE_8021Q
+#define ETHERTYPE_8021Q 0x8100  /* 802.1Q vlan protocol */
+#endif
+#ifndef ETHERTYPE_VLAN
+#define ETHERTYPE_VLAN  0x8100  /* VLAN TAG protocol */
+#endif
+#ifndef TX_QUEUE_FOR_EAPOL_FRAME
+#define TX_QUEUE_FOR_EAPOL_FRAME  0x7   /* queue eapol frame to queue 7 to avoid aggregation disorder */
+#endif
+
+/*
+ * define WAI ethertype
+ */
+#ifndef ETHERTYPE_WAI
+#define ETHERTYPE_WAI    0x88b4 /* WAI/WAPI */
+#endif
+
+#define ETHERTYPE_OCB_TX   0x8151
+#define ETHERTYPE_OCB_RX   0x8152
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+#if 0
+#ifndef _NET_ETHERNET_H_
+struct ether_addr {
+	uint8_t octet[ETHER_ADDR_LEN];
+} __packed;
+#endif
+#endif
+
+#define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01)       /* is address mcast/bcast? */
+
+#define VLAN_PRI_SHIFT  13      /* Shift to find VLAN user priority */
+#define VLAN_PRI_MASK    7      /* Mask for user priority bits in VLAN */
+
+/*
+ * Structure of the IP frame
+ */
+struct ip_header {
+	uint8_t version_ihl;
+	uint8_t tos;
+	uint16_t tot_len;
+	uint16_t id;
+	uint16_t frag_off;
+	uint8_t ttl;
+	uint8_t protocol;
+	uint16_t check;
+	uint32_t saddr;
+	uint32_t daddr;
+	/*The options start here. */
+};
+#ifndef IP_PROTO_TCP
+#define IP_PROTO_TCP    0x6     /* TCP protocol */
+#endif
+#ifndef IP_PROTO_UDP
+#define IP_PROTO_UDP 17
+#endif
+
+/*
+ *   IGMP protocol structures
+ */
+
+/* IGMP record type */
+#define IGMP_QUERY_TYPE       0x11
+#define IGMPV1_REPORT_TYPE    0x12
+#define IGMPV2_REPORT_TYPE    0x16
+#define IGMPV2_LEAVE_TYPE     0x17
+#define IGMPV3_REPORT_TYPE    0x22
+
+/* Is packet type is either leave or report */
+#define IS_IGMP_REPORT_LEAVE_PACKET(type) ( \
+		(IGMPV1_REPORT_TYPE == type) \
+		|| (IGMPV2_REPORT_TYPE == type)	\
+		|| (IGMPV2_LEAVE_TYPE  == type)	\
+		|| (IGMPV3_REPORT_TYPE == type)	\
+		)
+/*
+ *    Header in on cable format
+ */
+
+struct igmp_header {
+	uint8_t type;
+	uint8_t code;           /* For newer IGMP */
+	uint16_t csum;
+	uint32_t group;
+};
+
+/* V3 group record types [grec_type] */
+#define IGMPV3_MODE_IS_INCLUDE        1
+#define IGMPV3_MODE_IS_EXCLUDE        2
+#define IGMPV3_CHANGE_TO_INCLUDE      3
+#define IGMPV3_CHANGE_TO_EXCLUDE      4
+#define IGMPV3_ALLOW_NEW_SOURCES      5
+#define IGMPV3_BLOCK_OLD_SOURCES      6
+
+/*  Group record format
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |  Record Type  |  Aux Data Len |     Number of Sources (N)     |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                       Multicast Address                       |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                       Source Address [1]                      |
+ ||+-                                                             -+
+ |                       Source Address [2]                      |
+ ||+-                                                             -+
+      .                               .                               .
+      .                               .                               .
+      .                               .                               .
+ ||+-                                                             -+
+ |                       Source Address [N]                      |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                                                               |
+      .                                                               .
+      .                         Auxiliary Data                        .
+      .                                                               .
+ |                                                               |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct igmp_v3_grec {
+	uint8_t grec_type;
+	uint8_t grec_auxwords;
+	uint16_t grec_nsrcs;
+	uint32_t grec_mca;
+};
+
+/* IGMPv3 report format
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |  Type = 0x22  |    Reserved   |           Checksum            |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |           Reserved            |  Number of Group Records (M)  |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                                                               |
+      .                                                               .
+      .                        Group Record [1]                       .
+      .                                                               .
+ |                                                               |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                                                               |
+      .                                                               .
+      .                        Group Record [2]                       .
+      .                                                               .
+ |                                                               |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                               .                               |
+      .                               .                               .
+ |                               .                               |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |                                                               |
+      .                                                               .
+      .                        Group Record [M]                       .
+      .                                                               .
+ |                                                               |
+ ||+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct igmp_v3_report {
+	uint8_t type;
+	uint8_t resv1;
+	uint16_t csum;
+	uint16_t resv2;
+	uint16_t ngrec;
+};
+
+/* Calculate the IGMPv3 group record length in bytes: 8-byte fixed record
+ * header + 4 bytes per source address (grec_nsrcs) + 4 bytes per 32-bit
+ * auxiliary-data word (grec_auxwords, per RFC 3376 units).
+ * The argument is parenthesized so expressions such as `rec + 1` or a
+ * cast expand correctly inside the macro. */
+#define IGMPV3_GRP_REC_LEN(x) (8 + (4 * (x)->grec_nsrcs) + (4 * (x)->grec_auxwords))
+
+#endif /* _NET_IF_ETHERSUBR_H_ */

+ 165 - 0
core/cds/inc/cds_mq.h

@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_MQ_H )
+#define __CDS_MQ_H
+
+/**=========================================================================
+
+   \file  cds_mq.h
+
+   \brief virtual Operating System Services (CDF) message queue APIs
+
+   Message Queue Definitions and API
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cdf_types.h>
+#include <cdf_status.h>
+
+/*--------------------------------------------------------------------------
+   Preprocessor definitions and constants
+   ------------------------------------------------------------------------*/
+
+/*--------------------------------------------------------------------------
+   Type declarations
+   ------------------------------------------------------------------------*/
+
+/* cds Message Type.
+   This represents a message that can be posted to another module through
+   the cds Message Queues.
+   \note This is mapped directly to the tSirMsgQ for backward
+   compatibility with the legacy MAC code */
+
+typedef struct cds_msg_s {
+	uint16_t type;
+	/*
+	 * This field can be used as sequence number/dialog token for matching
+	 * requests and responses.
+	 */
+	uint16_t reserved;
+	/**
+	 * Based on the type either a bodyptr pointer into
+	 * memory or bodyval as a 32 bit data is used.
+	 * bodyptr: is always a freeable pointer, one should always
+	 * make sure that bodyptr is always freeable.
+	 *
+	 * Messages should use either bodyptr or bodyval; not both !!!.
+	 */
+	void *bodyptr;
+
+	uint32_t bodyval;
+
+	/*
+	 * Some messages provide a callback function.  The function signature
+	 * must be agreed upon between the two entities exchanging the message
+	 */
+	void *callback;
+
+} cds_msg_t;
+
+/*-------------------------------------------------------------------------
+   Function declarations and documentation
+   ------------------------------------------------------------------------*/
+
+/* Message Queue IDs */
+typedef enum {
+	/* Message Queue ID for messages bound for SME */
+	CDS_MQ_ID_SME = CDF_MODULE_ID_SME,
+
+	/* Message Queue ID for messages bound for PE */
+	CDS_MQ_ID_PE = CDF_MODULE_ID_PE,
+
+	/* Message Queue ID for messages bound for WMA */
+	CDS_MQ_ID_WMA = CDF_MODULE_ID_WMA,
+
+	/* Message Queue ID for messages bound for the SYS module */
+	CDS_MQ_ID_SYS = CDF_MODULE_ID_SYS,
+
+} CDS_MQ_ID;
+
+/**---------------------------------------------------------------------------
+
+   \brief cds_mq_post_message() - post a message to a message queue
+
+   This API allows messages to be posted to a specific message queue.  Messages
+   can be posted to the following message queues:
+
+   <ul>
+    <li> SME
+    <li> PE
+    <li> HAL
+    <li> TL
+   </ul>
+
+   \param msgQueueId - identifies the message queue upon which the message
+         will be posted.
+
+   \param message - a pointer to a message buffer.  Memory for this message
+         buffer is allocated by the caller and free'd by the CDF after the
+         message is posted to the message queue.  If the consumer of the
+         message needs anything in this message, it needs to copy the contents
+         before returning from the message queue handler.
+
+   \return CDF_STATUS_SUCCESS - the message has been successfully posted
+          to the message queue.
+
+          CDF_STATUS_E_INVAL - The value specified by msgQueueId does not
+          refer to a valid Message Queue Id.
+
+          CDF_STATUS_E_FAULT  - message is an invalid pointer.
+
+          CDF_STATUS_E_FAILURE - the message queue handler has reported
+          an unknown failure.
+
+   \sa
+
+   --------------------------------------------------------------------------*/
+CDF_STATUS cds_mq_post_message(CDS_MQ_ID msgQueueId, cds_msg_t *message);
+
+/**---------------------------------------------------------------------------
+
+   \brief cds_send_mb_message_to_mac() - post a message to a message queue
+
+   \param pBuf is a buffer allocated by caller. The actual structure varies
+         base on message type
+
+   \return CDF_STATUS_SUCCESS - the message has been successfully posted
+          to the message queue.
+
+          CDF_STATUS_E_FAILURE - the message queue handler has reported
+          an unknown failure.
+
+   \sa
+   --------------------------------------------------------------------------*/
+
+CDF_STATUS cds_send_mb_message_to_mac(void *pBuf);
+
+#endif /* if !defined __CDS_MQ_H */

+ 111 - 0
core/cds/inc/cds_pack_align.h

@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_PACK_ALIGN_H )
+#define __CDS_PACK_ALIGN_H
+
+/**=========================================================================
+
+   \file  cds_pack_align.h
+
+   \brief Connectivity driver services (CDS) pack and align primitives
+
+   Definitions for platform independent means of packing and aligning
+   data structures
+
+   ========================================================================*/
+
+/*
+
+   Place the macro CDS_PACK_START above a structure declaration to pack. We
+   are not going to allow modifying the pack size because pack size cannot be
+   specified in AMSS and GNU. Place the macro CDS_PACK_END below a structure
+   declaration to stop the pack. This requirement is necessitated by Windows
+   which need pragma based prolog and epilog.
+
+   Pack-size > 1-byte is not supported since gcc and arm do not support that.
+
+   Here are some examples
+
+   1. Pack-size 1-byte foo_t across all platforms
+
+   CDS_PACK_START
+   typedef CDS_PACK_PRE struct foo_s { ... } CDS_PACK_POST foo_t;
+   CDS_PACK_END
+
+   2. 2-byte alignment for foo_t across all platforms
+
+   typedef CDS_ALIGN_PRE(2) struct foo_s { ... } CDS_ALIGN_POST(2) foo_t;
+
+   3. Pack-size 1-byte and 2-byte alignment for foo_t across all platforms
+
+   CDS_PACK_START
+   typedef CDS_PACK_PRE CDS_ALIGN_PRE(2) struct foo_s { ... } CDS_ALIGN_POST(2) CDS_PACK_POST foo_t;
+   CDS_PACK_END
+
+ */
+
+#if defined __GNUC__
+
+#define CDS_PACK_START
+#define CDS_PACK_END
+
+#define CDS_PACK_PRE
+#define CDS_PACK_POST  __attribute__((__packed__))
+
+#define CDS_ALIGN_PRE(__value)
+#define CDS_ALIGN_POST(__value)  __attribute__((__aligned__(__value)))
+
+#elif defined __arm
+
+#define CDS_PACK_START
+#define CDS_PACK_END
+
+#define CDS_PACK_PRE  __packed
+#define CDS_PACK_POST
+
+#define CDS_ALIGN_PRE(__value)  __align(__value)
+#define CDS_ALIGN_POST(__value)
+
+#elif defined _MSC_VER
+
+#define CDS_PACK_START  __pragma(pack(push,1))
+#define CDS_PACK_END  __pragma(pack(pop))
+
+#define CDS_PACK_PRE
+#define CDS_PACK_POST
+
+#define CDS_ALIGN_PRE(__value)  __declspec(align(__value))
+#define CDS_ALIGN_POST(__value)
+
+#else
+
+#error Unsupported compiler!!!
+
+#endif
+
+#endif /* __CDS_PACK_ALIGN_H */

+ 185 - 0
core/cds/inc/cds_packet.h

@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_PKT_H )
+#define __CDS_PKT_H
+
+/**=========================================================================
+
+   \file        cds_packet.h
+
+   \brief       Connectivity driver services (CDS) network Packet APIs
+
+   Network Protocol packet/buffer support interfaces
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cdf_types.h>
+#include <cdf_status.h>
+
+/*--------------------------------------------------------------------------
+   Preprocessor definitions and constants
+   ------------------------------------------------------------------------*/
+
+/*--------------------------------------------------------------------------
+   Type declarations
+   ------------------------------------------------------------------------*/
+struct cds_pkt_t;
+typedef struct cds_pkt_t cds_pkt_t;
+
+#include "cdf_nbuf.h"
+
+#define CDS_PKT_TRAC_TYPE_EAPOL   NBUF_PKT_TRAC_TYPE_EAPOL
+#define CDS_PKT_TRAC_TYPE_DHCP    NBUF_PKT_TRAC_TYPE_DHCP
+#define CDS_PKT_TRAC_TYPE_MGMT_ACTION    NBUF_PKT_TRAC_TYPE_MGMT_ACTION /* Management action frame */
+
+#define CDS_PKT_TRAC_DUMP_CMD     9999
+
+/*---------------------------------------------------------------------------
+
+* brief cds_pkt_get_proto_type() -
+      Find protocol type from packet contents
+
+* skb Packet Pointer
+* tracking_map packet type want to track
+* dot11_type, frame type when the frame is in dot11 format
+
+   ---------------------------------------------------------------------------*/
+uint8_t cds_pkt_get_proto_type
+	(struct sk_buff *skb, uint8_t tracking_map, uint8_t dot11_type);
+
+#ifdef QCA_PKT_PROTO_TRACE
+/*---------------------------------------------------------------------------
+
+* brief cds_pkt_trace_buf_update() -
+      Update storage buffer with interest event string
+
+* event_string Event String may packet type or outstanding event
+
+   ---------------------------------------------------------------------------*/
+void cds_pkt_trace_buf_update(char *event_string);
+
+/*---------------------------------------------------------------------------
+
+* brief cds_pkt_trace_buf_dump() -
+      Dump stored information into kernel log
+
+   ---------------------------------------------------------------------------*/
+void cds_pkt_trace_buf_dump(void);
+
+/*---------------------------------------------------------------------------
+
+* brief cds_pkt_proto_trace_init() -
+      Initialize protocol trace functionality, allocate required resource
+
+   ---------------------------------------------------------------------------*/
+void cds_pkt_proto_trace_init(void);
+
+/*---------------------------------------------------------------------------
+
+* brief cds_pkt_proto_trace_close() -
+      Free required resource
+
+   ---------------------------------------------------------------------------*/
+void cds_pkt_proto_trace_close(void);
+#endif /* QCA_PKT_PROTO_TRACE */
+
+/**
+ * cds_pkt_return_packet  Free the cds Packet
+ * @ cds Packet
+ */
+CDF_STATUS cds_pkt_return_packet(cds_pkt_t *packet);
+
+/**
+ * cds_pkt_get_packet_length  Returns the packet length
+ * @ cds Packet
+ */
+CDF_STATUS cds_pkt_get_packet_length(cds_pkt_t *pPacket,
+				     uint16_t *pPacketSize);
+
+/*
+ * TODO: Remove later
+ * All the below definitions are not
+ * required for Host Driver 2.0
+ * once corresponding references are removed
+ * from HDD and other layers
+ * below code will be removed
+ */
+/* The size of AMSDU frame per spec can be a max of 3839 bytes
+   in BD/PDUs that means 30 (one BD = 128 bytes)
+   we must add the size of the 802.11 header to that */
+#define CDS_PKT_SIZE_BUFFER  ((30 * 128) + 32)
+
+/* cds Packet Types */
+typedef enum {
+	/* cds Packet is used to transmit 802.11 Management frames. */
+	CDS_PKT_TYPE_TX_802_11_MGMT,
+
+	/* cds Packet is used to transmit 802.11 Data frames. */
+	CDS_PKT_TYPE_TX_802_11_DATA,
+
+	/* cds Packet is used to transmit 802.3 Data frames. */
+	CDS_PKT_TYPE_TX_802_3_DATA,
+
+	/* cds Packet contains Received data of an unknown frame type */
+	CDS_PKT_TYPE_RX_RAW,
+
+	/* Invalid sentinel value */
+	CDS_PKT_TYPE_MAXIMUM
+} CDS_PKT_TYPE;
+
+/* user IDs.   These IDs are needed on the cds_pkt_get/set_user_data_ptr()
+   to identify the user area in the cds Packet. */
+typedef enum {
+	CDS_PKT_USER_DATA_ID_TL = 0,
+	CDS_PKT_USER_DATA_ID_BAL,
+	CDS_PKT_USER_DATA_ID_WMA,
+	CDS_PKT_USER_DATA_ID_HDD,
+	CDS_PKT_USER_DATA_ID_BSL,
+
+	CDS_PKT_USER_DATA_ID_MAX
+} CDS_PKT_USER_DATA_ID;
+
+#ifdef MEMORY_DEBUG
+#define cds_packet_alloc(s, d, p)	\
+	cds_packet_alloc_debug(s, d, p, __FILE__, __LINE__)
+
+CDF_STATUS cds_packet_alloc_debug(uint16_t size, void **data, void **ppPacket,
+				  uint8_t *file_name, uint32_t line_num);
+#else
+CDF_STATUS cds_packet_alloc(uint16_t size, void **data, void **ppPacket);
+#endif
+
+void cds_packet_free(void *pPacket);
+
+typedef CDF_STATUS (*cds_pkt_get_packet_callback)(cds_pkt_t *pPacket,
+						  void *userData);
+
+#endif /* !defined( __CDS_PKT_H ) */

+ 33 - 0
core/cds/inc/cds_queue.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _CDS_QUEUE_H
+#define _CDS_QUEUE_H
+
+#include <queue.h> /* include BSD queue */
+
+#endif /* end of _CDS_QUEUE_H */

+ 310 - 0
core/cds/inc/cds_reg_service.h

@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined __CDS_REG_SERVICE_H
+#define __CDS_REG_SERVICE_H
+
+/**=========================================================================
+
+   \file  cds_reg_service.h
+
+   \brief Connectivity driver services (CDS): Non-Volatile storage API
+
+   ========================================================================*/
+
+#include "cdf_status.h"
+
+#define CDS_COUNTRY_CODE_LEN  2
+#define CDS_MAC_ADDRESS_LEN   6
+
+typedef enum {
+	REGDOMAIN_FCC,
+	REGDOMAIN_ETSI,
+	REGDOMAIN_JAPAN,
+	REGDOMAIN_WORLD,
+	REGDOMAIN_COUNT
+} v_REGDOMAIN_t;
+
+typedef enum {
+	/* 2.4GHz Band */
+	RF_CHAN_1 = 0,
+	RF_CHAN_2,
+	RF_CHAN_3,
+	RF_CHAN_4,
+	RF_CHAN_5,
+	RF_CHAN_6,
+	RF_CHAN_7,
+	RF_CHAN_8,
+	RF_CHAN_9,
+	RF_CHAN_10,
+	RF_CHAN_11,
+	RF_CHAN_12,
+	RF_CHAN_13,
+	RF_CHAN_14,
+
+	/* 4.9GHz Band */
+	RF_CHAN_240,
+	RF_CHAN_244,
+	RF_CHAN_248,
+	RF_CHAN_252,
+	RF_CHAN_208,
+	RF_CHAN_212,
+	RF_CHAN_216,
+
+	/* 5GHz Low & Mid U-NII Band */
+	RF_CHAN_36,
+	RF_CHAN_40,
+	RF_CHAN_44,
+	RF_CHAN_48,
+	RF_CHAN_52,
+	RF_CHAN_56,
+	RF_CHAN_60,
+	RF_CHAN_64,
+
+	/* 5GHz Mid Band - ETSI & FCC */
+	RF_CHAN_100,
+	RF_CHAN_104,
+	RF_CHAN_108,
+	RF_CHAN_112,
+	RF_CHAN_116,
+	RF_CHAN_120,
+	RF_CHAN_124,
+	RF_CHAN_128,
+	RF_CHAN_132,
+	RF_CHAN_136,
+	RF_CHAN_140,
+
+	RF_CHAN_144,
+
+	/* 5GHz High U-NII Band */
+	RF_CHAN_149,
+	RF_CHAN_153,
+	RF_CHAN_157,
+	RF_CHAN_161,
+	RF_CHAN_165,
+
+	/* 802.11p */
+	RF_CHAN_170,
+	RF_CHAN_171,
+	RF_CHAN_172,
+	RF_CHAN_173,
+	RF_CHAN_174,
+	RF_CHAN_175,
+	RF_CHAN_176,
+	RF_CHAN_177,
+	RF_CHAN_178,
+	RF_CHAN_179,
+	RF_CHAN_180,
+	RF_CHAN_181,
+	RF_CHAN_182,
+	RF_CHAN_183,
+	RF_CHAN_184,
+
+	/* CHANNEL BONDED CHANNELS */
+	RF_CHAN_BOND_3,
+	RF_CHAN_BOND_4,
+	RF_CHAN_BOND_5,
+	RF_CHAN_BOND_6,
+	RF_CHAN_BOND_7,
+	RF_CHAN_BOND_8,
+	RF_CHAN_BOND_9,
+	RF_CHAN_BOND_10,
+	RF_CHAN_BOND_11,
+	RF_CHAN_BOND_242,       /* 4.9GHz Band */
+	RF_CHAN_BOND_246,
+	RF_CHAN_BOND_250,
+	RF_CHAN_BOND_210,
+	RF_CHAN_BOND_214,
+	RF_CHAN_BOND_38,        /* 5GHz Low & Mid U-NII Band */
+	RF_CHAN_BOND_42,
+	RF_CHAN_BOND_46,
+	RF_CHAN_BOND_50,
+	RF_CHAN_BOND_54,
+	RF_CHAN_BOND_58,
+	RF_CHAN_BOND_62,
+	RF_CHAN_BOND_102,       /* 5GHz Mid Band - ETSI & FCC */
+	RF_CHAN_BOND_106,
+	RF_CHAN_BOND_110,
+	RF_CHAN_BOND_114,
+	RF_CHAN_BOND_118,
+	RF_CHAN_BOND_122,
+	RF_CHAN_BOND_126,
+	RF_CHAN_BOND_130,
+	RF_CHAN_BOND_134,
+	RF_CHAN_BOND_138,
+
+	RF_CHAN_BOND_142,
+
+	RF_CHAN_BOND_151,       /* 5GHz High U-NII Band */
+	RF_CHAN_BOND_155,
+	RF_CHAN_BOND_159,
+	RF_CHAN_BOND_163,
+
+	NUM_RF_CHANNELS,
+
+	MIN_2_4GHZ_CHANNEL = RF_CHAN_1,
+	MAX_2_4GHZ_CHANNEL = RF_CHAN_14,
+	NUM_24GHZ_CHANNELS = (MAX_2_4GHZ_CHANNEL - MIN_2_4GHZ_CHANNEL + 1),
+
+	MIN_5GHZ_CHANNEL = RF_CHAN_240,
+	MAX_5GHZ_CHANNEL = RF_CHAN_184,
+	NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1),
+
+	MIN_20MHZ_RF_CHANNEL = RF_CHAN_1,
+	MAX_20MHZ_RF_CHANNEL = RF_CHAN_184,
+	NUM_20MHZ_RF_CHANNELS =
+		(MAX_20MHZ_RF_CHANNEL - MIN_20MHZ_RF_CHANNEL + 1),
+
+	MIN_40MHZ_RF_CHANNEL = RF_CHAN_BOND_3,
+	MAX_40MHZ_RF_CHANNEL = RF_CHAN_BOND_163,
+	NUM_40MHZ_RF_CHANNELS =
+		(MAX_40MHZ_RF_CHANNEL - MIN_40MHZ_RF_CHANNEL + 1),
+
+	MIN_5_9GHZ_CHANNEL = RF_CHAN_170,
+	MAX_5_9GHZ_CHANNEL = RF_CHAN_184,
+
+	INVALID_RF_CHANNEL = 0xBAD,
+	RF_CHANNEL_INVALID_MAX_FIELD = 0x7FFFFFFF
+} eRfChannels;
+
+typedef enum {
+	CHANNEL_STATE_DISABLE,
+	CHANNEL_STATE_ENABLE,
+	CHANNEL_STATE_DFS,
+	CHANNEL_STATE_INVALID
+} CHANNEL_STATE;
+
+typedef int8_t tPowerdBm;
+
+typedef struct {
+	uint32_t enabled:4;
+	uint32_t flags:28;
+	tPowerdBm pwrLimit;
+} sRegulatoryChannel;
+
+typedef struct {
+	sRegulatoryChannel channels[NUM_RF_CHANNELS];
+} sRegulatoryDomain;
+
+typedef struct {
+	uint16_t targetFreq;
+	uint16_t channelNum;
+} tRfChannelProps;
+
+typedef struct {
+	uint8_t chanId;
+	tPowerdBm pwr;
+} tChannelListWithPower;
+
+typedef enum {
+	COUNTRY_INIT,
+	COUNTRY_IE,
+	COUNTRY_USER,
+	COUNTRY_QUERY,
+	COUNTRY_MAX = COUNTRY_QUERY
+} v_CountryInfoSource_t;
+
+/**
+ * enum channel_width: channel width
+ *
+ * @CHAN_WIDTH_0MHZ: channel disabled or invalid
+ * @CHAN_WIDTH_5MHZ: channel width 5 MHZ
+ * @CHAN_WIDTH_10MHZ: channel width 10 MHZ
+ * @CHAN_WIDTH_20MHZ: channel width 20 MHZ
+ * @CHAN_WIDTH_40MHZ: channel width 40 MHZ
+ * @CHAN_WIDTH_80MHZ: channel width 80MHZ
+ * @CHAN_WIDTH_160MHZ: channel width 160 MHZ
+ */
+enum channel_width {
+	CHAN_WIDTH_0MHZ,
+	CHAN_WIDTH_5MHZ,
+	CHAN_WIDTH_10MHZ,
+	CHAN_WIDTH_20MHZ,
+	CHAN_WIDTH_40MHZ,
+	CHAN_WIDTH_80MHZ,
+	CHAN_WIDTH_160MHZ
+};
+
+/**
+ * @country_code_t : typedef for country code. One extra
+ * char for holding null character
+ */
+typedef uint8_t country_code_t[CDS_COUNTRY_CODE_LEN + 1];
+
+typedef struct {
+	sRegulatoryDomain regDomains[REGDOMAIN_COUNT];
+	country_code_t default_country;
+} t_reg_table;
+
+
+CDF_STATUS cds_get_reg_domain_from_country_code(v_REGDOMAIN_t *pRegDomain,
+						const country_code_t countryCode,
+						v_CountryInfoSource_t source);
+
+CDF_STATUS cds_read_default_country(country_code_t default_country);
+
+CDF_STATUS cds_get_channel_list_with_power(tChannelListWithPower
+					   *pChannels20MHz,
+					   uint8_t *pNum20MHzChannelsFound,
+					   tChannelListWithPower
+					   *pChannels40MHz,
+					   uint8_t *pNum40MHzChannelsFound);
+
+CDF_STATUS cds_set_reg_domain(void *clientCtxt, v_REGDOMAIN_t regId);
+
+CHANNEL_STATE cds_get_channel_state(uint32_t rfChannel);
+
+#define CDS_IS_DFS_CH(channel) (cds_get_channel_state((channel)) == \
+				CHANNEL_STATE_DFS)
+
+#define CDS_IS_PASSIVE_OR_DISABLE_CH(channel) \
+    (cds_get_channel_state((channel)) != CHANNEL_STATE_ENABLE)
+
+#define CDS_MAX_24GHz_CHANNEL_NUMBER \
+    (rf_channels[MAX_2_4GHZ_CHANNEL].channelNum)
+#define CDS_MIN_5GHz_CHANNEL_NUMBER  (rf_channels[RF_CHAN_36].channelNum)
+#define CDS_MAX_5GHz_CHANNEL_NUMBER  (rf_channels[MAX_5GHZ_CHANNEL].channelNum)
+
+#define CDS_IS_CHANNEL_5GHZ(chnNum) \
+	(((chnNum) >= CDS_MIN_5GHz_CHANNEL_NUMBER) && ((chnNum) <= CDS_MAX_5GHz_CHANNEL_NUMBER))
+
+#define CDS_IS_CHANNEL_24GHZ(chnNum) \
+	(((chnNum) > 0) && ((chnNum) <= CDS_MAX_24GHz_CHANNEL_NUMBER))
+
+#define CDS_IS_SAME_BAND_CHANNELS(ch1, ch2) \
+	(ch1 && ch2 && \
+	(CDS_IS_CHANNEL_5GHZ(ch1) == CDS_IS_CHANNEL_5GHZ(ch2)))
+
+CDF_STATUS cds_regulatory_init(void);
+CDF_STATUS cds_get_dfs_region(uint8_t *dfs_region);
+CDF_STATUS cds_set_dfs_region(uint8_t dfs_region);
+bool cds_is_dsrc_channel(uint16_t);
+CHANNEL_STATE cds_get_bonded_channel_state(uint32_t chan_num,
+					   enum channel_width ch_width);
+enum channel_width cds_get_max_channel_bw(uint32_t chan_num);
+
+#endif /* __CDS_REG_SERVICE_H */

+ 1098 - 0
core/cds/inc/cds_regdomain.h

@@ -0,0 +1,1098 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Notifications and licenses are retained for attribution purposes only.
+ */
+/*
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2005-2006 Atheros Communications, Inc.
+ * Copyright (c) 2010, Atheros Communications Inc.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the following conditions are met:
+ * 1. The materials contained herein are unmodified and are used
+ *    unmodified.
+ * 2. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following NO
+ *    ''WARRANTY'' disclaimer below (''Disclaimer''), without
+ *    modification.
+ * 3. Redistributions in binary form must reproduce at minimum a
+ *    disclaimer similar to the Disclaimer below and any redistribution
+ *    must be conditioned upon including a substantially similar
+ *    Disclaimer requirement for further binary redistribution.
+ * 4. Neither the names of the above-listed copyright holders nor the
+ *    names of any contributors may be used to endorse or promote
+ *    product derived from this software without specific prior written
+ *    permission.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT,
+ * MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
+ * FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ *
+ * This module contains the regulatory domain private structure definitions .
+ *
+ */
+
+#ifndef REGULATORY_H
+#define REGULATORY_H
+
+/* Special country codes (regular codes come from ISO 3166 numbering) */
+enum {
+	CTRY_DEBUG = 0x1ff,     /* debug country code */
+	CTRY_DEFAULT = 0        /* default country code */
+};
+
+#define BMLEN 2                 /* Use 2 64 bit uint for channel bitmask */
+
+/*
+ * The following table is the master list for all different frequency
+ * bands with the complete matrix of all possible flags and settings
+ * for each band if it is used in ANY reg domain.
+ */
+
+#define DEF_REGDMN              FCC3_FCCA
+#define    DEF_DMN_5            FCC1
+#define    DEF_DMN_2            FCCA
+#define    COUNTRY_ERD_FLAG     0x8000
+#define WORLDWIDE_ROAMING_FLAG  0x4000
+#define    SUPER_DOMAIN_MASK    0x0fff
+#define    COUNTRY_CODE_MASK    0x3fff
+#define CF_INTERFERENCE         (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
+
+/*
+ * The following describe the bit masks for different passive scan
+ * capability/requirements per regdomain.
+ */
+#define NO_PSCAN    0x0ULL
+#define PSCAN_FCC   0x0000000000000001ULL
+#define PSCAN_FCC_T 0x0000000000000002ULL
+#define PSCAN_ETSI  0x0000000000000004ULL
+#define PSCAN_MKK1  0x0000000000000008ULL
+#define PSCAN_MKK2  0x0000000000000010ULL
+#define PSCAN_MKKA  0x0000000000000020ULL
+#define PSCAN_MKKA_G    0x0000000000000040ULL
+#define PSCAN_ETSIA 0x0000000000000080ULL
+#define PSCAN_ETSIB 0x0000000000000100ULL
+#define PSCAN_ETSIC 0x0000000000000200ULL
+#define PSCAN_WWR   0x0000000000000400ULL
+#define PSCAN_MKKA1 0x0000000000000800ULL
+#define PSCAN_MKKA1_G   0x0000000000001000ULL
+#define PSCAN_MKKA2 0x0000000000002000ULL
+#define PSCAN_MKKA2_G   0x0000000000004000ULL
+#define PSCAN_MKK3  0x0000000000008000ULL
+#define PSCAN_EXT_CHAN  0x0000000000010000ULL
+#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
+#define IS_ECM_CHAN 0x8000000000000000ULL
+
+/* define in ah_eeprom.h */
+#define SD_NO_CTL       0xf0
+#define NO_CTL          0xff
+#define CTL_MODE_M      0x0f
+#define CTL_11A         0
+#define CTL_11B         1
+#define CTL_11G         2
+#define CTL_TURBO       3
+#define CTL_108G        4
+#define CTL_2GHT20      5
+#define CTL_5GHT20      6
+#define CTL_2GHT40      7
+#define CTL_5GHT40      8
+#define CTL_5GVHT80     9
+
+#ifndef ATH_NO_5G_SUPPORT
+#define REGDMN_MODE_11A_TURBO    REGDMN_MODE_108A
+#define CHAN_11A_BMZERO BMZERO,
+#define CHAN_11A_BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l) \
+	BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l),
+#else
+/* remove 11a channel info if 11a is not supported */
+#define CHAN_11A_BMZERO
+#define CHAN_11A_BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l)
+#endif
+#ifndef ATH_REMOVE_2G_TURBO_RD_TABLE
+#define REGDMN_MODE_11G_TURBO    REGDMN_MODE_108G
+#define CHAN_TURBO_G_BMZERO BMZERO,
+#define CHAN_TURBO_G_BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l)	\
+	BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l),
+#else
+/* remove turbo-g channel info if turbo-g is not supported */
+#define CHAN_TURBO_G(a, b)
+#define CHAN_TURBO_G_BMZERO
+#define CHAN_TURBO_G_BM(_a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l)
+#endif
+
+/* NOTE(review): BMLEN is already defined (identically) earlier in this file;
+ * an identical redefinition is legal C, but one of the two should be removed.
+ */
+#define BMLEN 2                 /* Use 2 64 bit uint for channel bitmask
+	                           NB: Must agree with macro below (BM) */
+#define BMZERO {(uint64_t) 0, (uint64_t) 0} /* BMLEN zeros */
+
+#ifndef SUPPRESS_SHIFT_WARNING
+#define SUPPRESS_SHIFT_WARNING
+#endif
+
+/* Suppress MS warning "C4293: 'operator' : shift count negative or too big,
+ * undefined behavior"
+ * This is safe below because the operand is properly range-checked, but
+ * the compiler can't reason that out before it spits the warning.
+ * Using suppress, so the warning can still be enabled globally to catch other
+ * incorrect uses.
+ */
+/* BM builds a 128-bit channel bitmask (as a BMLEN-element uint64_t
+ * initializer) from up to 12 bit positions: positions 0-63 set bits in the
+ * first word, 64-127 in the second, and any position outside 0-127
+ * (e.g. a -1 placeholder) contributes nothing.
+ */
+#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
+	SUPPRESS_SHIFT_WARNING \
+	{((((_fa >= 0) && (_fa < 64)) ? (((uint64_t) 1) << _fa) : (uint64_t) 0) | \
+	  (((_fb >= 0) && (_fb < 64)) ? (((uint64_t) 1) << _fb) : (uint64_t) 0) | \
+	  (((_fc >= 0) && (_fc < 64)) ? (((uint64_t) 1) << _fc) : (uint64_t) 0) | \
+	  (((_fd >= 0) && (_fd < 64)) ? (((uint64_t) 1) << _fd) : (uint64_t) 0) | \
+	  (((_fe >= 0) && (_fe < 64)) ? (((uint64_t) 1) << _fe) : (uint64_t) 0) | \
+	  (((_ff >= 0) && (_ff < 64)) ? (((uint64_t) 1) << _ff) : (uint64_t) 0) | \
+	  (((_fg >= 0) && (_fg < 64)) ? (((uint64_t) 1) << _fg) : (uint64_t) 0) | \
+	  (((_fh >= 0) && (_fh < 64)) ? (((uint64_t) 1) << _fh) : (uint64_t) 0) | \
+	  (((_fi >= 0) && (_fi < 64)) ? (((uint64_t) 1) << _fi) : (uint64_t) 0) | \
+	  (((_fj >= 0) && (_fj < 64)) ? (((uint64_t) 1) << _fj) : (uint64_t) 0) | \
+	  (((_fk >= 0) && (_fk < 64)) ? (((uint64_t) 1) << _fk) : (uint64_t) 0) | \
+	  (((_fl >= 0) && (_fl < 64)) ? (((uint64_t) 1) << _fl) : (uint64_t) 0) ) \
+	 ,(((((_fa > 63) && (_fa < 128)) ? (((uint64_t) 1) << (_fa - 64)) : (uint64_t) 0) | \
+	    (((_fb > 63) && (_fb < 128)) ? (((uint64_t) 1) << (_fb - 64)) : (uint64_t) 0) | \
+	    (((_fc > 63) && (_fc < 128)) ? (((uint64_t) 1) << (_fc - 64)) : (uint64_t) 0) | \
+	    (((_fd > 63) && (_fd < 128)) ? (((uint64_t) 1) << (_fd - 64)) : (uint64_t) 0) | \
+	    (((_fe > 63) && (_fe < 128)) ? (((uint64_t) 1) << (_fe - 64)) : (uint64_t) 0) | \
+	    (((_ff > 63) && (_ff < 128)) ? (((uint64_t) 1) << (_ff - 64)) : (uint64_t) 0) | \
+	    (((_fg > 63) && (_fg < 128)) ? (((uint64_t) 1) << (_fg - 64)) : (uint64_t) 0) | \
+	    (((_fh > 63) && (_fh < 128)) ? (((uint64_t) 1) << (_fh - 64)) : (uint64_t) 0) | \
+	    (((_fi > 63) && (_fi < 128)) ? (((uint64_t) 1) << (_fi - 64)) : (uint64_t) 0) | \
+	    (((_fj > 63) && (_fj < 128)) ? (((uint64_t) 1) << (_fj - 64)) : (uint64_t) 0) | \
+	    (((_fk > 63) && (_fk < 128)) ? (((uint64_t) 1) << (_fk - 64)) : (uint64_t) 0) | \
+	    (((_fl > 63) && (_fl < 128)) ? (((uint64_t) 1) << (_fl - 64)) : (uint64_t) 0)))}
+
+/*
+ * The following table is the mapping of regdomain pairs specified by
+ * an 8 bit regdomain value to the individual unitary reg domains
+ * NOTE(review): regDmnEnum below is declared uint16_t although this
+ * comment says 8-bit — confirm which is intended.
+ */
+
+typedef struct reg_dmn_pair_mapping {
+	uint16_t regDmnEnum;    /* 16 bit reg domain pair */
+	uint16_t regDmn5GHz;    /* 5GHz reg domain */
+	uint16_t regDmn2GHz;    /* 2GHz reg domain */
+	uint32_t flags5GHz;     /* Requirements flags (AdHoc
+	                           disallow, noise floor cal needed,
+	                           etc) */
+	uint32_t flags2GHz;     /* Requirements flags (AdHoc
+	                           disallow, noise floor cal needed,
+	                           etc) */
+	uint64_t pscanMask;     /* Passive Scan flags which
+	                           can override unitary domain
+	                           passive scan flags.  This
+	                           value is used as a mask on
+	                           the unitary flags */
+	uint16_t singleCC;      /* Country code of single country if
+	                           a one-on-one mapping exists */
+} REG_DMN_PAIR_MAPPING;
+
+/* Per-country entry: ISO code/name, its reg domain pair, and per-PHY-mode
+ * allow bits.
+ */
+typedef struct {
+	uint16_t countryCode;
+	uint16_t regDmnEnum;
+	const char *isoName;
+	const char *name;
+	uint16_t allow11g : 1, allow11aTurbo : 1, allow11gTurbo : 1, allow11ng20 : 1, /* HT-20 allowed in 2GHz? */
+		 allow11ng40 : 1, /* HT-40 allowed in 2GHz? */
+		 allow11na20 : 1, /* HT-20 allowed in 5GHz? */
+		 allow11na40 : 1, /* HT-40 VHT-40 allowed in 5GHz? */
+		 allow11na80 : 1; /* VHT-80 allowed in 5GHz */
+	uint16_t outdoorChanStart;
+} COUNTRY_CODE_TO_ENUM_RD;
+
+/* One frequency band: channel range, power limits, and DFS/passive-scan
+ * applicability bitmasks.
+ */
+typedef struct RegDmnFreqBand {
+	uint16_t lowChannel;    /* Low channel center in MHz */
+	uint16_t highChannel;   /* High Channel center in MHz */
+	uint8_t powerDfs;       /* Max power (dBm) for channel
+	                           range when using DFS */
+	uint8_t antennaMax;     /* Max allowed antenna gain */
+	uint8_t channelBW;      /* Bandwidth of the channel */
+	uint8_t channelSep;     /* Channel separation within
+	                           the band */
+	uint64_t useDfs;        /* Use DFS in the RegDomain
+	                           if corresponding bit is set */
+	uint64_t usePassScan;   /* Use Passive Scan in the RegDomain
+	                           if corresponding bit is set */
+	uint8_t regClassId;     /* Regulatory class id */
+} REG_DMN_FREQ_BAND;
+
+/* A unitary regulatory domain: DFS/passive-scan masks plus per-PHY-mode
+ * 128-bit channel bitmasks (each stored as BMLEN uint64_t words).
+ */
+typedef struct reg_domain {
+	uint16_t regDmnEnum;    /* value from EnumRd table */
+	uint8_t conformance_test_limit;
+	uint64_t dfsMask;       /* DFS bitmask for 5Ghz tables */
+	uint64_t pscan;         /* Bitmask for passive scan */
+	uint32_t flags;         /* Requirement flags (AdHoc disallow, noise
+	                           floor cal needed, etc) */
+	uint64_t chan11a[BMLEN];        /* 128 bit bitmask for channel/band selection */
+	uint64_t chan11a_turbo[BMLEN];  /* 128 bit bitmask for channel/band select */
+	uint64_t chan11a_dyn_turbo[BMLEN];      /* 128 bit mask for chan/band select */
+
+	uint64_t chan11b[BMLEN];        /* 128 bit bitmask for channel/band selection */
+	uint64_t chan11g[BMLEN];        /* 128 bit bitmask for channel/band selection */
+	uint64_t chan11g_turbo[BMLEN];
+} REG_DOMAIN;
+
+/* (mode, flags) pair describing one channel mode */
+struct cmode {
+	uint32_t mode;
+	uint32_t flags;
+};
+
+#define    YES    true
+#define    NO    false
+
+/* mapping of old skus to new skus for Japan */
+typedef struct {
+	uint16_t domain;
+	uint16_t newdomain_pre53;       /* pre eeprom version 5.3 */
+	uint16_t newdomain_post53;      /* post eeprom version 5.3 */
+} JAPAN_SKUMAP;
+
+/* mapping of countrycode to new skus for Japan */
+typedef struct {
+	uint16_t ccode;
+	uint16_t newdomain_pre53;       /* pre eeprom version 5.3 */
+	uint16_t newdomain_post53;      /* post eeprom version 5.3 */
+} JAPAN_COUNTRYMAP;
+
+/* check rd flags in eeprom for japan */
+typedef struct {
+	uint16_t freqbandbit;
+	uint32_t eepromflagtocheck;
+} JAPAN_BANDCHECK;
+
+/* Common mode power table for 5Ghz */
+typedef struct {
+	uint16_t lchan;         /* low channel of the power range */
+	uint16_t hchan;         /* high channel of the power range */
+	uint8_t pwrlvl;         /* power level for the range */
+} COMMON_MODE_POWER;
+
+/* Who supplied the currently applied country code */
+typedef enum {
+	COUNTRY_CODE_SET_BY_CORE,
+	COUNTRY_CODE_SET_BY_DRIVER,
+	COUNTRY_CODE_SET_BY_USER
+} COUNTRY_CODE_SOURCE;
+
+/* Snapshot of the current regulatory settings: reg domain, country,
+ * DFS region, conformance test limits and origin of the country code.
+ */
+struct regulatory {
+	uint32_t reg_domain;
+	uint32_t eeprom_rd_ext;
+	uint16_t country_code;
+	uint8_t alpha2[3];      /* ISO alpha-2 country string */
+	uint8_t dfs_region;
+	uint8_t ctl_2g;         /* conformance test limit, 2.4GHz */
+	uint8_t ctl_5g;         /* conformance test limit, 5GHz */
+	const void *regpair;
+	COUNTRY_CODE_SOURCE cc_src;
+	uint32_t reg_flags;
+};
+/* Multi-Device RegDomain Support */
+typedef struct ath_hal_reg_dmn_tables {
+	/* regDomainPairs: Map of 8-bit regdomain values to unitary reg domain */
+	const REG_DMN_PAIR_MAPPING *regDomainPairs;
+	/* allCountries: Master list of freq. bands (flags, settings) */
+	const COUNTRY_CODE_TO_ENUM_RD *allCountries;
+	/* regDomains: Array of supported reg domains */
+	const REG_DOMAIN *regDomains;
+
+	uint16_t regDomainPairsCt;      /* Num reg domain pair entries */
+	uint16_t allCountriesCt;        /* Num country entries */
+	uint16_t regDomainsCt;  /* Num reg domain entries */
+} HAL_REG_DMN_TABLES;
+
+/*
+ * Country/Region Codes from MS WINNLS.H
+ * Numbering from ISO 3166
+ */
+/**     @brief country code definitions
+ *        - country definition: CTRY_DEBUG
+ *            - country string: DB
+ *            - country ID: 0
+ *        - country definition: CTRY_DEFAULT
+ *            - country string: NA
+ *            - country ID: 0
+ *        - country definition: CTRY_ALBANIA
+ *            - country string: AL
+ *            - country ID: 8
+ *        - country definition: CTRY_ALGERIA
+ *            - country string: DZ
+ *            - country ID: 12
+ *        - country definition: CTRY_ARGENTINA
+ *            - country string: AR
+ *            - country ID: 32
+ *        - country definition: CTRY_ARMENIA
+ *            - country string: AM
+ *            - country ID: 51
+ *        - country definition: CTRY_AUSTRALIA
+ *            - country string: AU
+ *            - country ID: 36
+ *        - country definition: CTRY_AUSTRALIA2
+ *            - country string: AU2
+ *            - country ID: 5000
+ *        - country definition: CTRY_AUSTRIA
+ *            - country string: AT
+ *            - country ID: 40
+ *        - country definition: CTRY_AZERBAIJAN
+ *            - country string: AZ
+ *            - country ID: 31
+ *        - country definition: CTRY_BAHAMAS
+ *            - country string: BS
+ *            - country ID: 44
+ *        - country definition: CTRY_BAHRAIN
+ *            - country string: BH
+ *            - country ID: 48
+ *        - country definition: CTRY_BELARUS
+ *            - country string: BY
+ *            - country ID: 112
+ *        - country definition: CTRY_BELGIUM
+ *            - country string: BE
+ *            - country ID: 56
+ *        - country definition: CTRY_BELIZE
+ *            - country string: BZ
+ *            - country ID: 84
+ *        - country definition: CTRY_BERMUDA
+ *            - country string: BM
+ *            - country ID: 60
+ *        - country definition: CTRY_BOLIVIA
+ *            - country string: BO
+ *            - country ID: 68
+ *        - country definition: CTRY_BOSNIA_HERZEGOWINA
+ *            - country string: BA
+ *            - country ID: 70
+ *        - country definition: CTRY_BRAZIL
+ *            - country string: BR
+ *            - country ID: 76
+ *        - country definition: CTRY_BRUNEI_DARUSSALAM
+ *            - country string: BN
+ *            - country ID: 96
+ *        - country definition: CTRY_BULGARIA
+ *            - country string: BG
+ *            - country ID: 100
+ *        - country definition: CTRY_CANADA
+ *            - country string: CA
+ *            - country ID: 124
+ *        - country definition: CTRY_CANADA2
+ *            - country string: CA2
+ *            - country ID: 5001
+ *        - country definition: CTRY_CHILE
+ *            - country string: CL
+ *            - country ID: 152
+ *        - country definition: CTRY_CHINA
+ *            - country string: CN
+ *            - country ID: 156
+ *        - country definition: CTRY_COLOMBIA
+ *            - country string: CO
+ *            - country ID: 170
+ *        - country definition: CTRY_COSTA_RICA
+ *            - country string: CR
+ *            - country ID: 188
+ *        - country definition: CTRY_CROATIA
+ *            - country string: HR
+ *            - country ID: 191
+ *        - country definition: CTRY_CYPRUS
+ *            - country string: CY
+ *            - country ID: 196
+ *        - country definition: CTRY_CZECH
+ *            - country string: CZ
+ *            - country ID: 203
+ *        - country definition: CTRY_DENMARK
+ *            - country string: DK
+ *            - country ID: 208
+ *        - country definition: CTRY_DOMINICAN_REPUBLIC
+ *            - country string: DO
+ *            - country ID: 214
+ *        - country definition: CTRY_ECUADOR
+ *            - country string: EC
+ *            - country ID: 218
+ *        - country definition: CTRY_EGYPT
+ *            - country string: EG
+ *            - country ID: 818
+ *        - country definition: CTRY_EL_SALVADOR
+ *            - country string: SV
+ *            - country ID: 222
+ *        - country definition: CTRY_ESTONIA
+ *            - country string: EE
+ *            - country ID: 233
+ *        - country definition: CTRY_FAEROE_ISLANDS
+ *            - country string: FO
+ *            - country ID: 234
+ *        - country definition: CTRY_FINLAND
+ *            - country string: FI
+ *            - country ID: 246
+ *        - country definition: CTRY_FRANCE
+ *            - country string: FR
+ *            - country ID: 250
+ *        - country definition: CTRY_FRANCE2
+ *            - country string: F2
+ *            - country ID: 255
+ *        - country definition: CTRY_GEORGIA
+ *            - country string: GE
+ *            - country ID: 268
+ *        - country definition: CTRY_GERMANY
+ *            - country string: DE
+ *            - country ID: 276
+ *        - country definition: CTRY_GREECE
+ *            - country string: GR
+ *            - country ID: 300
+ *        - country definition: CTRY_GUATEMALA
+ *            - country string: GT
+ *            - country ID: 320
+ *        - country definition: CTRY_HONDURAS
+ *            - country string: HN
+ *            - country ID: 340
+ *        - country definition: CTRY_HONG_KONG
+ *            - country string: HK
+ *            - country ID: 344
+ *        - country definition: CTRY_HUNGARY
+ *            - country string: HU
+ *            - country ID: 348
+ *        - country definition: CTRY_ICELAND
+ *            - country string: IS
+ *            - country ID: 352
+ *        - country definition: CTRY_INDIA
+ *            - country string: IN
+ *            - country ID: 356
+ *        - country definition: CTRY_INDONESIA
+ *            - country string: ID
+ *            - country ID: 360
+ *        - country definition: CTRY_IRAN
+ *            - country string: IR
+ *            - country ID: 364
+ *        - country definition: CTRY_IRAQ
+ *            - country string: IQ
+ *            - country ID: 368
+ *        - country definition: CTRY_IRELAND
+ *            - country string: IE
+ *            - country ID: 372
+ *        - country definition: CTRY_ISRAEL
+ *            - country string: IL
+ *            - country ID: 376
+ *        - country definition: CTRY_ITALY
+ *            - country string: IT
+ *            - country ID: 380
+ *        - country definition: CTRY_JAMAICA
+ *            - country string: JM
+ *            - country ID: 388
+ *        - country definition: CTRY_JAPAN
+ *            - country string: JP
+ *            - country ID: 392
+ *        - country definition: CTRY_JAPAN1
+ *            - country string: JP1
+ *            - country ID: 393
+ *        - country definition: CTRY_JAPAN2
+ *            - country string: JP2
+ *            - country ID: 394
+ *        - country definition: CTRY_JAPAN3
+ *            - country string: JP3
+ *            - country ID: 395
+ *        - country definition: CTRY_JAPAN4
+ *            - country string: JP4
+ *            - country ID: 396
+ *        - country definition: CTRY_JAPAN5
+ *            - country string: JP5
+ *            - country ID: 397
+ *        - country definition: CTRY_JAPAN6
+ *            - country string: JP6
+ *            - country ID: 399
+ *        - country definition: CTRY_JAPAN7
+ *            - country string: JP7
+ *            - country ID: 4007
+ *        - country definition: CTRY_JAPAN8
+ *            - country string: JP8
+ *            - country ID: 4008
+ *        - country definition: CTRY_JAPAN9
+ *            - country string: JP9
+ *            - country ID: 4009
+ *        - country definition: CTRY_JAPAN10
+ *            - country string: JP10
+ *            - country ID: 4010
+ *        - country definition: CTRY_JAPAN11
+ *            - country string: JP11
+ *            - country ID: 4011
+ *        - country definition: CTRY_JAPAN12
+ *            - country string: JP12
+ *            - country ID: 4012
+ *        - country definition: CTRY_JAPAN13
+ *            - country string: JP13
+ *            - country ID: 4013
+ *        - country definition: CTRY_JAPAN14
+ *            - country string: JP14
+ *            - country ID: 4014
+ *        - country definition: CTRY_JAPAN15
+ *            - country string: JP15
+ *            - country ID: 4015
+ *        - country definition: CTRY_JAPAN16
+ *            - country string: JP16
+ *            - country ID: 4016
+ *        - country definition: CTRY_JAPAN17
+ *            - country string: JP17
+ *            - country ID: 4017
+ *        - country definition: CTRY_JAPAN18
+ *            - country string: JP18
+ *            - country ID: 4018
+ *        - country definition: CTRY_JAPAN19
+ *            - country string: JP19
+ *            - country ID: 4019
+ *        - country definition: CTRY_JAPAN20
+ *            - country string: JP20
+ *            - country ID: 4020
+ *        - country definition: CTRY_JAPAN21
+ *            - country string: JP21
+ *            - country ID: 4021
+ *        - country definition: CTRY_JAPAN22
+ *            - country string: JP22
+ *            - country ID: 4022
+ *        - country definition: CTRY_JAPAN23
+ *            - country string: JP23
+ *            - country ID: 4023
+ *        - country definition: CTRY_JAPAN24
+ *            - country string: JP24
+ *            - country ID: 4024
+ *        - country definition: CTRY_JAPAN25
+ *            - country string: JP25
+ *            - country ID: 4025
+ *        - country definition: CTRY_JAPAN26
+ *            - country string: JP26
+ *            - country ID: 4026
+ *        - country definition: CTRY_JAPAN27
+ *            - country string: JP27
+ *            - country ID: 4027
+ *        - country definition: CTRY_JAPAN28
+ *            - country string: JP28
+ *            - country ID: 4028
+ *        - country definition: CTRY_JAPAN29
+ *            - country string: JP29
+ *            - country ID: 4029
+ *        - country definition: CTRY_JAPAN30
+ *            - country string: JP30
+ *            - country ID: 4030
+ *        - country definition: CTRY_JAPAN31
+ *            - country string: JP31
+ *            - country ID: 4031
+ *        - country definition: CTRY_JAPAN32
+ *            - country string: JP32
+ *            - country ID: 4032
+ *        - country definition: CTRY_JAPAN33
+ *            - country string: JP33
+ *            - country ID: 4033
+ *        - country definition: CTRY_JAPAN34
+ *            - country string: JP34
+ *            - country ID: 4034
+ *        - country definition: CTRY_JAPAN35
+ *            - country string: JP35
+ *            - country ID: 4035
+ *        - country definition: CTRY_JAPAN36
+ *            - country string: JP36
+ *            - country ID: 4036
+ *        - country definition: CTRY_JAPAN37
+ *            - country string: JP37
+ *            - country ID: 4037
+ *        - country definition: CTRY_JAPAN38
+ *            - country string: JP38
+ *            - country ID: 4038
+ *        - country definition: CTRY_JAPAN39
+ *            - country string: JP39
+ *            - country ID: 4039
+ *        - country definition: CTRY_JAPAN40
+ *            - country string: JP40
+ *            - country ID: 4040
+ *        - country definition: CTRY_JAPAN41
+ *            - country string: JP41
+ *            - country ID: 4041
+ *        - country definition: CTRY_JAPAN42
+ *            - country string: JP42
+ *            - country ID: 4042
+ *        - country definition: CTRY_JAPAN43
+ *            - country string: JP43
+ *            - country ID: 4043
+ *        - country definition: CTRY_JAPAN44
+ *            - country string: JP44
+ *            - country ID: 4044
+ *        - country definition: CTRY_JAPAN45
+ *            - country string: JP45
+ *            - country ID: 4045
+ *        - country definition: CTRY_JAPAN46
+ *            - country string: JP46
+ *            - country ID: 4046
+ *        - country definition: CTRY_JAPAN47
+ *            - country string: JP47
+ *            - country ID: 4047
+ *        - country definition: CTRY_JAPAN48
+ *            - country string: JP48
+ *            - country ID: 4048
+ *        - country definition: CTRY_JAPAN49
+ *            - country string: JP49
+ *            - country ID: 4049
+ *        - country definition: CTRY_JAPAN50
+ *            - country string: JP50
+ *            - country ID: 4050
+ *        - country definition: CTRY_JAPAN51
+ *            - country string: JP51
+ *            - country ID: 4051
+ *        - country definition: CTRY_JAPAN52
+ *            - country string: JP52
+ *            - country ID: 4052
+ *        - country definition: CTRY_JAPAN53
+ *            - country string: JP53
+ *            - country ID: 4053
+ *        - country definition: CTRY_JAPAN54
+ *            - country string: JP54
+ *            - country ID: 4054
+ *        - country definition: CTRY_JAPAN55
+ *            - country string: JP55
+ *            - country ID: 4055
+ *        - country definition: CTRY_JAPAN56
+ *            - country string: JP56
+ *            - country ID: 4056
+ *        - country definition: CTRY_JORDAN
+ *            - country string: JO
+ *            - country ID: 400
+ *        - country definition: CTRY_KAZAKHSTAN
+ *            - country string: KZ
+ *            - country ID: 398
+ *        - country definition: CTRY_KENYA
+ *            - country string: KE
+ *            - country ID: 404
+ *        - country definition: CTRY_KOREA_NORTH
+ *            - country string: KP
+ *            - country ID: 408
+ *        - country definition: CTRY_KOREA_ROC
+ *            - country string: KR
+ *            - country ID: 410
+ *        - country definition: CTRY_KOREA_ROC2
+ *            - country string: KR2
+ *            - country ID: 411
+ *        - country definition: CTRY_KOREA_ROC3
+ *            - country string: KR3
+ *            - country ID: 412
+ *        - country definition: CTRY_KUWAIT
+ *            - country string: KW
+ *            - country ID: 414
+ *        - country definition: CTRY_LATVIA
+ *            - country string: LV
+ *            - country ID: 428
+ *        - country definition: CTRY_LEBANON
+ *            - country string: LB
+ *            - country ID: 422
+ *        - country definition: CTRY_LIBYA
+ *            - country string: LY
+ *            - country ID: 434
+ *        - country definition: CTRY_LIECHTENSTEIN
+ *            - country string: LI
+ *            - country ID: 438
+ *        - country definition: CTRY_LITHUANIA
+ *            - country string: LT
+ *            - country ID: 440
+ *        - country definition: CTRY_LUXEMBOURG
+ *            - country string: LU
+ *            - country ID: 442
+ *        - country definition: CTRY_MACAU
+ *            - country string: MO
+ *            - country ID: 446
+ *        - country definition: CTRY_MACEDONIA
+ *            - country string: MK
+ *            - country ID: 807
+ *        - country definition: CTRY_MALAYSIA
+ *            - country string: MY
+ *            - country ID: 458
+ *        - country definition: CTRY_MALTA
+ *            - country string: MT
+ *            - country ID: 470
+ *        - country definition: CTRY_MAURITIUS
+ *            - country string: MU
+ *            - country ID: 480
+ *        - country definition: CTRY_MEXICO
+ *            - country string: MX
+ *            - country ID: 484
+ *        - country definition: CTRY_MONACO
+ *            - country string: MC
+ *            - country ID: 492
+ *        - country definition: CTRY_MOROCCO
+ *            - country string: MA
+ *            - country ID: 504
+ *        - country definition: CTRY_NETHERLANDS
+ *            - country string: NL
+ *            - country ID: 528
+ *        - country definition: CTRY_NEW_ZEALAND
+ *            - country string: NZ
+ *            - country ID: 554
+ *        - country definition: CTRY_NICARAGUA
+ *            - country string: NI
+ *            - country ID: 558
+ *        - country definition: CTRY_NORWAY
+ *            - country string: NO
+ *            - country ID: 578
+ *        - country definition: CTRY_OMAN
+ *            - country string: OM
+ *            - country ID: 512
+ *        - country definition: CTRY_PAKISTAN
+ *            - country string: PK
+ *            - country ID: 586
+ *        - country definition: CTRY_PANAMA
+ *            - country string: PA
+ *            - country ID: 591
+ *        - country definition: CTRY_PARAGUAY
+ *            - country string: PY
+ *            - country ID: 600
+ *        - country definition: CTRY_PERU
+ *            - country string: PE
+ *            - country ID: 604
+ *        - country definition: CTRY_PHILIPPINES
+ *            - country string: PH
+ *            - country ID: 608
+ *        - country definition: CTRY_POLAND
+ *            - country string: PL
+ *            - country ID: 616
+ *        - country definition: CTRY_PORTUGAL
+ *            - country string: PT
+ *            - country ID: 620
+ *        - country definition: CTRY_PUERTO_RICO
+ *            - country string: PR
+ *            - country ID: 630
+ *        - country definition: CTRY_QATAR
+ *            - country string: QA
+ *            - country ID: 634
+ *        - country definition: CTRY_ROMANIA
+ *            - country string: RO
+ *            - country ID: 642
+ *        - country definition: CTRY_RUSSIA
+ *            - country string: RU
+ *            - country ID: 643
+ *        - country definition: CTRY_SAUDI_ARABIA
+ *            - country string: SA
+ *            - country ID: 682
+ *        - country definition: CTRY_SERBIA
+ *            - country string: RS
+ *            - country ID: 688
+ *        - country definition: CTRY_MONTENEGRO
+ *            - country string: ME
+ *            - country ID: 499
+ *        - country definition: CTRY_SINGAPORE
+ *            - country string: SG
+ *            - country ID: 702
+ *        - country definition: CTRY_SLOVAKIA
+ *            - country string: SK
+ *            - country ID: 703
+ *        - country definition: CTRY_SLOVENIA
+ *            - country string: SI
+ *            - country ID: 705
+ *        - country definition: CTRY_SOUTH_AFRICA
+ *            - country string: ZA
+ *            - country ID: 710
+ *        - country definition: CTRY_SPAIN
+ *            - country string: ES
+ *            - country ID: 724
+ *        - country definition: CTRY_SRI_LANKA
+ *            - country string: LK
+ *            - country ID: 144
+ *        - country definition: CTRY_SWEDEN
+ *            - country string: SE
+ *            - country ID: 752
+ *        - country definition: CTRY_SWITZERLAND
+ *            - country string: CH
+ *            - country ID: 756
+ *        - country definition: CTRY_SYRIA
+ *            - country string: SY
+ *            - country ID: 760
+ *        - country definition: CTRY_TAIWAN
+ *            - country string: TW
+ *            - country ID: 158
+ *        - country definition: CTRY_TANZANIA
+ *            - country string: TZ
+ *            - country ID: 834
+ *        - country definition: CTRY_THAILAND
+ *            - country string: TH
+ *            - country ID: 764
+ *        - country definition: CTRY_TRINIDAD_Y_TOBAGO
+ *            - country string: TT
+ *            - country ID: 780
+ *        - country definition: CTRY_TUNISIA
+ *            - country string: TN
+ *            - country ID: 788
+ *        - country definition: CTRY_TURKEY
+ *            - country string: TR
+ *            - country ID: 792
+ *        - country definition: CTRY_UAE
+ *            - country string: AE
+ *            - country ID: 784
+ *        - country definition: CTRY_UKRAINE
+ *            - country string: UA
+ *            - country ID: 804
+ *        - country definition: CTRY_UNITED_KINGDOM
+ *            - country string: GB
+ *            - country ID: 826
+ *        - country definition: CTRY_UNITED_STATES
+ *            - country string: US
+ *            - country ID: 840
+ *        - country definition: CTRY_UNITED_STATES_FCC49
+ *            - country string: US
+ *            - country ID: 842
+ *        - country definition: CTRY_URUGUAY
+ *            - country string: UY
+ *            - country ID: 858
+ *        - country definition: CTRY_UZBEKISTAN
+ *            - country string: UZ
+ *            - country ID: 860
+ *        - country definition: CTRY_VENEZUELA
+ *            - country string: VE
+ *            - country ID: 862
+ *        - country definition: CTRY_VIET_NAM
+ *            - country string: VN
+ *            - country ID: 704
+ *        - country definition: CTRY_YEMEN
+ *            - country string: YE
+ *            - country ID: 887
+ *        - country definition: CTRY_ZIMBABWE
+ *            - country string: ZW
+ *            - country ID: 716
+ */
+/*
+ * enum CountryCode - numeric country identifiers used by the regulatory code.
+ * Values largely follow ISO 3166-1 numeric codes; entries such as
+ * CTRY_UNITED_STATES2, CTRY_KOREA_ROC3, the CTRY_JAPANxx series and the
+ * 5000-range codes at the end are vendor-specific variants of the same
+ * country (see the "Special codes" comments below).
+ */
+enum CountryCode {
+	CTRY_ALBANIA = 8,       /* Albania */
+	CTRY_ALGERIA = 12,      /* Algeria */
+	CTRY_ARGENTINA = 32,    /* Argentina */
+	CTRY_ARMENIA = 51,      /* Armenia */
+	CTRY_AUSTRALIA = 36,    /* Australia */
+	CTRY_AUSTRIA = 40,      /* Austria */
+	CTRY_AZERBAIJAN = 31,   /* Azerbaijan */
+	CTRY_BAHAMAS = 44,      /* Bahamas */
+	CTRY_BAHRAIN = 48,      /* Bahrain */
+	CTRY_BANGLADESH = 50,   /* Bangladesh */
+	CTRY_BARBADOS = 52,     /* Barbados */
+	CTRY_BELARUS = 112,     /* Belarus */
+	CTRY_BELGIUM = 56,      /* Belgium */
+	CTRY_BELIZE = 84,       /* Belize */
+	CTRY_BERMUDA = 60,      /* Bermuda */
+	CTRY_BOLIVIA = 68,      /* Bolivia */
+	CTRY_BOSNIA_HERZ = 70,  /* Bosnia and Herzegowina */
+	CTRY_BRAZIL = 76,       /* Brazil */
+	CTRY_BRUNEI_DARUSSALAM = 96,    /* Brunei Darussalam */
+	CTRY_BULGARIA = 100,    /* Bulgaria */
+	CTRY_CAMBODIA = 116,    /* Cambodia */
+	CTRY_CANADA = 124,      /* Canada */
+	CTRY_CHILE = 152,       /* Chile */
+	CTRY_CHINA = 156,       /* People's Republic of China */
+	CTRY_COLOMBIA = 170,    /* Colombia */
+	CTRY_COSTA_RICA = 188,  /* Costa Rica */
+	CTRY_CROATIA = 191,     /* Croatia */
+	CTRY_CYPRUS = 196,      /* Cyprus */
+	CTRY_CZECH = 203,       /* Czech Republic */
+	CTRY_DENMARK = 208,     /* Denmark */
+	CTRY_DOMINICAN_REPUBLIC = 214,  /* Dominican Republic */
+	CTRY_ECUADOR = 218,     /* Ecuador */
+	CTRY_EGYPT = 818,       /* Egypt */
+	CTRY_EL_SALVADOR = 222, /* El Salvador */
+	CTRY_ESTONIA = 233,     /* Estonia */
+	CTRY_FAEROE_ISLANDS = 234,      /* Faeroe Islands */
+	CTRY_FINLAND = 246,     /* Finland */
+	CTRY_FRANCE = 250,      /* France */
+	CTRY_GEORGIA = 268,     /* Georgia */
+	CTRY_GERMANY = 276,     /* Germany */
+	CTRY_GREECE = 300,      /* Greece */
+	CTRY_GREENLAND = 304,   /* Greenland */
+	CTRY_GRENADA = 308,     /* Grenada */
+	CTRY_GUAM = 316,        /* Guam */
+	CTRY_GUATEMALA = 320,   /* Guatemala */
+	CTRY_HAITI = 332,       /* Haiti */
+	CTRY_HONDURAS = 340,    /* Honduras */
+	CTRY_HONG_KONG = 344,   /* Hong Kong S.A.R., P.R.C. */
+	CTRY_HUNGARY = 348,     /* Hungary */
+	CTRY_ICELAND = 352,     /* Iceland */
+	CTRY_INDIA = 356,       /* India */
+	CTRY_INDONESIA = 360,   /* Indonesia */
+	CTRY_IRAN = 364,        /* Iran */
+	CTRY_IRAQ = 368,        /* Iraq */
+	CTRY_IRELAND = 372,     /* Ireland */
+	CTRY_ISRAEL = 376,      /* Israel */
+	CTRY_ITALY = 380,       /* Italy */
+	CTRY_JAMAICA = 388,     /* Jamaica */
+	CTRY_JAPAN = 392,       /* Japan */
+	CTRY_JORDAN = 400,      /* Jordan */
+	CTRY_KAZAKHSTAN = 398,  /* Kazakhstan */
+	CTRY_KENYA = 404,       /* Kenya */
+	CTRY_KOREA_NORTH = 408, /* North Korea */
+	CTRY_KOREA_ROC = 410,   /* South Korea */
+	CTRY_KOREA_ROC3 = 412,  /* South Korea */
+	CTRY_KUWAIT = 414,      /* Kuwait */
+	CTRY_LATVIA = 428,      /* Latvia */
+	CTRY_LEBANON = 422,     /* Lebanon */
+	CTRY_LIBYA = 434,       /* Libya */
+	CTRY_LIECHTENSTEIN = 438,       /* Liechtenstein */
+	CTRY_LITHUANIA = 440,   /* Lithuania */
+	CTRY_LUXEMBOURG = 442,  /* Luxembourg */
+	CTRY_MACAU = 446,       /* Macau SAR */
+	CTRY_MACEDONIA = 807,   /* the Former Yugoslav Republic of Macedonia */
+	CTRY_MALAYSIA = 458,    /* Malaysia */
+	CTRY_MALDIVES = 462,    /* Maldives */
+	CTRY_MALTA = 470,       /* Malta */
+	CTRY_MAURITIUS = 480,   /* Mauritius */
+	CTRY_MEXICO = 484,      /* Mexico */
+	CTRY_MONACO = 492,      /* Principality of Monaco */
+	CTRY_MOROCCO = 504,     /* Morocco */
+	CTRY_NEPAL = 524,       /* Nepal */
+	CTRY_NETHERLANDS = 528, /* Netherlands */
+	CTRY_NETHERLANDS_ANTILLES = 530,        /* Netherlands-Antilles */
+	CTRY_ARUBA = 533,       /* Aruba */
+	CTRY_NEW_ZEALAND = 554, /* New Zealand */
+	CTRY_NICARAGUA = 558,   /* Nicaragua */
+	CTRY_NORWAY = 578,      /* Norway */
+	CTRY_OMAN = 512,        /* Oman */
+	CTRY_PAKISTAN = 586,    /* Islamic Republic of Pakistan */
+	CTRY_PANAMA = 591,      /* Panama */
+	CTRY_PAPUA_NEW_GUINEA = 598,    /* Papua New Guinea */
+	CTRY_PARAGUAY = 600,    /* Paraguay */
+	CTRY_PERU = 604,        /* Peru */
+	CTRY_PHILIPPINES = 608, /* Republic of the Philippines */
+	CTRY_POLAND = 616,      /* Poland */
+	CTRY_PORTUGAL = 620,    /* Portugal */
+	CTRY_PUERTO_RICO = 630, /* Puerto Rico */
+	CTRY_QATAR = 634,       /* Qatar */
+	CTRY_ROMANIA = 642,     /* Romania */
+	CTRY_RUSSIA = 643,      /* Russia */
+	CTRY_RWANDA = 646,      /* Rwanda */
+	CTRY_SAUDI_ARABIA = 682,        /* Saudi Arabia */
+	CTRY_SERBIA = 688,      /* Republic of Serbia */
+	CTRY_MONTENEGRO = 499,  /* Montenegro */
+	CTRY_SINGAPORE = 702,   /* Singapore */
+	CTRY_SLOVAKIA = 703,    /* Slovak Republic */
+	CTRY_SLOVENIA = 705,    /* Slovenia */
+	CTRY_SOUTH_AFRICA = 710,        /* South Africa */
+	CTRY_SPAIN = 724,       /* Spain */
+	CTRY_SRI_LANKA = 144,   /* Sri Lanka */
+	CTRY_SWEDEN = 752,      /* Sweden */
+	CTRY_SWITZERLAND = 756, /* Switzerland */
+	CTRY_SYRIA = 760,       /* Syria */
+	CTRY_TAIWAN = 158,      /* Taiwan */
+	CTRY_TANZANIA = 834,    /* Tanzania */
+	CTRY_THAILAND = 764,    /* Thailand */
+	CTRY_TRINIDAD_Y_TOBAGO = 780,   /* Trinidad y Tobago */
+	CTRY_TUNISIA = 788,     /* Tunisia */
+	CTRY_TURKEY = 792,      /* Turkey */
+	CTRY_UAE = 784,         /* U.A.E. */
+	CTRY_UGANDA = 800,      /* Uganda */
+	CTRY_UKRAINE = 804,     /* Ukraine */
+	CTRY_UNITED_KINGDOM = 826,      /* United Kingdom */
+	CTRY_UNITED_STATES = 840,       /* United States */
+	CTRY_UNITED_STATES2 = 841,      /* United States for AP */
+	CTRY_UNITED_STATES_FCC49 = 842, /* United States (Public Safety) */
+	CTRY_URUGUAY = 858,     /* Uruguay */
+	CTRY_UZBEKISTAN = 860,  /* Uzbekistan */
+	CTRY_VENEZUELA = 862,   /* Venezuela */
+	CTRY_VIET_NAM = 704,    /* Viet Nam */
+	CTRY_YEMEN = 887,       /* Yemen */
+	CTRY_ZIMBABWE = 716,    /* Zimbabwe */
+
+	/*
+	** Japan special codes.  Boy, do they have a lot
+	*/
+
+	CTRY_JAPAN1 = 393,      /* Japan (JP1) */
+	CTRY_JAPAN2 = 394,      /* Japan (JP0) */
+	CTRY_JAPAN3 = 395,      /* Japan (JP1-1) */
+	CTRY_JAPAN4 = 396,      /* Japan (JE1) */
+	CTRY_JAPAN5 = 397,      /* Japan (JE2) */
+	CTRY_JAPAN6 = 4006,     /* Japan (JP6) */
+	CTRY_JAPAN7 = 4007,     /* Japan (J7) */
+	CTRY_JAPAN8 = 4008,     /* Japan (J8) */
+	CTRY_JAPAN9 = 4009,     /* Japan (J9) */
+	CTRY_JAPAN10 = 4010,    /* Japan (J10) */
+	CTRY_JAPAN11 = 4011,    /* Japan (J11) */
+	CTRY_JAPAN12 = 4012,    /* Japan (J12) */
+	CTRY_JAPAN13 = 4013,    /* Japan (J13) */
+	CTRY_JAPAN14 = 4014,    /* Japan (J14) */
+	CTRY_JAPAN15 = 4015,    /* Japan (J15) */
+	CTRY_JAPAN16 = 4016,    /* Japan (J16) */
+	CTRY_JAPAN17 = 4017,    /* Japan (J17) */
+	CTRY_JAPAN18 = 4018,    /* Japan (J18) */
+	CTRY_JAPAN19 = 4019,    /* Japan (J19) */
+	CTRY_JAPAN20 = 4020,    /* Japan (J20) */
+	CTRY_JAPAN21 = 4021,    /* Japan (J21) */
+	CTRY_JAPAN22 = 4022,    /* Japan (J22) */
+	CTRY_JAPAN23 = 4023,    /* Japan (J23) */
+	CTRY_JAPAN24 = 4024,    /* Japan (J24) */
+	CTRY_JAPAN25 = 4025,    /* Japan (J25) */
+	CTRY_JAPAN26 = 4026,    /* Japan (J26) */
+	CTRY_JAPAN27 = 4027,    /* Japan (J27) */
+	CTRY_JAPAN28 = 4028,    /* Japan (J28) */
+	CTRY_JAPAN29 = 4029,    /* Japan (J29) */
+	CTRY_JAPAN30 = 4030,    /* Japan (J30) */
+	CTRY_JAPAN31 = 4031,    /* Japan (J31) */
+	CTRY_JAPAN32 = 4032,    /* Japan (J32) */
+	CTRY_JAPAN33 = 4033,    /* Japan (J33) */
+	CTRY_JAPAN34 = 4034,    /* Japan (J34) */
+	CTRY_JAPAN35 = 4035,    /* Japan (J35) */
+	CTRY_JAPAN36 = 4036,    /* Japan (J36) */
+	CTRY_JAPAN37 = 4037,    /* Japan (J37) */
+	CTRY_JAPAN38 = 4038,    /* Japan (J38) */
+	CTRY_JAPAN39 = 4039,    /* Japan (J39) */
+	CTRY_JAPAN40 = 4040,    /* Japan (J40) */
+	CTRY_JAPAN41 = 4041,    /* Japan (J41) */
+	CTRY_JAPAN42 = 4042,    /* Japan (J42) */
+	CTRY_JAPAN43 = 4043,    /* Japan (J43) */
+	CTRY_JAPAN44 = 4044,    /* Japan (J44) */
+	CTRY_JAPAN45 = 4045,    /* Japan (J45) */
+	CTRY_JAPAN46 = 4046,    /* Japan (J46) */
+	CTRY_JAPAN47 = 4047,    /* Japan (J47) */
+	CTRY_JAPAN48 = 4048,    /* Japan (J48) */
+	CTRY_JAPAN49 = 4049,    /* Japan (J49) */
+	CTRY_JAPAN50 = 4050,    /* Japan (J50) */
+	CTRY_JAPAN51 = 4051,    /* Japan (J51) */
+	CTRY_JAPAN52 = 4052,    /* Japan (J52) */
+	CTRY_JAPAN53 = 4053,    /* Japan (J53) */
+	CTRY_JAPAN54 = 4054,    /* Japan (J54) */
+	CTRY_JAPAN55 = 4055,    /* Japan (J55) */
+	CTRY_JAPAN56 = 4056,    /* Japan (J56) */
+	CTRY_JAPAN57 = 4057,    /* Japan (J57) */
+	CTRY_JAPAN58 = 4058,    /* Japan (J58) */
+	CTRY_JAPAN59 = 4059,    /* Japan (J59) */
+
+	/*
+	** "Special" codes for multiply defined countries, with the exception
+	** of Japan and US.
+	*/
+
+	CTRY_AUSTRALIA2 = 5000, /* Australia for AP only */
+	CTRY_CANADA2 = 5001,    /* Canada for AP only */
+	CTRY_BELGIUM2 = 5002    /* Belgium/Cisco implementation */
+};
+
+/* Populate @reg with regulatory information; returns 0 on success,
+ * negative on failure (assumed from the int32_t return - confirm in the
+ * implementation, which is not in this header). */
+int32_t cds_fill_some_regulatory_info(struct regulatory *reg);
+/* Build and send the CTL (conformance test limit) data for @reg to
+ * firmware (from its name; implementation not in view). */
+void cds_fill_and_send_ctl_to_fw(struct regulatory *reg);
+/* Map a two-character country string (@alpha2) to its numeric country
+ * code; error-value convention to be confirmed against the caller. */
+int32_t cds_get_country_from_alpha2(uint8_t *alpha2);
+/* CTL download variant taking the available and selected wireless-mode
+ * bitmasks (presumably WMI mode masks - TODO confirm). */
+void cds_fill_send_ctl_info_to_fw(struct regulatory *reg, uint32_t modesAvail,
+				  uint32_t modeSelect);
+/* Derive the DFS region for @reg and program it into WMA (from its
+ * name; implementation not in view). */
+void cds_set_wma_dfs_region(struct regulatory *reg);
+
+#endif /* REGULATORY_H */

+ 2218 - 0
core/cds/inc/cds_regdomain_common.h

@@ -0,0 +1,2218 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2005-2011 Atheros Communications, Inc.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD: release/9.0.0/sys/dev/ath/ath_hal/ah_regdomain/ah_rd_regenum.h 224226 2011-07-20 12:46:58Z adrian $
+ */
+/*
+ * This module contains the common regulatory domain database tables:
+ *
+ *     - reg domain enum constants
+ *     - reg domain enum to reg domain pair mappings
+ *     - country to regdomain mappings
+ *     - channel tag enums and the frequency-to-frequency band mappings
+ *       for all the modes
+ *
+ * "The country table and respective Regulatory Domain channel and power
+ * settings are based on available knowledge as of software release. The
+ * underlying global regulatory and spectrum rules change on a regular basis,
+ * therefore, no warranty is given that the channel and power information
+ * herein is complete, accurate or up to date.  Developers are responsible
+ * for regulatory compliance of end-products developed using the enclosed
+ * data per all applicable national requirements.  Furthermore, data in this
+ * table does not guarantee that spectrum is available and that regulatory
+ * approval is possible in every case. Knowledgeable regulatory compliance
+ * or government contacts should be consulted by the manufacturer to ensure
+ * that the most current and accurate settings are used in each end-product.
+ * This table was designed so that developers are able to update the country
+ * table mappings as well as the Regulatory Domain definitions in order to
+ * incorporate the most current channel and power settings in the end-product."
+ *
+ */
+
+/* Enumerated Regulatory Domain Information 8 bit values indicate that
+ * the regdomain is really a pair of unitary regdomains.  12 bit values
+ * are the real unitary regdomains and are the only ones which have the
+ * frequency bitmasks and flags set.
+ */
+
+#include "cds_ieee80211_common.h"
+#include <a_types.h>
+#include "wlan_defs.h"
+
+/* Upper bound on the number of channels listed per 802.11 operating
+ * class entry (users of this macro are outside this header). */
+#define MAX_CHANNELS_PER_OPERATING_CLASS  25
+
+/*
+ * enum EnumRd - regulatory domain identifiers read from the EEPROM.
+ * As described in the file-header note: 8-bit values name a pair of
+ * unitary regdomains (one 5GHz, one 2.4GHz), while 12-bit values are
+ * the unitary regdomains themselves.
+ */
+enum EnumRd {
+	/*
+	 * The following regulatory domain definitions are
+	 * found in the EEPROM. Each regulatory domain
+	 * can operate in either a 5GHz or 2.4GHz wireless mode or
+	 * both 5GHz and 2.4GHz wireless modes.
+	 * In general, the value holds no special
+	 * meaning and is used to decode into either specific
+	 * 2.4GHz or 5GHz wireless mode for that particular
+	 * regulatory domain.
+	 */
+	NO_ENUMRD = 0x00,
+	NULL1_WORLD = 0x03,     /* For 11b-only countries (no 11a allowed) */
+	NULL1_ETSIB = 0x07,     /* Israel */
+	NULL1_ETSIC = 0x08,
+	FCC1_FCCA = 0x10,       /* USA */
+	FCC1_WORLD = 0x11,      /* Hong Kong */
+	FCC4_FCCA = 0x12,       /* USA - Public Safety */
+	FCC5_FCCA = 0x13,       /* US with no DFS (UNII-1 + UNII-3 Only) */
+	FCC6_FCCA = 0x14,       /* Canada for AP only */
+
+	FCC2_FCCA = 0x20,       /* Canada */
+	FCC2_WORLD = 0x21,      /* Australia & HK */
+	FCC2_ETSIC = 0x22,
+	FCC6_WORLD = 0x23,      /* Australia for AP only */
+	FRANCE_RES = 0x31,      /* Legacy France for OEM */
+	FCC3_FCCA = 0x3A,       /* USA & Canada w/5470 band, 11h, DFS enabled */
+	FCC3_WORLD = 0x3B,      /* USA & Canada w/5470 band, 11h, DFS enabled */
+	FCC3_ETSIC = 0x3F,      /* New Zealand, DFS enabled */
+
+	ETSI1_WORLD = 0x37,
+	ETSI3_ETSIA = 0x32,     /* France (optional) */
+	ETSI2_WORLD = 0x35,     /* Hungary & others */
+	ETSI3_WORLD = 0x36,     /* France & others */
+	ETSI4_WORLD = 0x30,
+	ETSI4_ETSIC = 0x38,
+	ETSI5_WORLD = 0x39,
+	ETSI6_WORLD = 0x34,     /* Bulgaria */
+	ETSI8_WORLD = 0x3D,     /* Russia */
+	ETSI9_WORLD = 0x3E,     /* Ukraine */
+	ETSI_RESERVED = 0x33,   /* Reserved (Do not use) */
+
+	MKK1_MKKA = 0x40,       /* Japan (JP1) */
+	MKK1_MKKB = 0x41,       /* Japan (JP0) */
+	APL4_WORLD = 0x42,      /* Singapore and Morocco */
+	MKK2_MKKA = 0x43,       /* Japan with 4.9G channels */
+	APL_RESERVED = 0x44,    /* Reserved (Do not use)  */
+	APL2_WORLD = 0x45,      /* Korea */
+	APL2_APLC = 0x46,
+	APL3_WORLD = 0x47,
+	MKK1_FCCA = 0x48,       /* Japan (JP1-1) */
+	APL2_APLD = 0x49,       /* Korea with 2.3G channels */
+	MKK1_MKKA1 = 0x4A,      /* Japan (JE1) */
+	MKK1_MKKA2 = 0x4B,      /* Japan (JE2) */
+	MKK1_MKKC = 0x4C,       /* Japan (MKK1_MKKA,except Ch14) */
+	APL2_FCCA = 0x4D,       /* Mobile customer */
+	APL11_FCCA = 0x4F,      /* Specific AP Customer 5GHz, For APs Only */
+
+	APL3_FCCA = 0x50,
+	APL12_WORLD = 0x51,
+	APL1_WORLD = 0x52,      /* Latin America */
+	APL1_FCCA = 0x53,
+	APL1_APLA = 0x54,
+	APL1_ETSIC = 0x55,
+	APL2_ETSIC = 0x56,      /* Venezuela */
+	APL5_WORLD = 0x58,      /* Chile */
+	APL13_WORLD = 0x5A,     /* Algeria */
+	APL6_WORLD = 0x5B,      /* Singapore */
+	APL7_FCCA = 0x5C,       /* Taiwan 5.47 Band */
+	APL8_WORLD = 0x5D,      /* Malaysia 5GHz */
+	APL9_MKKC = 0x5E,      /* Korea 5GHz, Before 11/2007. Now used only by APs */
+	APL10_MKKC = 0x5F,     /* Korea 5GHz, After 11/2007. For STAs only */
+
+	/*
+	 * World mode SKUs
+	 */
+	WOR0_WORLD = 0x60,      /* World0 (WO0 SKU) */
+	WOR1_WORLD = 0x61,      /* World1 (WO1 SKU) */
+	WOR2_WORLD = 0x62,      /* World2 (WO2 SKU) */
+	WOR3_WORLD = 0x63,      /* World3 (WO3 SKU) */
+	WOR4_WORLD = 0x64,      /* World4 (WO4 SKU) */
+	WOR5_ETSIC = 0x65,      /* World5 (WO5 SKU) */
+
+	WOR01_WORLD = 0x66,     /* World0-1 (WW0-1 SKU) */
+	WOR02_WORLD = 0x67,     /* World0-2 (WW0-2 SKU) */
+	EU1_WORLD = 0x68,       /* Same as World0-2 (WW0-2 SKU), except active scan ch1-13. No ch14 */
+
+	WOR9_WORLD = 0x69,      /* World9 (WO9 SKU) */
+	WORA_WORLD = 0x6A,      /* WorldA (WOA SKU) */
+	WORB_WORLD = 0x6B,      /* WorldB (WOB SKU) */
+	WORC_WORLD = 0x6C,      /* WorldC (WOC SKU) */
+
+	MKK3_MKKB = 0x80,       /* Japan UNI-1 even + MKKB */
+	MKK3_MKKA2 = 0x81,      /* Japan UNI-1 even + MKKA2 */
+	MKK3_MKKC = 0x82,       /* Japan UNI-1 even + MKKC */
+
+	MKK4_MKKB = 0x83,       /* Japan UNI-1 even + UNI-2 + MKKB */
+	MKK4_MKKA2 = 0x84,      /* Japan UNI-1 even + UNI-2 + MKKA2 */
+	MKK4_MKKC = 0x85,       /* Japan UNI-1 even + UNI-2 + MKKC */
+
+	MKK5_MKKB = 0x86,       /* Japan UNI-1 even + UNI-2 + mid-band + MKKB */
+	MKK5_MKKA2 = 0x87,      /* Japan UNI-1 even + UNI-2 + mid-band + MKKA2 */
+	MKK5_MKKC = 0x88,       /* Japan UNI-1 even + UNI-2 + mid-band + MKKC */
+	MKK5_FCCA = 0x9A,
+
+	MKK6_MKKB = 0x89,       /* Japan UNI-1 even + UNI-1 odd MKKB */
+	MKK6_MKKA2 = 0x8A,      /* Japan UNI-1 even + UNI-1 odd + MKKA2 */
+	MKK6_MKKC = 0x8B,       /* Japan UNI-1 even + UNI-1 odd + MKKC */
+
+	MKK7_MKKB = 0x8C,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + MKKB */
+	MKK7_MKKA2 = 0x8D,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + MKKA2 */
+	MKK7_MKKC = 0x8E,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + MKKC */
+
+	MKK8_MKKB = 0x8F,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + MKKB */
+	MKK8_MKKA2 = 0x90,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + MKKA2 */
+	MKK8_MKKC = 0x91,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + MKKC */
+
+	MKK14_MKKA1 = 0x92,     /* Japan UNI-1 even + UNI-1 odd + 4.9GHz + MKKA1 */
+	MKK15_MKKA1 = 0x93,     /* Japan UNI-1 even + UNI-1 odd + UNI-2 + 4.9GHz + MKKA1 */
+
+	MKK10_FCCA = 0xD0,      /* Japan UNI-1 even + UNI-2 + 4.9GHz + FCCA */
+	MKK10_MKKA1 = 0xD1,     /* Japan UNI-1 even + UNI-2 + 4.9GHz + MKKA1 */
+	MKK10_MKKC = 0xD2,      /* Japan UNI-1 even + UNI-2 + 4.9GHz + MKKC */
+	MKK10_MKKA2 = 0xD3,     /* Japan UNI-1 even + UNI-2 + 4.9GHz + MKKA2 */
+
+	MKK11_MKKA = 0xD4,      /* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz + MKKA */
+	MKK11_FCCA = 0xD5,      /* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz + FCCA */
+	MKK11_MKKA1 = 0xD6,     /* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz + MKKA1 */
+	MKK11_MKKC = 0xD7,      /* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz + MKKC */
+	MKK11_MKKA2 = 0xD8,     /* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz + MKKA2 */
+
+	MKK12_MKKA = 0xD9,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz + MKKA */
+	MKK12_FCCA = 0xDA,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz + FCCA */
+	MKK12_MKKA1 = 0xDB,     /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz + MKKA1 */
+	MKK12_MKKC = 0xDC,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz + MKKC */
+	MKK12_MKKA2 = 0xDD,     /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz + MKKA2 */
+
+	MKK13_MKKB = 0xDE,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + MKKB + All passive + no adhoc */
+
+	/* Following definitions are used only by s/w to map old
+	 * Japan SKUs.
+	 */
+	MKK3_MKKA = 0xF0,       /* Japan UNI-1 even + MKKA */
+	MKK3_MKKA1 = 0xF1,      /* Japan UNI-1 even + MKKA1 */
+	MKK3_FCCA = 0xF2,       /* Japan UNI-1 even + FCCA */
+	MKK4_MKKA = 0xF3,       /* Japan UNI-1 even + UNI-2 + MKKA */
+	MKK4_MKKA1 = 0xF4,      /* Japan UNI-1 even + UNI-2 + MKKA1 */
+	MKK4_FCCA = 0xF5,       /* Japan UNI-1 even + UNI-2 + FCCA */
+	MKK9_MKKA = 0xF6,       /* Japan UNI-1 even + 4.9GHz */
+	MKK10_MKKA = 0xF7,      /* Japan UNI-1 even + UNI-2 + 4.9GHz */
+	MKK6_MKKA1 = 0xF8,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + MKKA1 */
+	MKK6_FCCA = 0xF9,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + FCCA */
+	MKK7_MKKA1 = 0xFA,      /* Japan UNI-1 even + UNI-1 odd + UNI-2 + MKKA1 */
+	MKK7_FCCA = 0xFB,       /* Japan UNI-1 even + UNI-1 odd + UNI-2 + FCCA */
+	MKK9_FCCA = 0xFC,       /* Japan UNI-1 even + 4.9GHz + FCCA */
+	MKK9_MKKA1 = 0xFD,      /* Japan UNI-1 even + 4.9GHz + MKKA1 */
+	MKK9_MKKC = 0xFE,       /* Japan UNI-1 even + 4.9GHz + MKKC */
+	MKK9_MKKA2 = 0xFF,      /* Japan UNI-1 even + 4.9GHz + MKKA2 */
+
+	/*
+	 * Regulator domains ending in a number (e.g. APL1,
+	 * MK1, ETSI4, etc) apply to 5GHz channel and power
+	 * information.  Regulator domains ending in a letter
+	 * (e.g. APLA, FCCA, etc) apply to 2.4GHz channel and
+	 * power information.
+	 */
+	APL1 = 0x0150,          /* LAT & Asia */
+	APL2 = 0x0250,          /* LAT & Asia */
+	APL3 = 0x0350,          /* Taiwan */
+	APL4 = 0x0450,          /* Jordan */
+	APL5 = 0x0550,          /* Chile */
+	APL6 = 0x0650,          /* Singapore */
+	APL7 = 0x0750,          /* Taiwan, disable ch52 */
+	APL8 = 0x0850,          /* Malaysia */
+	APL9 = 0x0950,          /* Korea. Before 11/2007. Now used only by APs */
+	APL10 = 0x1050,         /* Korea. After 11/2007. For STAs only */
+	APL11 = 0x1150,         /* Specific AP Customer 5GHz, For APs Only */
+	APL12 = 0x1160,         /* Kenya */
+
+	ETSI1 = 0x0130,         /* Europe & others */
+	ETSI2 = 0x0230,         /* Europe & others */
+	ETSI3 = 0x0330,         /* Europe & others */
+	ETSI4 = 0x0430,         /* Europe & others */
+	ETSI5 = 0x0530,         /* Europe & others */
+	ETSI6 = 0x0630,         /* Europe & others */
+	ETSI8 = 0x0830,         /* Russia */
+	ETSI9 = 0x0930,         /* Ukraine */
+	ETSIA = 0x0A30,         /* France */
+	ETSIB = 0x0B30,         /* Israel */
+	ETSIC = 0x0C30,         /* Latin America */
+
+	FCC1 = 0x0110,          /* US & others */
+	FCC2 = 0x0120,          /* Canada, Australia & New Zealand */
+	FCC3 = 0x0160,          /* US w/new middle band & DFS */
+	FCC4 = 0x0165,          /* US Public Safety */
+	FCC5 = 0x0510,
+	FCC6 = 0x0610,          /* Canada & Australia */
+	FCCA = 0x0A10,
+
+	APLD = 0x0D50,          /* South Korea */
+
+	MKK1 = 0x0140,          /* Japan (UNI-1 odd) */
+	MKK2 = 0x0240,          /* Japan (4.9 GHz + UNI-1 odd) */
+	MKK3 = 0x0340,          /* Japan (UNI-1 even) */
+	MKK4 = 0x0440,          /* Japan (UNI-1 even + UNI-2) */
+	MKK5 = 0x0540,          /* Japan (UNI-1 even + UNI-2 + mid-band) */
+	MKK6 = 0x0640,          /* Japan (UNI-1 odd + UNI-1 even) */
+	MKK7 = 0x0740,          /* Japan (UNI-1 odd + UNI-1 even + UNI-2 */
+	MKK8 = 0x0840,          /* Japan (UNI-1 odd + UNI-1 even + UNI-2 + mid-band) */
+	MKK9 = 0x0940,          /* Japan (UNI-1 even + 4.9 GHZ) */
+	MKK10 = 0x0B40,         /* Japan (UNI-1 even + UNI-2 + 4.9 GHZ) */
+	MKK11 = 0x1140,         /* Japan (UNI-1 even + UNI-2 + 4.9 GHZ) */
+	MKK12 = 0x1240,         /* Japan (UNI-1 even + UNI-2 + 4.9 GHZ) */
+	MKK13 = 0x0C40,         /* Same as MKK8 but all passive and no adhoc 11a */
+	MKK14 = 0x1440,         /* Japan UNI-1 even + UNI-1 odd + 4.9GHz */
+	MKK15 = 0x1540,         /* Japan UNI-1 even + UNI-1 odd + UNI-2 + 4.9GHz */
+	MKKA = 0x0A40,          /* Japan */
+	MKKC = 0x0A50,
+
+	NULL1 = 0x0198,
+	WORLD = 0x0199,
+	DEBUG_REG_DMN = 0x01ff,
+};
+
+/* Conformance test limit (CTL) region codes.  Values correspond to the
+ * low byte of the unitary regdomains above (FCCx = 0x..10,
+ * ETSIx = 0x..30, MKKx = 0x..40). */
+enum {                          /* conformance test limits */
+	FCC = 0x10,
+	MKK = 0x40,
+	ETSI = 0x30,
+};
+/*
+ * The following are flags for different requirements per reg domain.
+ * These requirements are either inherited from the reg domain pair or
+ * from the unitary reg domain if the reg domain pair flags value is
+ * 0
+ */
+
+/* Per-regdomain requirement flags (bitmask; combined values must fit in
+ * REQ_MASK). */
+enum {
+	NO_REQ = 0x00000000,
+	DISALLOW_ADHOC_11A = 0x00000001,
+	DISALLOW_ADHOC_11A_TURB = 0x00000002,
+	NEED_NFC = 0x00000004,
+
+	ADHOC_PER_11D = 0x00000008,     /* Start Ad-Hoc mode */
+	ADHOC_NO_11A = 0x00000010,
+
+	PUBLIC_SAFETY_DOMAIN = 0x00000020,      /* public safety domain */
+	LIMIT_FRAME_4MS = 0x00000040,   /* 4msec limit on the frame length */
+
+	NO_HOSTAP = 0x00000080, /* No HOSTAP mode operation */
+
+	REQ_MASK = 0x000000FF,  /* Requirements bit mask */
+};
+
+static const REG_DMN_PAIR_MAPPING ah_cmn_reg_domain_pairs[] = {
+	{NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ, PSCAN_DEFER,
+	 0},
+	{NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+	{FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC3_ETSIC, FCC3, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC4_FCCA, FCC4, FCCA, DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 NO_REQ, PSCAN_DEFER, 0},
+	{FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+	{ETSI1_WORLD, ETSI1, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI2_WORLD, ETSI2, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI3_WORLD, ETSI3, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI4_WORLD, ETSI4, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI5_WORLD, ETSI5, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI6_WORLD, ETSI6, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI8_WORLD, ETSI8, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{ETSI9_WORLD, ETSI9, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+
+	{ETSI3_ETSIA, ETSI3, WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+	{FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL2_FCCA, APL2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL7_FCCA, APL7, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL9_MKKC, APL9, MKKC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL10_MKKC, APL10, MKKC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+
+	{MKK1_MKKA, MKK1, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN},
+	{MKK1_MKKB, MKK1, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G,
+	 CTRY_JAPAN1},
+	{MKK1_FCCA, MKK1, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1, CTRY_JAPAN2},
+	{MKK1_MKKA1, MKK1, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4},
+	{MKK1_MKKA2, MKK1, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5},
+	{MKK1_MKKC, MKK1, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1, CTRY_JAPAN6},
+
+	/* MKK2 */
+	{MKK2_MKKA, MKK2, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G,
+	 CTRY_JAPAN3},
+
+	/* MKK3 */
+	{MKK3_MKKA, MKK3, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKKA, CTRY_JAPAN25},
+	{MKK3_MKKB, MKK3, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN7},
+	{MKK3_MKKA1, MKK3, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26},
+	{MKK3_MKKA2, MKK3, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8},
+	{MKK3_MKKC, MKK3, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 NO_PSCAN, CTRY_JAPAN9},
+	{MKK3_FCCA, MKK3, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 NO_PSCAN, CTRY_JAPAN27},
+
+	/* MKK4 */
+	{MKK4_MKKA, MKK4, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN36},
+	{MKK4_MKKB, MKK4, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+	 CTRY_JAPAN10},
+	{MKK4_MKKA1, MKK4, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28},
+	{MKK4_MKKA2, MKK4, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11},
+	{MKK4_MKKC, MKK4, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN12},
+	{MKK4_FCCA, MKK4, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN29},
+
+	/* MKK5 */
+/*	{MKK5_MKKA,     MKK5,           MKKA,           DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3, CTRY_JAPAN56 },*/
+	{MKK5_MKKB, MKK5, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
+	 CTRY_JAPAN13},
+	{MKK5_MKKA2, MKK5, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
+	{MKK5_MKKC, MKK5, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN15},
+	{MKK5_FCCA,     MKK5,       FCCA,       DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3, CTRY_JAPAN56 },
+
+	/* MKK6 */
+	{MKK6_MKKB, MKK6, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
+	{MKK6_MKKA1, MKK6, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
+	{MKK6_MKKA2, MKK6, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
+	{MKK6_MKKC, MKK6, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1, CTRY_JAPAN18},
+	{MKK6_FCCA, MKK6, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1, CTRY_JAPAN31},
+
+	/* MKK7 */
+	{MKK7_MKKB, MKK7, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN19},
+	{MKK7_MKKA1, MKK7, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
+	{MKK7_MKKA2, MKK7, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN20},
+	{MKK7_MKKC, MKK7, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
+	{MKK7_FCCA, MKK7, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
+
+	/* MKK8 */
+	{MKK8_MKKB, MKK8, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN22},
+	{MKK8_MKKA2, MKK8, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN23},
+	{MKK8_MKKC, MKK8, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
+
+	{MKK9_MKKA, MKK9, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3, CTRY_JAPAN34},
+	{MKK9_FCCA, MKK9, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 NO_PSCAN, CTRY_JAPAN37},
+	{MKK9_MKKA1, MKK9, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
+	{MKK9_MKKA2, MKK9, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
+	{MKK9_MKKC, MKK9, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 NO_PSCAN, CTRY_JAPAN39},
+
+	{MKK10_MKKA, MKK10, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3, CTRY_JAPAN35},
+	{MKK10_FCCA, MKK10, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN41},
+	{MKK10_MKKA1, MKK10, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
+	{MKK10_MKKA2, MKK10, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
+	{MKK10_MKKC, MKK10, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN43},
+
+	{MKK11_MKKA, MKK11, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN45},
+	{MKK11_FCCA, MKK11, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN46},
+	{MKK11_MKKA1, MKK11, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
+	{MKK11_MKKA2, MKK11, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
+	{MKK11_MKKC, MKK11, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK3, CTRY_JAPAN48},
+
+	{MKK12_MKKA, MKK12, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
+	{MKK12_FCCA, MKK12, FCCA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
+	{MKK12_MKKA1, MKK12, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN52},
+	{MKK12_MKKA2, MKK12, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN54},
+	{MKK12_MKKC, MKK12, MKKC,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
+
+	{MKK13_MKKB, MKK13, MKKA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
+	 LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN57},
+
+	{MKK14_MKKA1, MKK14, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
+	{MKK15_MKKA1, MKK15, MKKA,
+	 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
+	 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
+
+	/* These are super domains */
+	{WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB, NO_REQ,
+	 PSCAN_DEFER, 0},
+	{WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+	{WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WORA_WORLD, WORA_WORLD, WORA_WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WORB_WORLD, WORB_WORLD, WORB_WORLD,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, 0},
+	{WORC_WORLD, WORC_WORLD, WORC_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
+};
+
+/*
+ * Country-code to regulatory-domain mapping table.
+ *
+ * Field order follows COUNTRY_CODE_TO_ENUM_RD (declared elsewhere):
+ * country enum, regdomain-pair enum, ISO 3166-1 alpha-2 code, display
+ * name, eight YES/NO capability flags, and a trailing frequency in MHz
+ * (7000 for every entry except UNITED STATES, which uses 5825 --
+ * presumably the maximum allowed 5 GHz frequency; TODO confirm against
+ * the struct definition).
+ *
+ * Several ISO codes appear more than once ("JP", "US", "AU", "CA",
+ * "BE", "KR") to distinguish regulatory variants of one country.
+ * Entries appear alphabetized by country name; preserve that ordering
+ * when adding entries.
+ */
+static const COUNTRY_CODE_TO_ENUM_RD ah_cmn_all_countries[] = {
+	{CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, YES, NO,
+	 NO, NO, 7000},
+	{CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, YES, NO,
+	 NO, NO, 7000},
+	{CTRY_ARGENTINA, FCC3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES, YES,
+	 YES, NO, NO, 7000},
+	{CTRY_ARUBA, ETSI1_WORLD, "AW", "ARUBA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_AUSTRALIA, FCC3_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_BAHAMAS, FCC3_WORLD, "BS", "BAHAMAS", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_BANGLADESH, NULL1_WORLD, "BD", "BANGLADESH", YES, NO, YES, YES,
+	 YES, NO, NO, NO, 7000},
+	{CTRY_BARBADOS, FCC2_WORLD, "BB", "BARBADOS", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BELGIUM2, ETSI4_WORLD, "BE", "BELGIUM2", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_BERMUDA, FCC3_FCCA, "BM", "BERMUDA", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLIVIA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA AND HERZEGOVINA", YES, NO,
+	 YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN", "BRUNEI DARUSSALAM", YES,
+	 YES, YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_BULGARIA, ETSI1_WORLD, "BG", "BULGARIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_CAMBODIA, ETSI1_WORLD, "KH", "CAMBODIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_CANADA, FCC3_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES, YES,
+	 YES, YES, NO, NO, 7000},
+	{CTRY_CROATIA, ETSI1_WORLD, "HR", "CROATIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_CZECH, ETSI1_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC", YES,
+	 YES, YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES, YES,
+	 YES, YES, NO, NO, 7000},
+	{CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_GREENLAND, ETSI1_WORLD, "GL", "GREENLAND", YES, NO, YES, YES, YES,
+	 YES, NO, NO, 7000},
+	{CTRY_GRENADA, FCC3_FCCA, "GD", "GRENADA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_GUAM, FCC1_FCCA, "GU", "GUAM", YES, NO, YES, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_HAITI, ETSI1_WORLD, "HT", "HAITI", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_HONDURAS, FCC3_WORLD, "HN", "HONDURAS", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_HONG_KONG, FCC3_WORLD, "HK", "HONG KONG", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_INDONESIA, APL2_WORLD, "ID", "INDONESIA", YES, NO, YES, YES, YES,
+	 YES, NO, NO, 7000},
+	{CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES, YES,
+	 YES, 7000},
+	{CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_ISRAEL, ETSI3_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_JAMAICA, FCC3_WORLD, "JM", "JAMAICA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_JAPAN, MKK5_MKKA2, "JP", "JAPAN", YES, NO, NO, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES, YES, NO,
+	 NO, 7000},
+	{CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+/*    {CTRY_JAPAN55,     MKK5_MKKA,     "JP", "JAPAN55",        YES,  NO,  NO, YES, YES, YES, YES, NO, 7000 },*/
+	{CTRY_JAPAN56,     MKK5_FCCA,     "JP", "JAPAN56",        YES,  NO,  NO,
+	 YES, YES, YES, YES, NO, 7000 },
+	{CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES, YES,
+	 YES, NO, NO, NO, 7000},
+	{CTRY_KENYA, APL1_WORLD, "KE", "KENYA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_KOREA_NORTH, APL9_MKKC, "KP", "NORTH KOREA", YES, NO, NO, YES,
+	 NO, YES, NO, NO, 7000},
+	{CTRY_KOREA_ROC, APL10_MKKC, "KR", "KOREA REPUBLIC", YES, NO, NO, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_KOREA_ROC3, APL9_MKKC, "KR", "KOREA REPUBLIC3", YES, NO, NO, YES,
+	 NO, YES, NO, NO, 7000},
+	{CTRY_KUWAIT, ETSI3_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_LEBANON, APL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO, YES,
+	 YES, YES, YES, YES, YES, 7000},
+	{CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_MACAU, FCC2_WORLD, "MO", "MACAU SAR", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_MACEDONIA, ETSI1_WORLD, "MK", "MACEDONIA, FYRO", YES, NO, YES,
+	 YES, YES, YES, YES, YES, 7000},
+	{CTRY_MALAYSIA, FCC1_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_MAURITIUS, ETSI1_WORLD, "MU", "MAURITIUS", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_MEXICO, FCC1_WORLD, "MX", "MEXICO", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_MOROCCO, APL4_WORLD, "MA", "MOROCCO", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN", "NETHERLANDS ANTILLES",
+	 YES, NO, YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_NICARAGUA, FCC3_FCCA, "NI", "NICARAGUA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_OMAN, FCC3_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES, YES,
+	 YES, 7000},
+	{CTRY_PAKISTAN, APL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES, YES,
+	 YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_PARAGUAY, FCC3_WORLD, "PY", "PARAGUAY", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_PERU, FCC3_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES, YES,
+	 YES, 7000},
+	{CTRY_PHILIPPINES, FCC3_WORLD, "PH", "PHILIPPINES", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_QATAR, APL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_ROMANIA, ETSI1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_RUSSIA, ETSI8_WORLD, "RU", "RUSSIA", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_RWANDA, APL1_WORLD, "RW", "RWANDA", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_SAUDI_ARABIA, FCC2_WORLD, "SA", "SAUDI ARABIA", YES, NO, YES, YES,
+	 YES, YES, NO, NO, 7000},
+	{CTRY_SERBIA, ETSI1_WORLD, "RS", "REPUBLIC OF SERBIA", YES, NO, YES,
+	 YES, YES, YES, YES, YES, 7000},
+	{CTRY_MONTENEGRO, ETSI1_WORLD, "ME", "MONTENEGRO", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_SINGAPORE, FCC3_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAKIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES, YES,
+	 YES, NO, NO, 7000},
+	{CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIAN ARAB REPUBLIC", YES, NO, YES,
+	 YES, YES, NO, NO, NO, 7000},
+	{CTRY_TAIWAN, APL7_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_TANZANIA, APL1_WORLD, "TZ", "TANZANIA", YES, YES, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_THAILAND, FCC3_WORLD, "TH", "THAILAND", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", "TRINIDAD AND TOBAGO", YES,
+	 NO, YES, YES, YES, YES, YES, YES, 7000},
+	{CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES, YES,
+	 YES, NO, NO, 7000},
+	{CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES, YES,
+	 NO, NO, 7000},
+	{CTRY_UGANDA, FCC3_WORLD, "UG", "UGANDA", YES, NO, NO, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_UKRAINE, ETSI9_WORLD, "UA", "UKRAINE", YES, NO, NO, YES, YES, YES,
+	 YES, NO, 7000},
+	{CTRY_UAE, ETSI1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO, YES,
+	 YES, YES, YES, YES, YES, 7000},
+	/* NB: the only entry with a 5825 MHz cap instead of 7000 */
+	{CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES, YES,
+	 YES, YES, YES, YES, YES, 5825},
+	{CTRY_UNITED_STATES2, FCC6_FCCA, "US", "UNITED STATES2", YES, YES, YES,
+	 YES, YES, YES, YES, YES, 7000},
+	{CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
+	 "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES, YES,
+	 YES, 7000},
+	{CTRY_URUGUAY, FCC3_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES, YES,
+	 YES, YES, 7000},
+	{CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES, YES,
+	 YES, YES, YES, YES, 7000},
+	{CTRY_VENEZUELA, FCC1_WORLD, "VE", "VENEZUELA", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_VIET_NAM, ETSI3_WORLD, "VN", "VIET NAM", YES, NO, YES, YES, YES,
+	 YES, YES, YES, 7000},
+	{CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES, NO, NO,
+	 NO, 7000},
+	{CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES, YES,
+	 NO, NO, NO, 7000}
+};
+
+/* Bit masks for DFS per regdomain */
+
+/*
+ * One bit per DFS regulatory authority; rows of the frequency-band
+ * tables below OR these together (e.g. DFS_FCC3 | DFS_ETSI).
+ * NOTE(review): the ULL suffixes have no effect on C enum constants
+ * (enumerators have type int); the values used here fit in an int, so
+ * this is harmless, but these should become #defines or a typedef'd
+ * 64-bit type if a mask ever needs to exceed the range of int.
+ */
+enum {
+	NO_DFS = 0x0000000000000000ULL,
+	DFS_FCC3 = 0x0000000000000001ULL,
+	DFS_ETSI = 0x0000000000000002ULL,
+	DFS_MKK4 = 0x0000000000000004ULL,
+};
+
+/* The table of frequency bands is indexed by a bitmask.  The ordering
+ * must be consistent with the enum below.  When adding a new
+ * frequency band, be sure its position in the enum matches the
+ * position of the correspondingly commented row in the table.
+ */
+
+/*
+ * 5GHz 11A channel tags
+ *
+ * Each tag names one row of the reg_dmn5_ghz_freq[] table below
+ * (FN_<low>_<high> = Nth power/pscan variant of the <low>-<high> MHz
+ * band).  Per the comment above, the enum ordering must stay
+ * consistent with the table, so insertions here require a matching
+ * insertion at the same position in reg_dmn5_ghz_freq[].
+ *
+ * NOTE(review): several tags (e.g. F1_4912_4947, F1_5032_5057,
+ * F1_5035_5045, F1_5500_5560) have no commented row in the visible
+ * portion of reg_dmn5_ghz_freq[], and F2_5660_5720/F3_5660_5720 are
+ * commented there as F2/F3_5660_5700 -- verify the tag/row alignment
+ * against the full table.
+ */
+enum {
+	F1_4912_4947,
+	F1_4915_4925,
+	F2_4915_4925,
+	F1_4935_4945,
+	F2_4935_4945,
+	F1_4920_4980,
+	F2_4920_4980,
+	F1_4942_4987,
+	F1_4945_4985,
+	F1_4950_4980,
+	F1_5032_5057,
+	F1_5035_5040,
+	F2_5035_5040,
+	F1_5035_5045,
+	F1_5040_5040,
+	F1_5040_5080,
+	F2_5040_5080,
+	F1_5055_5055,
+	F2_5055_5055,
+
+	F1_5120_5240,
+
+	F1_5170_5230,
+	F2_5170_5230,
+
+	F1_5180_5240,
+	F2_5180_5240,
+	F3_5180_5240,
+	F4_5180_5240,
+	F5_5180_5240,
+	F6_5180_5240,
+	F7_5180_5240,
+	F8_5180_5240,
+	F9_5180_5240,
+	F10_5180_5240,
+
+	F1_5240_5280,
+
+	F1_5260_5280,
+
+	F1_5260_5320,
+	F2_5260_5320,
+	F3_5260_5320,
+	F4_5260_5320,
+	F5_5260_5320,
+	F6_5260_5320,
+	F7_5260_5320,
+
+	F1_5260_5700,
+
+	F1_5280_5320,
+	F2_5280_5320,
+	F1_5500_5560,
+
+	F1_5500_5580,
+	F2_5500_5580,
+
+	F1_5500_5620,
+
+	F1_5500_5660,
+
+	F1_5500_5720,
+	F2_5500_5700,
+	F3_5500_5700,
+	F4_5500_5700,
+	F5_5500_5700,
+	F6_5500_5700,
+
+	F1_5660_5700,
+	F2_5660_5720,
+	F3_5660_5720,
+
+	F1_5745_5765,
+
+	F1_5745_5805,
+	F2_5745_5805,
+	F3_5745_5805,
+	F4_5745_5805,
+
+	F1_5745_5825,
+	F2_5745_5825,
+	F3_5745_5825,
+	F4_5745_5825,
+	F5_5745_5825,
+	F6_5745_5825,
+	F7_5745_5825,
+	F8_5745_5825,
+	F9_5745_5825,
+
+	F1_5845_5865,
+
+	W1_4920_4980,
+	W1_5040_5080,
+	W1_5170_5230,
+	W1_5180_5240,
+	W1_5260_5320,
+	W1_5745_5825,
+	W1_5500_5700,
+	A_DEMO_ALL_CHANNELS
+};
+
+static const REG_DMN_FREQ_BAND reg_dmn5_ghz_freq[] = {
+	{4915, 4925, 20, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},     /* F1_4915_4925 */
+	{4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},     /* F2_4915_4925 */
+	{4935, 4945, 20, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},     /* F1_4935_4945 */
+	{4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},     /* F2_4935_4945 */
+	{4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},     /* F1_4920_4980 */
+	{4920, 4980, 20, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},     /* F2_4920_4980 */
+	{4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},        /* F1_4942_4987 */
+	{4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},       /* F1_4945_4985 */
+	{4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},       /* F1_4950_4980 */
+	{5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},     /* F1_5035_5040 */
+	{5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},     /* F2_5035_5040 */
+	{5040, 5040, 20, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},     /* F1_5040_5040 */
+	{5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},     /* F1_5040_5080 */
+	{5040, 5080, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 6},       /* F2_5040_5080 */
+	{5055, 5055, 20, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},     /* F1_5055_5055 */
+	{5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},     /* F2_5055_5055 */
+
+	{5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},        /* F1_5120_5240 */
+
+	{5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},        /* F1_5170_5230 */
+	{5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},        /* F2_5170_5230 */
+
+	{5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 1}, /* F1_5180_5240 */
+	{5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},       /* F2_5180_5240 */
+	{5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 1}, /* F3_5180_5240 */
+	{5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 1}, /* F4_5180_5240 */
+	{5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 1}, /* F5_5180_5240 */
+	{5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 1},      /* F6_5180_5240 */
+	{5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},        /* F7_5180_5240 */
+	{5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 1},       /* F8_5180_5240 */
+	{5180, 5240, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},     /* F9_5180_5240 */
+	{5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 1}, /* F10_5180_5240 */
+
+	{5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},       /* F1_5240_5280 */
+
+	{5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 2},    /* F1_5260_5280 */
+
+	{5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 2},    /* F1_5260_5320 */
+
+	{5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
+	 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
+	/* F2_5260_5320 */
+
+	{5260, 5320, 24, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 2},    /* F3_5260_5320 */
+	{5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2}, /* F4_5260_5320 */
+	{5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2}, /* F5_5260_5320 */
+	{5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 2},       /* F6_5260_5320 */
+	{5260, 5320, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
+	 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
+	/* F7_5260_5320 */
+
+	{5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},   /* F1_5260_5700 */
+
+	{5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2}, /* F1_5280_5320 */
+
+	{5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 4},    /* F1_5500_5580 */
+	{5500, 5580, 30, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 4},    /* F2_5500_5580 */
+
+	{5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 3},   /* F1_5500_5620 */
+
+	{5500, 5660, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 0},    /* F1_5500_5660 */
+
+	{5500, 5720, 24, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4}, /* F1_5500_5720 */
+	{5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 3},    /* F2_5500_5700 */
+	{5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 3},    /* F3_5500_5700 */
+	{5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4, PSCAN_MKK3 | PSCAN_FCC, 0}, /* F4_5500_5700 */
+	{5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},   /* F5_5500_5700 */
+	{5500, 5700, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4, PSCAN_MKK3 | PSCAN_FCC, 0}, /* F6_5500_5700 */
+
+	{5660, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 4},    /* F1_5660_5700 */
+	{5660, 5700, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 4},    /* F2_5660_5700 */
+	{5660, 5700, 30, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, 4},    /* F3_5660_5700 */
+
+	{5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 3},       /* F1_5745_5805 */
+	{5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},       /* F2_5745_5805 */
+	{5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},     /* F3_5745_5805 */
+	{5745, 5805, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},       /* F4_5745_5805 */
+
+	{5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 5},        /* F1_5745_5825 */
+	{5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 5},       /* F2_5745_5825 */
+	{5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},       /* F3_5745_5825 */
+	{5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},       /* F4_5745_5825 */
+	{5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 5},       /* F5_5745_5825 */
+	{5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 5},       /* F6_5745_5825 */
+	{5745, 5825, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},     /* F7_5745_5825 */
+	{5745, 5825, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},     /* F8_5745_5825 */
+
+	/*
+	 * Below are the world roaming channels
+	 * All WWR domains have no power limit, instead use the card's CTL
+	 * or max power settings.
+	 */
+	{4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},      /* W1_4920_4980 */
+	{5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},      /* W1_5040_5080 */
+	{5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},      /* W1_5170_5230 */
+	{5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},      /* W1_5180_5240 */
+	{5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0}, /* W1_5260_5320 */
+	{5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},      /* W1_5745_5825 */
+	{5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0}, /* W1_5500_5700 */
+	{4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},       /* A_DEMO_ALL_CHANNELS */
+};
+
+/*
+ * 2GHz 11b channel tags.
+ *
+ * Each enumerator is an index into reg_dmn2_ghz_freq[] below, so the
+ * declaration order here must match that table's entry order exactly.
+ * Names encode <tag>_<low_MHz>_<high_MHz>; F* tags are regulatory
+ * (per-domain) bands, W* tags are the world-roaming variants.
+ */
+enum {
+	F1_2312_2372,
+	F2_2312_2372,
+
+	F1_2412_2472,
+	F2_2412_2472,
+	F3_2412_2472,
+	F4_2412_2472,
+
+	F1_2412_2462,
+	F2_2412_2462,
+
+	F1_2432_2442,
+
+	F1_2457_2472,
+
+	F1_2467_2472,
+
+	F1_2484_2484,
+	F2_2484_2484,
+
+	F1_2512_2732,
+
+	W1_2312_2372,
+	W1_2412_2412,
+	W1_2417_2432,
+	W1_2437_2442,
+	W1_2447_2457,
+	W1_2462_2462,
+	W1_2467_2467,
+	W2_2467_2467,
+	W1_2472_2472,
+	W2_2472_2472,
+	W1_2484_2484,
+	W2_2484_2484,
+};
+
+/*
+ * 2GHz 11b frequency-band table, indexed by the channel tags above
+ * (one entry per tag, same order -- keep the two in sync).
+ *
+ * Field order appears to be { low_freq MHz, high_freq MHz, power dBm,
+ * antenna_max, channel_bw MHz, channel_sep MHz, dfs_flags,
+ * passive_scan_flags, reg_class_id } -- confirm against the
+ * REG_DMN_FREQ_BAND definition declared elsewhere in this file.
+ */
+static const REG_DMN_FREQ_BAND reg_dmn2_ghz_freq[] = {
+	{2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* F1_2312_2372 */
+	{2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* F2_2312_2372 */
+
+	{2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* F1_2412_2472 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 30},     /* F2_2412_2472 */
+	{2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 4},        /* F3_2412_2472 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},      /* F4_2412_2472 */
+
+	{2412, 2462, 30, 6, 20, 5, NO_DFS, NO_PSCAN, 12},       /* F1_2412_2462 */
+	{2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 30},     /* F2_2412_2462 */
+
+	{2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 4},        /* F1_2432_2442 */
+
+	{2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* F1_2457_2472 */
+
+	{2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 30},       /* F1_2467_2472 */
+
+	{2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* F1_2484_2484 */
+	{2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 31}, /* F2_2484_2484 */
+
+	{2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* F1_2512_2732 */
+
+	/*
+	 * World-roaming (W*) bands open power up to 20 dBm; the effective
+	 * limit should usually come from the CTL / max-power settings.
+	 */
+
+	{2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2312_2372 */
+	{2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2412_2412 */
+	{2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2417_2432 */
+	{2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2437_2442 */
+	{2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2447_2457 */
+	{2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* W1_2462_2462 */
+	{2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, /* W1_2467_2467 */
+	{2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},  /* W2_2467_2467 */
+	{2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, /* W1_2472_2472 */
+	{2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},  /* W2_2472_2472 */
+	{2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, /* W1_2484_2484 */
+	{2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},  /* W2_2484_2484 */
+};
+
+/*
+ * 2GHz 11g channel tags.
+ *
+ * Each enumerator is an index into reg_dmn2_ghz11g_freq[] below, so
+ * the declaration order here must match that table's entry order
+ * exactly.  G* tags are regulatory (per-domain) bands, WG* tags are
+ * the world-roaming variants, and the G_DEMO_* tags cover the wide
+ * demonstration ranges.
+ */
+
+enum {
+	G1_2312_2372,
+	G2_2312_2372,
+
+	G1_2412_2472,
+	G2_2412_2472,
+	G3_2412_2472,
+	G4_2412_2472,
+
+	G1_2412_2462,
+	G2_2412_2462,
+
+	G1_2432_2442,
+
+	G1_2457_2472,
+
+	G1_2512_2732,
+
+	G1_2467_2472,
+	G2_2467_2472,
+
+	G1_2484_2484,
+
+	WG1_2312_2372,
+	WG1_2412_2462,
+	WG1_2412_2472,
+	WG2_2412_2472,
+	G_DEMO_ALMOST_ALL_CHANNELS,
+	G_DEMO_ALL_CHANNELS,
+};
+
+/*
+ * 2GHz 11g frequency-band table, indexed by the 11g channel tags
+ * above (one entry per tag, same order -- keep the two in sync).
+ * Field layout matches reg_dmn2_ghz_freq[]; see the note there.
+ */
+static const REG_DMN_FREQ_BAND reg_dmn2_ghz11g_freq[] = {
+	{2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* G1_2312_2372 */
+	{2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* G2_2312_2372 */
+
+	{2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* G1_2412_2472 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G | PSCAN_MKKA2 | PSCAN_MKKA | PSCAN_EXT_CHAN, 30},       /* G2_2412_2472 */
+	{2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 4},        /* G3_2412_2472 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G | PSCAN_MKKA2 | PSCAN_MKKA | PSCAN_EXT_CHAN, 0},        /* G4_2412_2472 */
+
+	{2412, 2462, 30, 6, 20, 5, NO_DFS, NO_PSCAN, 12},       /* G1_2412_2462 */
+	{2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 30},   /* G2_2412_2462 */
+
+	{2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 4},        /* G1_2432_2442 */
+
+	{2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* G1_2457_2472 */
+
+	{2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* G1_2512_2732 */
+
+	{2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 30},       /* G1_2467_2472 */
+	{2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G | PSCAN_MKKA2, 0},      /* G2_2467_2472 */
+
+	{2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, /* G1_2484_2484 */
+	/*
+	 * World-roaming (WG*) bands open power up to 20 dBm; the effective
+	 * limit should usually come from the CTL / max-power settings.
+	 */
+
+	{2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* WG1_2312_2372 */
+	{2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},        /* WG1_2412_2462 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN | PSCAN_EXT_CHAN, 0},        /* WG1_2412_2472 */
+	{2412, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},  /* WG2_2412_2472 */
+	{2312, 2532, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},        /* G_DEMO_ALMOST_ALL_CHANNELS */
+	{2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},        /* G_DEMO_ALL_CHANNELS */
+};
+
+/*
+ * Regulatory capability bits -- presumably read from the EEPROM
+ * regulatory-capability (EEREGCAP) word; confirm against the EEPROM
+ * layout.  They gate which Japan (KK/MKK) sub-band groups may be
+ * enabled; j_bandcheck[] below pairs each bit with its band tag.
+ */
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN    0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2         0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND    0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD     0x0400
+
+/*
+ * Japan band-enable checks: each entry pairs a 5GHz frequency-band
+ * tag with the EEPROM regulatory-capability bit that must be set for
+ * that band group (UNI-1 odd / UNI-1 even / UNI-2 / mid-band) to be
+ * allowed in the MKK domains.
+ */
+static const JAPAN_BANDCHECK j_bandcheck[] = {
+	{F1_5170_5230, REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD},
+	{F4_5180_5240, REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
+	{F2_5260_5320, REGDMN_EEPROM_EEREGCAP_EN_KK_U2},
+	{F4_5500_5700, REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND}
+};
+
+/*
+ * Common-mode power limits per 5GHz frequency range.
+ * Fields appear to be { low_freq MHz, high_freq MHz, power dBm } --
+ * confirm against the COMMON_MODE_POWER definition declared elsewhere
+ * in this file.
+ */
+static const COMMON_MODE_POWER common_mode_pwrtbl[] = {
+	{4900, 5000, 17},
+	{5000, 5100, 17},
+	{5150, 5250, 17},       /* ETSI & MKK */
+	{5250, 5350, 18},       /* ETSI */
+	{5470, 5725, 20},       /* ETSI */
+	{5725, 5825, 20},       /* Singapore */
+	{5825, 5850, 23} /* Korea */
+};
+
+/*
+ * 5GHz Turbo (dynamic & static) channel tags.
+ *
+ * T* names encode <low_MHz>_<high_MHz>; WT* entries are the
+ * world-roaming variants.  These tags index the 5GHz turbo frequency
+ * table (defined elsewhere in this file) and are referenced by the
+ * turbo-channel bitmaps in ah_cmn_reg_domains[] below; declaration
+ * order must match that table's entry order exactly.
+ */
+
+enum {
+	T1_5130_5650,
+	T1_5150_5670,
+
+	T1_5200_5200,
+	T2_5200_5200,
+	T3_5200_5200,
+	T4_5200_5200,
+	T5_5200_5200,
+	T6_5200_5200,
+	T7_5200_5200,
+	T8_5200_5200,
+
+	T1_5200_5280,
+	T2_5200_5280,
+	T3_5200_5280,
+	T4_5200_5280,
+	T5_5200_5280,
+	T6_5200_5280,
+
+	T1_5200_5240,
+	T1_5210_5210,
+	T2_5210_5210,
+	T3_5210_5210,
+	T4_5210_5210,
+	T5_5210_5210,
+	T6_5210_5210,
+	T7_5210_5210,
+	T8_5210_5210,
+	T9_5210_5210,
+	T10_5210_5210,
+	T1_5240_5240,
+
+	T1_5210_5250,
+	T1_5210_5290,
+	T2_5210_5290,
+	T3_5210_5290,
+
+	T1_5280_5280,
+	T2_5280_5280,
+	T1_5290_5290,
+	T2_5290_5290,
+	T3_5290_5290,
+	T1_5250_5290,
+	T2_5250_5290,
+	T3_5250_5290,
+	T4_5250_5290,
+
+	T1_5540_5660,
+	T2_5540_5660,
+	T3_5540_5660,
+	T1_5760_5800,
+	T2_5760_5800,
+	T3_5760_5800,
+	T4_5760_5800,
+	T5_5760_5800,
+	T6_5760_5800,
+	T7_5760_5800,
+
+	T1_5765_5805,
+	T2_5765_5805,
+	T3_5765_5805,
+	T4_5765_5805,
+	T5_5765_5805,
+	T6_5765_5805,
+	T7_5765_5805,
+	T8_5765_5805,
+	T9_5765_5805,
+
+	WT1_5210_5250,
+	WT1_5290_5290,
+	WT1_5540_5660,
+	WT1_5760_5800,
+};
+
+/*
+ * 2GHz Dynamic-turbo channel tags and their frequency-band table.
+ * The enum tags index reg_dmn2_ghz11g_turbo_freq[] (same order);
+ * field layout matches reg_dmn2_ghz_freq[] but with 40 MHz bandwidth
+ * and channel separation.  The whole group can be compiled out with
+ * ATH_REMOVE_2G_TURBO_RD_TABLE.
+ */
+#ifndef ATH_REMOVE_2G_TURBO_RD_TABLE
+enum {
+	T1_2312_2372,
+	T1_2437_2437,
+	T2_2437_2437,
+	T3_2437_2437,
+	T1_2512_2732
+};
+
+static const REG_DMN_FREQ_BAND reg_dmn2_ghz11g_turbo_freq[] = {
+	{2312, 2372, 5, 6, 40, 40, NO_DFS, NO_PSCAN, 0},        /* T1_2312_2372 */
+	{2437, 2437, 5, 6, 40, 40, NO_DFS, NO_PSCAN, 0},        /* T1_2437_2437 */
+	{2437, 2437, 20, 6, 40, 40, NO_DFS, NO_PSCAN, 0},       /* T2_2437_2437 */
+	{2437, 2437, 18, 6, 40, 40, NO_DFS, PSCAN_WWR, 0},      /* T3_2437_2437 */
+	{2512, 2732, 5, 6, 40, 40, NO_DFS, NO_PSCAN, 0},        /* T1_2512_2732 */
+};
+#endif /* ATH_REMOVE_2G_TURBO_RD_TABLE */
+
+static const REG_DOMAIN ah_cmn_reg_domains[] = {
+
+	{DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BM(A_DEMO_ALL_CHANNELS, F6_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(T1_5130_5650, T1_5150_5670, F6_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732,
+	    -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(G_DEMO_ALMOST_ALL_CHANNELS,
+	    G1_2484_2484, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T1_2312_2372, T1_2437_2437, T1_2512_2732,
+			 -1, -1, -1, -1, -1, -1, -1, -1, -1)},
+
+	{APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL3, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
+	 BM(F1_5280_5320, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F5_5180_5240, F9_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
+	 BM(F9_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL7, FCC, DFS_FCC3 | DFS_ETSI, PSCAN_FCC | PSCAN_ETSI, NO_REQ,
+	 BM(F2_5280_5320, F2_5500_5580, F3_5660_5720, F7_5745_5825, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL8, ETSI, NO_DFS, NO_PSCAN,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F9_5180_5240, F2_5260_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F9_5180_5240, F2_5260_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL11, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F9_5180_5240, F2_5260_5320, F5_5500_5700, F7_5745_5825,
+	    F1_5845_5865, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{APL12, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F5_5180_5240, F1_5500_5560, F1_5745_5765, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSI8, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, F2_5260_5320, F1_5660_5700, F4_5745_5825, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 BMZERO},
+
+	{ETSI9, ETSI, DFS_ETSI, PSCAN_ETSI,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, F2_5260_5320, F1_5500_5660, F8_5745_5825, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 BMZERO},
+
+	{FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
+	 BM(F2_5180_5240, F3_5260_5320, F1_5500_5720, F5_5745_5825, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+	/*
+
+	   Bug Fix: EV 98583 Public Safety channel
+	   Exclude the following channel in FCC Public safety domain
+	   Uni-1: 5180, 5200, 5220, 5240
+	   Uni-2: 5260, 5280, 5300, 5320
+	   Uni-3: 5745, 5765, 5785, 5805, 5825
+	 */
+	{FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
+	 BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
+	 BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F2_5660_5720,
+	    F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{MKK1, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_5170_5230, F10_5180_5240, F7_5260_5320, F4_5500_5700, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	{MKK2, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_4915_4925, F2_4935_4945, F1_4920_4980, F1_5035_5040,
+	    F2_5055_5055, F1_5040_5080, F1_5170_5230, F10_5180_5240, -1, -1, -1,
+	    -1),
+	 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 even */
+	{MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 even + UNI-2 */
+	{MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 even + UNI-2 + mid-band */
+	{MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F4_5180_5240, F2_5260_5320, F6_5500_5700, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + even */
+	{MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + UNI-1 even + UNI-2 */
+	{MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1,
+	    -1, -1, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + UNI-1 even + UNI-2 + mid-band */
+	{MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_5170_5230, F4_5180_5240, F2_5260_5320, F6_5500_5700, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 even + 4.9 GHZ */
+	{MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4912_4947, F1_5032_5057, F1_4915_4925, F1_4935_4945,
+	    F2_4920_4980, F1_5035_5045, F1_5055_5055, F2_5040_5080,
+	    F4_5180_5240, -1, -1, -1),
+	 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 even + UNI-2 + 4.9 GHZ */
+	{MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4912_4947, F1_5032_5057, F1_4915_4925, F1_4935_4945,
+	    F2_4920_4980, F1_5035_5045, F1_5055_5055, F2_5040_5080,
+	    F4_5180_5240, F2_5260_5320, -1, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* Japan UNI-1 even + UNI-2 + mid-band + 4.9GHz */
+	{MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4912_4947, F1_5032_5057, F1_4915_4925, F1_4935_4945,
+	    F2_4920_4980, F1_5035_5045, F1_5055_5055, F2_5040_5080,
+	    F4_5180_5240, F2_5260_5320, F6_5500_5700, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* Japan UNI-1 even + UNI-1 odd + UNI-2 + mid-band + 4.9GHz */
+	{MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4915_4925, F1_4935_4945, F2_4920_4980, F1_5040_5040,
+	    F1_5055_5055, F2_5040_5080, F2_5170_5230, F4_5180_5240,
+	    F2_5260_5320, F6_5500_5700, -1, -1),
+	 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + UNI-1 even + UNI-2 + mid-band */
+	{MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 BM(F2_5170_5230, F7_5180_5240, F2_5260_5320, F6_5500_5700, -1, -1, -1,
+	    -1, -1, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + UNI-1 even + 4.9GHz */
+	{MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4915_4925, F1_4935_4945, F2_4920_4980, F1_5040_5040,
+	    F2_5040_5080, F1_5055_5055, F2_5170_5230, F4_5180_5240, -1, -1, -1,
+	    -1),
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/* UNI-1 odd + UNI-1 even + UNI-2 + 4.9GHz */
+	{MKK15, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
+	 BM(F1_4915_4925, F1_4935_4945, F2_4920_4980, F1_5040_5040,
+	    F2_5040_5080, F1_5055_5055, F2_5170_5230, F4_5180_5240,
+	    F2_5260_5320, -1, -1, -1),
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+
+	/*=== 2 GHz ===*/
+
+	/* Defined here to use when 2G channels are authorised for country K2 */
+	{APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(F2_2312_2372, F4_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(G2_2312_2372, G4_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BMZERO},
+
+	{ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 CHAN_11A_BMZERO CHAN_11A_BMZERO CHAN_11A_BMZERO BM(F1_2457_2472, -1,
+							    -1, -1, -1, -1, -1,
+							    -1, -1, -1, -1, -1),
+	 BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 CHAN_11A_BMZERO CHAN_11A_BMZERO CHAN_11A_BMZERO BM(F1_2432_2442, -1,
+							    -1, -1, -1, -1, -1,
+							    -1, -1, -1, -1, -1),
+	 BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
+	 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
+	 CHAN_11A_BMZERO CHAN_11A_BMZERO CHAN_11A_BMZERO BM(F3_2412_2472, -1,
+							    -1, -1, -1, -1, -1,
+							    -1, -1, -1, -1, -1),
+	 BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{MKKA, MKK, NO_DFS,
+	 PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G | PSCAN_MKKA2 |
+	 PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
+	 CHAN_11A_BMZERO CHAN_11A_BMZERO CHAN_11A_BMZERO BM(F2_2412_2462,
+							    F1_2467_2472,
+							    F2_2484_2484,
+							    -1, -1, -1, -1, -1,
+							    -1, -1, -1, -1),
+	 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(F4_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 BM(G4_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     W1_5500_5700, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+	    W1_2447_2457,
+	    -1, -1, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     W1_5500_5700, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     W1_5500_5700, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     W1_5500_5700, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     W1_5500_5700, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+	    W1_2447_2457,
+	    -1, -1, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
+	    W1_2447_2457,
+	    -1, -1, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5500_5700,
+		     -1, -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{WORC_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
+	 CHAN_11A_BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, W1_5745_5825,
+		     -1, -1, -1, -1, -1, -1, -1, -1)
+	 CHAN_11A_BMZERO
+	 CHAN_11A_BMZERO
+	 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
+	    W1_2417_2432,
+	    W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+	 BM(WG1_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+	 CHAN_TURBO_G_BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+			 -1)},
+
+	{NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
+	 CHAN_11A_BMZERO CHAN_11A_BMZERO CHAN_11A_BMZERO BMZERO,
+	 BMZERO,
+	 CHAN_TURBO_G_BMZERO},
+};
+
+static const struct cmode modes[] = {
+	{REGDMN_MODE_TURBO, IEEE80211_CHAN_ST}, /* TURBO means 11a Static Turbo */
+	{REGDMN_MODE_11A, IEEE80211_CHAN_A},
+	{REGDMN_MODE_11B, IEEE80211_CHAN_B},
+	{REGDMN_MODE_11G, IEEE80211_CHAN_PUREG},
+	{REGDMN_MODE_11G_TURBO, IEEE80211_CHAN_108G},
+	{REGDMN_MODE_11A_TURBO, IEEE80211_CHAN_108A},
+	{REGDMN_MODE_11NG_HT20, IEEE80211_CHAN_11NG_HT20},
+	{REGDMN_MODE_11NG_HT40PLUS, IEEE80211_CHAN_11NG_HT40PLUS},
+	{REGDMN_MODE_11NG_HT40MINUS, IEEE80211_CHAN_11NG_HT40MINUS},
+	{REGDMN_MODE_11NA_HT20, IEEE80211_CHAN_11NA_HT20},
+	{REGDMN_MODE_11NA_HT40PLUS, IEEE80211_CHAN_11NA_HT40PLUS},
+	{REGDMN_MODE_11NA_HT40MINUS, IEEE80211_CHAN_11NA_HT40MINUS},
+	{REGDMN_MODE_11AC_VHT20, IEEE80211_CHAN_11AC_VHT20},
+	{REGDMN_MODE_11AC_VHT40PLUS, IEEE80211_CHAN_11AC_VHT40PLUS},
+	{REGDMN_MODE_11AC_VHT40MINUS, IEEE80211_CHAN_11AC_VHT40MINUS},
+	{REGDMN_MODE_11AC_VHT80, IEEE80211_CHAN_11AC_VHT80},
+	{REGDMN_MODE_11AC_VHT20_2G, IEEE80211_CHAN_11AC_VHT20_2G},
+	{REGDMN_MODE_11AC_VHT40_2G, IEEE80211_CHAN_11AC_VHT40_2G},
+	{REGDMN_MODE_11AC_VHT80_2G, IEEE80211_CHAN_11AC_VHT80_2G},
+};
+
+typedef enum offset {
+	BW20 = 0,
+	BW40_LOW_PRIMARY = 1,
+	BW40_HIGH_PRIMARY = 3,
+	BW80,
+	BWALL
+} offset_t;
+
+typedef struct _regdm_op_class_map {
+	uint8_t op_class;
+	uint8_t ch_spacing;
+	offset_t offset;
+	uint8_t channels[MAX_CHANNELS_PER_OPERATING_CLASS];
+} regdm_op_class_map_t;
+
+typedef struct _regdm_supp_op_classes {
+	uint8_t num_classes;
+	uint8_t classes[SIR_MAC_MAX_SUPP_OPER_CLASSES];
+} regdm_supp_op_classes;
+
+uint16_t cds_regdm_get_opclass_from_channel(uint8_t *country, uint8_t channel,
+					    uint8_t offset);
+uint16_t cds_regdm_set_curr_opclasses(uint8_t num_classes, uint8_t *class);
+uint16_t cds_regdm_get_curr_opclasses(uint8_t *num_classes, uint8_t *class);

+ 451 - 0
core/cds/inc/cds_sched.h

@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_SCHED_H )
+#define __CDS_SCHED_H
+
+/**=========================================================================
+
+   \file  cds_sched.h
+
+   \brief Connectivity driver services scheduler
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cdf_event.h>
+#include "i_cdf_types.h"
+#include <linux/wait.h>
+#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif
+#include <cds_mq.h>
+#include <cdf_types.h>
+#include "cdf_lock.h"
+
+#define TX_POST_EVENT_MASK               0x001
+#define TX_SUSPEND_EVENT_MASK            0x002
+#define MC_POST_EVENT_MASK               0x001
+#define MC_SUSPEND_EVENT_MASK            0x002
+#define RX_POST_EVENT_MASK               0x001
+#define RX_SUSPEND_EVENT_MASK            0x002
+#define TX_SHUTDOWN_EVENT_MASK           0x010
+#define MC_SHUTDOWN_EVENT_MASK           0x010
+#define RX_SHUTDOWN_EVENT_MASK           0x010
+#define WD_POST_EVENT_MASK               0x001
+#define WD_SHUTDOWN_EVENT_MASK           0x002
+#define WD_CHIP_RESET_EVENT_MASK         0x004
+#define WD_WLAN_SHUTDOWN_EVENT_MASK      0x008
+#define WD_WLAN_REINIT_EVENT_MASK        0x010
+
+/*
+ * Maximum number of messages in the system
+ * These are buffers to account for all current messages
+ * with some accounting of what we think is a
+ * worst-case scenario.  Must be able to handle all
+ * incoming frames, as well as overhead for internal
+ * messaging
+ *
+ * Increased to 8000 to handle more RX frames
+ */
+#define CDS_CORE_MAX_MESSAGES 8000
+
+#ifdef QCA_CONFIG_SMP
+/*
+** Maximum number of cds messages to be allocated for
+** OL Rx thread.
+*/
+#define CDS_MAX_OL_RX_PKT 4000
+
+typedef void (*cds_ol_rx_thread_cb)(void *context, void *rxpkt, uint16_t staid);
+#endif
+
+/*
+** CDF Message queue definition.
+*/
+typedef struct _cds_mq_type {
+	/* Lock use to synchronize access to this message queue */
+	spinlock_t mqLock;
+
+	/* List of vOS Messages waiting on this queue */
+	struct list_head mqList;
+
+} cds_mq_type, *p_cds_mq_type;
+
+#ifdef QCA_CONFIG_SMP
+/*
+** CDS message wrapper for data rx from TXRX
+*/
+struct cds_ol_rx_pkt {
+	struct list_head list;
+	void *context;
+
+	/* Rx skb */
+	void *Rxpkt;
+
+	/* Station id to which this packet is destined */
+	uint16_t staId;
+
+	/* Call back to further send this packet to txrx layer */
+	cds_ol_rx_thread_cb callback;
+
+};
+#endif
+
+/*
+** CDS Scheduler context
+** The scheduler context contains the following:
+**   ** the messages queues
+**   ** the handle to the tread
+**   ** pointer to the events that gracefully shutdown the MC and Tx threads
+**
+*/
+typedef struct _cds_sched_context {
+	/* Place holder to the CDS Context */
+	void *pVContext;
+	/* WMA Message queue on the Main thread */
+	cds_mq_type wmaMcMq;
+
+	/* PE Message queue on the Main thread */
+	cds_mq_type peMcMq;
+
+	/* SME Message queue on the Main thread */
+	cds_mq_type smeMcMq;
+
+	/* SYS Message queue on the Main thread */
+	cds_mq_type sysMcMq;
+
+	/* Handle of Event for MC thread to signal startup */
+	struct completion McStartEvent;
+
+	struct task_struct *McThread;
+
+	/* completion object for MC thread shutdown */
+	struct completion McShutdown;
+
+	/* Wait queue for MC thread */
+	wait_queue_head_t mcWaitQueue;
+
+	unsigned long mcEventFlag;
+
+	/* Completion object to resume Mc thread */
+	struct completion ResumeMcEvent;
+
+	/* lock to make sure that McThread suspend/resume mechanism is in sync */
+	spinlock_t McThreadLock;
+#ifdef QCA_CONFIG_SMP
+	spinlock_t ol_rx_thread_lock;
+
+	/* OL Rx thread handle */
+	struct task_struct *ol_rx_thread;
+
+	/* Handle of Event for Rx thread to signal startup */
+	struct completion ol_rx_start_event;
+
+	/* Completion object to suspend OL rx thread */
+	struct completion ol_suspend_rx_event;
+
+	/* Completion objext to resume OL rx thread */
+	struct completion ol_resume_rx_event;
+
+	/* Completion object for OL Rxthread shutdown */
+	struct completion ol_rx_shutdown;
+
+	/* Waitq for OL Rx thread */
+	wait_queue_head_t ol_rx_wait_queue;
+
+	unsigned long ol_rx_event_flag;
+
+	/* Rx buffer queue */
+	struct list_head ol_rx_thread_queue;
+
+	/* Spinlock to synchronize between tasklet and thread */
+	spinlock_t ol_rx_queue_lock;
+
+	/* Rx queue length */
+	unsigned int ol_rx_queue_len;
+
+	/* Lock to synchronize free buffer queue access */
+	spinlock_t cds_ol_rx_pkt_freeq_lock;
+
+	/* Free message queue for OL Rx processing */
+	struct list_head cds_ol_rx_pkt_freeq;
+
+	/* cpu hotplug notifier */
+	struct notifier_block *cpu_hot_plug_notifier;
+#endif
+} cds_sched_context, *p_cds_sched_context;
+
+/**
+ * struct cds_log_complete - Log completion internal structure
+ * @is_fatal: Type is fatal or not
+ * @indicator: Source of bug report
+ * @reason_code: Reason code for bug report
+ * @is_report_in_progress: If bug report is in progress
+ *
+ * This structure internally stores the log related params
+ */
+struct cds_log_complete {
+	uint32_t is_fatal;
+	uint32_t indicator;
+	uint32_t reason_code;
+	bool is_report_in_progress;
+};
+
+/*
+** CDS Sched Msg Wrapper
+** Wrapper messages so that they can be chained to their respective queue
+** in the scheduler.
+*/
+typedef struct _cds_msg_wrapper {
+	/* Message node */
+	struct list_head msgNode;
+
+	/* the Vos message it is associated to */
+	cds_msg_t *pVosMsg;
+
+} cds_msg_wrapper, *p_cds_msg_wrapper;
+
+typedef struct _cds_context_type {
+	/* Messages buffers */
+	cds_msg_t aMsgBuffers[CDS_CORE_MAX_MESSAGES];
+
+	cds_msg_wrapper aMsgWrappers[CDS_CORE_MAX_MESSAGES];
+
+	/* Free Message queue */
+	cds_mq_type freeVosMq;
+
+	/* Scheduler Context */
+	cds_sched_context cdf_sched;
+
+	/* HDD Module Context  */
+	void *pHDDContext;
+
+	/* MAC Module Context  */
+	void *pMACContext;
+
+#ifndef WLAN_FEATURE_MBSSID
+	/* SAP Context */
+	void *pSAPContext;
+#endif
+
+	cdf_event_t ProbeEvent;
+
+	volatile uint8_t isLogpInProgress;
+
+	cdf_event_t wmaCompleteEvent;
+
+	/* WMA Context */
+	void *pWMAContext;
+
+	void *pHIFContext;
+
+	void *htc_ctx;
+
+	void *epping_ctx;
+	/*
+	 * cdf_ctx will be used by cdf
+	 * while allocating dma memory
+	 * to access dev information.
+	 */
+	cdf_device_t cdf_ctx;
+
+	void *pdev_txrx_ctx;
+
+	/* Configuration handle used to get system configuration */
+	void *cfg_ctx;
+
+	volatile uint8_t isLoadUnloadInProgress;
+
+	bool is_wakelock_log_enabled;
+	uint32_t wakelock_log_level;
+	uint32_t connectivity_log_level;
+	uint32_t packet_stats_log_level;
+	uint32_t driver_debug_log_level;
+	uint32_t fw_debug_log_level;
+	struct cds_log_complete log_complete;
+	cdf_spinlock_t bug_report_lock;
+	cdf_event_t connection_update_done_evt;
+
+} cds_context_type, *p_cds_contextType;
+
+/*---------------------------------------------------------------------------
+   Function declarations and documenation
+   ---------------------------------------------------------------------------*/
+
+#ifdef QCA_CONFIG_SMP
+/*---------------------------------------------------------------------------
+   \brief cds_drop_rxpkt_by_staid() - API to drop pending Rx packets for a sta
+   The \a cds_drop_rxpkt_by_staid() drops queued packets for a station, to drop
+   all the pending packets the caller has to send WLAN_MAX_STA_COUNT as staId.
+   \param  pSchedContext - pointer to the global CDS Sched Context
+   \param staId - Station Id
+
+   \return Nothing
+   \sa cds_drop_rxpkt_by_staid()
+   -------------------------------------------------------------------------*/
+void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId);
+
+/*---------------------------------------------------------------------------
+   \brief cds_indicate_rxpkt() - API to Indicate rx data packet
+   The \a cds_indicate_rxpkt() enqueues the rx packet onto ol_rx_thread_queue
+   and notifies cds_ol_rx_thread().
+   \param  Arg - pointer to the global CDS Sched Context
+   \param pkt - Vos data message buffer
+
+   \return Nothing
+   \sa cds_indicate_rxpkt()
+   -------------------------------------------------------------------------*/
+void cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
+			struct cds_ol_rx_pkt *pkt);
+
+/*---------------------------------------------------------------------------
+   \brief cds_alloc_ol_rx_pkt() - API to return next available cds message
+   The \a cds_alloc_ol_rx_pkt() returns next available cds message buffer
+   used for Rx Data processing.
+   \param pSchedContext - pointer to the global CDS Sched Context
+
+   \return pointer to cds message buffer
+   \sa cds_alloc_ol_rx_pkt()
+   -------------------------------------------------------------------------*/
+struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext);
+
+/*---------------------------------------------------------------------------
+   \brief cds_free_ol_rx_pkt() - API to release cds message to the freeq
+   The \a cds_free_ol_rx_pkt() returns the cds message used for Rx data
+   to the free queue.
+   \param  pSchedContext - pointer to the global CDS Sched Context
+   \param  pkt - Vos message buffer to be returned to free queue.
+
+   \return Nothing
+   \sa cds_free_ol_rx_pkt()
+   -------------------------------------------------------------------------*/
+void cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
+			 struct cds_ol_rx_pkt *pkt);
+/*---------------------------------------------------------------------------
+   \brief cds_free_ol_rx_pkt_freeq() - Free cdss buffer free queue
+   The \a cds_free_ol_rx_pkt_freeq() does mem free of the buffers
+   available in free cds buffer queue which is used for Data rx processing
+   from Tlshim.
+   \param pSchedContext - pointer to the global CDS Sched Context
+
+   \return Nothing
+   \sa cds_free_ol_rx_pkt_freeq()
+   -------------------------------------------------------------------------*/
+void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
+#endif
+
+/*---------------------------------------------------------------------------
+
+   \brief cds_sched_open() - initialize the CDS Scheduler
+
+   The \a cds_sched_open() function initializes the CDS Scheduler
+   Upon successful initialization:
+
+     - All the message queues are initialized
+
+     - The Main Controller thread is created and ready to receive and
+       dispatch messages.
+
+     - The Tx thread is created and ready to receive and dispatch messages
+
+   \param  p_cds_context - pointer to the global CDF Context
+
+   \param  p_cds_sched_context - pointer to a previously allocated buffer big
+          enough to hold a scheduler context.
+ \
+
+   \return CDF_STATUS_SUCCESS - Scheduler was successfully initialized and
+          is ready to be used.
+
+          CDF_STATUS_E_RESOURCES - System resources (other than memory)
+          are unavailable to initilize the scheduler
+
+          CDF_STATUS_E_NOMEM - insufficient memory exists to initialize
+          the scheduler
+
+          CDF_STATUS_E_INVAL - Invalid parameter passed to the scheduler Open
+          function
+
+          CDF_STATUS_E_FAILURE - Failure to initialize the scheduler/
+
+   \sa cds_sched_open()
+
+   -------------------------------------------------------------------------*/
+CDF_STATUS cds_sched_open(void *p_cds_context,
+			  p_cds_sched_context pSchedCxt, uint32_t SchedCtxSize);
+
+/*---------------------------------------------------------------------------
+
+   \brief cds_sched_close() - Close the CDS Scheduler
+
+   The \a cds_sched_closes() function closes the CDS Scheduler
+   Upon successful closing:
+
+     - All the message queues are flushed
+
+     - The Main Controller thread is closed
+
+     - The Tx thread is closed
+
+   \param  p_cds_context - pointer to the global CDF Context
+
+   \return CDF_STATUS_SUCCESS - Scheduler was successfully initialized and
+          is ready to be used.
+
+          CDF_STATUS_E_INVAL - Invalid parameter passed to the scheduler Open
+          function
+
+          CDF_STATUS_E_FAILURE - Failure to initialize the scheduler/
+
+   \sa cds_sched_close()
+
+   ---------------------------------------------------------------------------*/
+CDF_STATUS cds_sched_close(void *p_cds_context);
+
+/* Helper routines provided to other CDS API's */
+CDF_STATUS cds_mq_init(p_cds_mq_type pMq);
+void cds_mq_deinit(p_cds_mq_type pMq);
+void cds_mq_put(p_cds_mq_type pMq, p_cds_msg_wrapper pMsgWrapper);
+p_cds_msg_wrapper cds_mq_get(p_cds_mq_type pMq);
+bool cds_is_mq_empty(p_cds_mq_type pMq);
+p_cds_sched_context get_cds_sched_ctxt(void);
+CDF_STATUS cds_sched_init_mqs(p_cds_sched_context pSchedContext);
+void cds_sched_deinit_mqs(p_cds_sched_context pSchedContext);
+void cds_sched_flush_mc_mqs(p_cds_sched_context pSchedContext);
+
+void cdf_timer_module_init(void);
+void cds_ssr_protect_init(void);
+void cds_ssr_protect(const char *caller_func);
+void cds_ssr_unprotect(const char *caller_func);
+bool cds_is_ssr_ready(const char *caller_func);
+
+#define cds_wait_for_work_thread_completion(func) cds_is_ssr_ready(func)
+
+#endif /* #if !defined __CDS_SCHED_H */

+ 189 - 0
core/cds/inc/cds_utils.h

@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __CDS_UTILS_H )
+#define __CDS_UTILS_H
+
+/**=========================================================================
+
+   \file  cds_utils.h
+
+   \brief Connectivity driver services (CDS) utility APIs
+
+   Various utility functions
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cdf_types.h>
+#include <cdf_status.h>
+#include <cdf_event.h>
+#include "ani_global.h"
+
+/*--------------------------------------------------------------------------
+   Preprocessor definitions and constants
+   ------------------------------------------------------------------------*/
+#define CDS_DIGEST_SHA1_SIZE    (20)
+#define CDS_DIGEST_MD5_SIZE     (16)
+#define CDS_BAND_2GHZ          (1)
+#define CDS_BAND_5GHZ          (2)
+
+#define CDS_24_GHZ_BASE_FREQ   (2407)
+#define CDS_5_GHZ_BASE_FREQ    (5000)
+#define CDS_24_GHZ_CHANNEL_14  (14)
+#define CDS_24_GHZ_CHANNEL_15  (15)
+#define CDS_24_GHZ_CHANNEL_27  (27)
+#define CDS_5_GHZ_CHANNEL_170  (170)
+#define CDS_CHAN_SPACING_5MHZ  (5)
+#define CDS_CHAN_SPACING_20MHZ (20)
+#define CDS_CHAN_14_FREQ       (2484)
+#define CDS_CHAN_15_FREQ       (2512)
+#define CDS_CHAN_170_FREQ      (5852)
+
+#define cds_log(level, args...) CDF_TRACE(CDF_MODULE_ID_CDF, level, ## args)
+#define cds_logfl(level, format, args...) cds_log(level, FL(format), ## args)
+
+#define cds_alert(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_FATAL, format, ## args)
+#define cds_err(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_ERROR, format, ## args)
+#define cds_warn(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_WARN, format, ## args)
+#define cds_notice(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_INFO, format, ## args)
+#define cds_info(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_INFO_HIGH, format, ## args)
+#define cds_debug(format, args...) \
+		cds_logfl(CDF_TRACE_LEVEL_DEBUG, format, ## args)
+/*--------------------------------------------------------------------------
+   Type declarations
+   ------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------
+   Function declarations and documenation
+   ------------------------------------------------------------------------*/
+
+CDF_STATUS cds_crypto_init(uint32_t *phCryptProv);
+
+CDF_STATUS cds_crypto_deinit(uint32_t hCryptProv);
+
+/**
+ * cds_rand_get_bytes
+
+ * FUNCTION:
+ * Returns cryptographically secure pseudo-random bytes.
+ *
+ *
+ * @param pbBuf - the caller allocated location where the bytes should be copied
+ * @param numBytes the number of bytes that should be generated and
+ * copied
+ *
+ * @return CDF_STATUS_SUCCSS if the operation succeeds
+ */
+CDF_STATUS cds_rand_get_bytes(uint32_t handle, uint8_t *pbBuf,
+			      uint32_t numBytes);
+
+/**
+ * cds_sha1_hmac_str
+ *
+ * FUNCTION:
+ * Generate the HMAC-SHA1 of a string given a key.
+ *
+ * LOGIC:
+ * Standard HMAC processing from RFC 2104. The code is provided in the
+ * appendix of the RFC.
+ *
+ * ASSUMPTIONS:
+ * The RFC is correct.
+ *
+ * @param text text to be hashed
+ * @param textLen length of text
+ * @param key key to use for HMAC
+ * @param keyLen length of key
+ * @param digest holds resultant SHA1 HMAC (20B)
+ *
+ * @return CDF_STATUS_SUCCSS if the operation succeeds
+ *
+ */
+CDF_STATUS cds_sha1_hmac_str(uint32_t cryptHandle,      /* Handle */
+			     uint8_t * text,    /* pointer to data stream */
+			     uint32_t textLen,  /* length of data stream */
+			     uint8_t * key,     /* pointer to authentication key */
+			     uint32_t keyLen,   /* length of authentication key */
+			     uint8_t digest[CDS_DIGEST_SHA1_SIZE]);     /* caller digest to be filled in */
+
+/**
+ * cds_md5_hmac_str
+ *
+ * FUNCTION:
+ * Generate the HMAC-MD5 of a string given a key.
+ *
+ * LOGIC:
+ * Standard HMAC processing from RFC 2104. The code is provided in the
+ * appendix of the RFC.
+ *
+ * ASSUMPTIONS:
+ * The RFC is correct.
+ *
+ * @param text text to be hashed
+ * @param textLen length of text
+ * @param key key to use for HMAC
+ * @param keyLen length of key
+ * @param digest holds resultant MD5 HMAC (16B)
+ *
+ * @return CDF_STATUS_SUCCSS if the operation succeeds
+ *
+ */
+CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle,       /* Handle */
+			    uint8_t * text,     /* pointer to data stream */
+			    uint32_t textLen,   /* length of data stream */
+			    uint8_t * key,      /* pointer to authentication key */
+			    uint32_t keyLen,    /* length of authentication key */
+			    uint8_t digest[CDS_DIGEST_MD5_SIZE]);       /* caller digest to be filled in */
+
+CDF_STATUS cds_encrypt_aes(uint32_t cryptHandle,        /* Handle */
+			   uint8_t *pText,      /* pointer to data stream */
+			   uint8_t *Encrypted, uint8_t *pKey);          /* pointer to authentication key */
+
+CDF_STATUS cds_decrypt_aes(uint32_t cryptHandle,        /* Handle */
+			   uint8_t *pText,      /* pointer to data stream */
+			   uint8_t *pDecrypted, uint8_t *pKey);         /* pointer to authentication key */
+
+uint32_t cds_chan_to_freq(uint8_t chan);
+uint8_t cds_freq_to_chan(uint32_t freq);
+uint8_t cds_chan_to_band(uint32_t chan);
+#ifdef WLAN_FEATURE_11W
+bool cds_is_mmie_valid(uint8_t *key, uint8_t *ipn,
+		       uint8_t *frm, uint8_t *efrm);
+bool cds_attach_mmie(uint8_t *igtk, uint8_t *ipn, uint16_t key_id,
+		     uint8_t *frm, uint8_t *efrm, uint16_t frmLen);
+uint8_t cds_get_mmie_size(void);
+#endif /* WLAN_FEATURE_11W */
+CDF_STATUS sme_send_flush_logs_cmd_to_fw(tpAniSirGlobal pMac);
+#endif /* #if !defined __CDS_UTILS_H */

+ 2085 - 0
core/cds/src/cds_api.c

@@ -0,0 +1,2085 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cds_api.c
+ *
+ * Connectivity driver services APIs
+ */
+
+#include <cds_mq.h>
+#include "cds_sched.h"
+#include <cds_api.h>
+#include "sir_types.h"
+#include "sir_api.h"
+#include "sir_mac_prot_def.h"
+#include "sme_api.h"
+#include "mac_init_api.h"
+#include "wlan_qct_sys.h"
+#include "wlan_hdd_misc.h"
+#include "i_cds_packet.h"
+#include "cds_reg_service.h"
+#include "wma_types.h"
+#include "wlan_hdd_main.h"
+#include <linux/vmalloc.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+#include "sap_api.h"
+#include "cdf_trace.h"
+#include "bmi.h"
+#include "ol_fw.h"
+#include "ol_if_athvar.h"
+#include "hif.h"
+
+#include "cds_utils.h"
+#include "wlan_logging_sock_svc.h"
+#include "wma.h"
+
+#include "wlan_hdd_ipa.h"
+/* Preprocessor Definitions and Constants */
+
+/* Maximum number of cds message queue get wrapper failures to cause panic */
+#define CDS_WRAPPER_MAX_FAIL_COUNT (CDS_CORE_MAX_MESSAGES * 3)
+
+#ifdef IPA_OFFLOAD
+#define CDS_IPA_CE_SR_BASE_PADDR                                               \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->ce_sr_base_paddr)
+#define CDS_IPA_CE_RING_SIZE                                                   \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->ce_sr_ring_size)
+#define CDS_IPA_CE_REG_PADDR                                                   \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->ce_reg_paddr)
+#define CDS_IPA_TX_COMP_BASE_PADDR                                             \
+	(&((hdd_context_t *)                                                   \
+		(gp_cds_context->pHDDContext))->tx_comp_ring_base_paddr)
+#define CDS_IPA_TX_COMP_RING_SIZE                                              \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->tx_comp_ring_size)
+#define CDS_IPA_TX_NUM_BUFF                                                    \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->tx_num_alloc_buffer)
+#define CDS_IPA_RX_RDY_RING_BASE_PADDR                                         \
+	(&((hdd_context_t *)                                                   \
+		(gp_cds_context->pHDDContext))->rx_rdy_ring_base_paddr)
+#define CDS_IPA_RX_RDY_RING_SIZE                                               \
+	(&((hdd_context_t *)(gp_cds_context->pHDDContext))->rx_rdy_ring_size)
+#define CDS_IPA_RX_PROC_DONE_IDX_PADDR                                         \
+	(&((hdd_context_t *)                                                   \
+		(gp_cds_context->pHDDContext))->rx_proc_done_idx_paddr)
+#else
+#define CDS_IPA_CE_SR_BASE_PADDR       (NULL)
+#define CDS_IPA_CE_RING_SIZE           (NULL)
+#define CDS_IPA_CE_REG_PADDR           (NULL)
+#define CDS_IPA_TX_COMP_BASE_PADDR     (NULL)
+#define CDS_IPA_TX_COMP_RING_SIZE      (NULL)
+#define CDS_IPA_TX_NUM_BUFF            (NULL)
+#define CDS_IPA_RX_RDY_RING_BASE_PADDR (NULL)
+#define CDS_IPA_RX_RDY_RING_SIZE       (NULL)
+#define CDS_IPA_RX_PROC_DONE_IDX_PADDR (NULL)
+#endif /* IPA_OFFLOAD */
+
+/* Data definitions */
+static cds_context_type g_cds_context;
+static p_cds_contextType gp_cds_context;
+static struct __cdf_device g_cdf_ctx;
+
+/* Debug variable to detect MC thread stuck */
+static atomic_t cds_wrapper_empty_count;
+
+static uint8_t cds_multicast_logging;
+
+void cds_sys_probe_thread_cback(void *pUserData);
+
+/**
+ * cds_alloc_global_context() - allocate CDS global context
+ * @p_cds_context: A pointer to where to store the CDS Context
+ *
+ * cds_alloc_global_context() function allocates the CDS global Context,
+ * but does not initialize all the members. This overall initialization will
+ * happen at cds_open().
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_alloc_global_context(v_CONTEXT_t *p_cds_context)
+{
+	if (p_cds_context == NULL)
+		return CDF_STATUS_E_FAILURE;
+
+	/* "allocate" the CDS Context: the storage is the static
+	 * g_cds_context object, so this only zeroes it and publishes
+	 * its address via the module-global gp_cds_context pointer.
+	 */
+	*p_cds_context = NULL;
+	gp_cds_context = &g_cds_context;
+
+	cdf_mem_zero(gp_cds_context, sizeof(cds_context_type));
+	*p_cds_context = gp_cds_context;
+
+	/* the CDF device context is likewise static and embedded here */
+	gp_cds_context->cdf_ctx = &g_cdf_ctx;
+	cdf_mem_zero(&g_cdf_ctx, sizeof(g_cdf_ctx));
+
+	/* initialize the spinlock */
+	cdf_trace_spin_lock_init();
+	/* it is the right time to initialize MTRACE structures */
+#if defined(TRACE_RECORD)
+	cdf_trace_init();
+#endif
+
+	/* data-path trace ring is initialized unconditionally */
+	cdf_dp_trace_init();
+	return CDF_STATUS_SUCCESS;
+} /* cds_alloc_global_context() */
+
+/**
+ * cds_free_global_context() - free CDS global context
+ * @p_cds_context: A pointer to where the CDS Context was stored
+ *
+ * cds_free_global_context() function frees the CDS Context.
+ *
+ * Note: the context is statically allocated (see g_cds_context), so no
+ * heap memory is released here; the function validates the caller's
+ * pointer against the global and then clears both references.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_free_global_context(v_CONTEXT_t *p_cds_context)
+{
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: De-allocating the CDS Context", __func__);
+
+	if ((p_cds_context == NULL) || (*p_cds_context == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: vOS Context is Null", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* refuse to clear the global if the caller handed us a foreign pointer */
+	if (gp_cds_context != *p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Context mismatch", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+	gp_cds_context->cdf_ctx = NULL;
+	*p_cds_context = gp_cds_context = NULL;
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_free_global_context() */
+
+#ifdef WLAN_FEATURE_NAN
+/**
+ * cds_set_nan_enable() - set nan enable flag in mac open param
+ * @param: Pointer to mac open parameters to populate
+ * @hdd_ctx: Pointer to hdd context (source of the ini setting)
+ *
+ * Copies the NAN-support ini flag from the HDD config into the MAC
+ * open parameters so lower layers see whether NAN is enabled.
+ *
+ * Return: none
+ */
+static void cds_set_nan_enable(tMacOpenParameters *param,
+					hdd_context_t *hdd_ctx)
+{
+	param->is_nan_enabled = hdd_ctx->config->enable_nan_support;
+}
+#else
+/* WLAN_FEATURE_NAN disabled: no-op stub so callers need no #ifdef */
+static void cds_set_nan_enable(tMacOpenParameters *param,
+					hdd_context_t *pHddCtx)
+{
+}
+#endif
+
+/**
+ * cds_open() - open the CDS Module
+ * @p_cds_context: A pointer to where the CDS Context was stored
+ * @hddContextSize: Size of the HDD context to allocate.
+ *
+ * cds_open() function opens the CDS Scheduler
+ * Upon successful initialization:
+ * - All CDS submodules should have been initialized
+ *
+ * - The CDS scheduler should have opened
+ *
+ * - All the WLAN SW components should have been opened. This includes
+ * SYS, MAC, SME, WMA and TL.
+ *
+ * On failure, previously-opened components are torn down via the
+ * fall-through goto labels at the bottom (reverse order of opening).
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_open(v_CONTEXT_t *p_cds_context, uint32_t hddContextSize)
+{
+	CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
+	int iter = 0;
+	tSirRetStatus sirStatus = eSIR_SUCCESS;
+	tMacOpenParameters mac_openParms;
+	cdf_device_t cdf_ctx;
+	HTC_INIT_INFO htcInfo;
+	struct ol_softc *scn;
+	void *HTCHandle;
+	hdd_context_t *pHddCtx;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Opening CDS", __func__);
+
+	/* cds_alloc_global_context() must have run first */
+	if (NULL == gp_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Trying to open CDS without a PreOpen", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Initialize the timer module */
+	cdf_timer_module_init();
+
+	/* Initialize bug reporting structure */
+	cds_init_log_completion();
+
+	/* Initialize the probe event */
+	if (cdf_event_init(&gp_cds_context->ProbeEvent) != CDF_STATUS_SUCCESS) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Unable to init probeEvent", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+	if (cdf_event_init(&(gp_cds_context->wmaCompleteEvent)) !=
+	    CDF_STATUS_SUCCESS) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Unable to init wmaCompleteEvent", __func__);
+		CDF_ASSERT(0);
+		goto err_probe_event;
+	}
+
+	/* Initialize the free message queue */
+	cdf_status = cds_mq_init(&gp_cds_context->freeVosMq);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to initialize CDS free message queue",
+			  __func__);
+		CDF_ASSERT(0);
+		goto err_wma_complete_event;
+	}
+
+	/* seed the free queue with every statically-allocated wrapper */
+	for (iter = 0; iter < CDS_CORE_MAX_MESSAGES; iter++) {
+		(gp_cds_context->aMsgWrappers[iter]).pVosMsg =
+			&(gp_cds_context->aMsgBuffers[iter]);
+		INIT_LIST_HEAD(&gp_cds_context->aMsgWrappers[iter].msgNode);
+		cds_mq_put(&gp_cds_context->freeVosMq,
+			   &(gp_cds_context->aMsgWrappers[iter]));
+	}
+
+	/* Now Open the CDS Scheduler */
+	cdf_status = cds_sched_open(gp_cds_context, &gp_cds_context->cdf_sched,
+				    sizeof(cds_sched_context));
+
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to open CDS Scheduler", __func__);
+		CDF_ASSERT(0);
+		goto err_msg_queue;
+	}
+
+	pHddCtx = (hdd_context_t *) (gp_cds_context->pHDDContext);
+	if ((NULL == pHddCtx) || (NULL == pHddCtx->config)) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Hdd Context is Null", __func__);
+		CDF_ASSERT(0);
+		goto err_sched_close;
+	}
+
+	scn = cds_get_context(CDF_MODULE_ID_HIF);
+	if (!scn) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: scn is null!", __func__);
+		goto err_sched_close;
+	}
+	/* propagate firmware logging/recovery ini knobs into the HIF softc */
+	scn->enableuartprint = pHddCtx->config->enablefwprint;
+	scn->enablefwlog = pHddCtx->config->enablefwlog;
+	scn->max_no_of_peers = pHddCtx->config->maxNumberOfPeers;
+#ifdef WLAN_FEATURE_LPSS
+	scn->enablelpasssupport = pHddCtx->config->enablelpasssupport;
+#endif
+	scn->enable_ramdump_collection =
+				pHddCtx->config->is_ramdump_enabled;
+	scn->enable_self_recovery = pHddCtx->config->enableSelfRecovery;
+
+	/* Initialize BMI and Download firmware */
+	cdf_status = bmi_download_firmware(scn);
+	if (cdf_status != CDF_STATUS_SUCCESS) {
+		/* NOTE(review): "FIALED" is a typo in this runtime log
+		 * string; left untouched here (documentation-only change).
+		 */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "BMI FIALED status:%d", cdf_status);
+		goto err_bmi_close;
+	}
+
+	/* register HTC callbacks for target failure / suspend completion */
+	htcInfo.pContext = gp_cds_context->pHIFContext;
+	htcInfo.TargetFailure = ol_target_failure;
+	htcInfo.TargetSendSuspendComplete = wma_target_suspend_acknowledge;
+	cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
+
+	/* Create HTC */
+	gp_cds_context->htc_ctx =
+		htc_create(htcInfo.pContext, &htcInfo, cdf_ctx);
+	if (!gp_cds_context->htc_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to Create HTC", __func__);
+		goto err_bmi_close;
+	}
+
+	if (bmi_done(scn)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to complete BMI phase", __func__);
+		goto err_htc_close;
+	}
+
+	/*
+	 * Need to open WMA first because it calls WDI_Init, which calls
+	 * wpalOpen; cds_packet_open needs those PAL APIs to be available.
+	 */
+
+	/*Open the WMA module */
+	cdf_mem_set(&mac_openParms, sizeof(mac_openParms), 0);
+	/* UMA is supported in hardware for performing the
+	** frame translation 802.11 <-> 802.3
+	*/
+	mac_openParms.frameTransRequired = 1;
+	mac_openParms.driverType = eDRIVER_TYPE_PRODUCTION;
+	mac_openParms.powersaveOffloadEnabled =
+		pHddCtx->config->enablePowersaveOffload;
+	mac_openParms.staDynamicDtim = pHddCtx->config->enableDynamicDTIM;
+	mac_openParms.staModDtim = pHddCtx->config->enableModulatedDTIM;
+	mac_openParms.staMaxLIModDtim = pHddCtx->config->fMaxLIModulatedDTIM;
+	mac_openParms.wowEnable = pHddCtx->config->wowEnable;
+	mac_openParms.maxWoWFilters = pHddCtx->config->maxWoWFilters;
+	/* Here olIniInfo is used to store ini status of arp offload
+	 * ns offload and others. Currently 1st bit is used for arp
+	 * off load and 2nd bit for ns offload currently, rest bits are unused
+	 */
+	if (pHddCtx->config->fhostArpOffload)
+		mac_openParms.olIniInfo = mac_openParms.olIniInfo | 0x1;
+	if (pHddCtx->config->fhostNSOffload)
+		mac_openParms.olIniInfo = mac_openParms.olIniInfo | 0x2;
+	/*
+	 * Copy the DFS Phyerr Filtering Offload status.
+	 * This parameter reflects the value of the
+	 * dfsPhyerrFilterOffload flag  as set in the ini.
+	 */
+	mac_openParms.dfsPhyerrFilterOffload =
+		pHddCtx->config->fDfsPhyerrFilterOffload;
+	if (pHddCtx->config->ssdp)
+		mac_openParms.ssdp = pHddCtx->config->ssdp;
+#ifdef FEATURE_WLAN_RA_FILTERING
+	mac_openParms.RArateLimitInterval =
+		pHddCtx->config->RArateLimitInterval;
+	mac_openParms.IsRArateLimitEnabled =
+		pHddCtx->config->IsRArateLimitEnabled;
+#endif
+
+	mac_openParms.apMaxOffloadPeers = pHddCtx->config->apMaxOffloadPeers;
+
+	mac_openParms.apMaxOffloadReorderBuffs =
+		pHddCtx->config->apMaxOffloadReorderBuffs;
+
+	mac_openParms.apDisableIntraBssFwd =
+		pHddCtx->config->apDisableIntraBssFwd;
+
+	mac_openParms.dfsRadarPriMultiplier =
+		pHddCtx->config->dfsRadarPriMultiplier;
+	mac_openParms.reorderOffload = pHddCtx->config->reorderOffloadSupport;
+
+	/* IPA micro controller data path offload resource config item */
+	mac_openParms.ucOffloadEnabled = hdd_ipa_uc_is_enabled(pHddCtx);
+	mac_openParms.ucTxBufCount = pHddCtx->config->IpaUcTxBufCount;
+	mac_openParms.ucTxBufSize = pHddCtx->config->IpaUcTxBufSize;
+	mac_openParms.ucRxIndRingCount = pHddCtx->config->IpaUcRxIndRingCount;
+	mac_openParms.ucTxPartitionBase = pHddCtx->config->IpaUcTxPartitionBase;
+	mac_openParms.max_scan = pHddCtx->config->max_scan_count;
+
+	mac_openParms.ip_tcp_udp_checksum_offload =
+			pHddCtx->config->enable_ip_tcp_udp_checksum_offload;
+	mac_openParms.enable_rxthread = pHddCtx->config->enableRxThread;
+	mac_openParms.ce_classify_enabled =
+				pHddCtx->config->ce_classify_enabled;
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	mac_openParms.tx_flow_stop_queue_th =
+				pHddCtx->config->TxFlowStopQueueThreshold;
+	mac_openParms.tx_flow_start_queue_offset =
+				pHddCtx->config->TxFlowStartQueueOffset;
+#endif
+	cds_set_nan_enable(&mac_openParms, pHddCtx);
+
+	mac_openParms.tx_chain_mask_cck = pHddCtx->config->tx_chain_mask_cck;
+	mac_openParms.self_gen_frm_pwr = pHddCtx->config->self_gen_frm_pwr;
+
+	cdf_status = wma_open(gp_cds_context,
+			      hdd_update_tgt_cfg,
+			      hdd_dfs_indicate_radar, &mac_openParms);
+
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to open WMA module", __func__);
+		CDF_ASSERT(0);
+		goto err_htc_close;
+	}
+
+	/* Number of peers limit differs in each chip version. If peer max
+	 * limit configured in ini exceeds more than supported, WMA adjusts
+	 * and keeps correct limit in mac_openParms.maxStation. So, make sure
+	 * config entry pHddCtx->config->maxNumberOfPeers has adjusted value
+	 */
+	pHddCtx->config->maxNumberOfPeers = mac_openParms.maxStation;
+	HTCHandle = cds_get_context(CDF_MODULE_ID_HTC);
+	if (!HTCHandle) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: HTCHandle is null!", __func__);
+		goto err_wma_close;
+	}
+	if (htc_wait_target(HTCHandle)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to complete BMI phase", __func__);
+		goto err_wma_close;
+	}
+
+	/* Now proceed to open the MAC */
+
+	/* UMA is supported in hardware for performing the
+	 * frame translation 802.11 <-> 802.3
+	 */
+	/* NOTE(review): frameTransRequired was already set to 1 above,
+	 * before wma_open(); this re-assignment is redundant but harmless.
+	 */
+	mac_openParms.frameTransRequired = 1;
+
+	sirStatus =
+		mac_open(&(gp_cds_context->pMACContext), gp_cds_context->pHDDContext,
+			 &mac_openParms);
+
+	if (eSIR_SUCCESS != sirStatus) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to open MAC", __func__);
+		CDF_ASSERT(0);
+		goto err_wma_close;
+	}
+
+	/* Now proceed to open the SME */
+	cdf_status = sme_open(gp_cds_context->pMACContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to open SME", __func__);
+		CDF_ASSERT(0);
+		goto err_mac_close;
+	}
+
+	gp_cds_context->pdev_txrx_ctx =
+		ol_txrx_pdev_alloc(gp_cds_context->cfg_ctx,
+				    gp_cds_context->htc_ctx,
+				    gp_cds_context->cdf_ctx);
+	if (!gp_cds_context->pdev_txrx_ctx) {
+		/* Critical Error ...  Cannot proceed further */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to open TXRX", __func__);
+		CDF_ASSERT(0);
+		goto err_sme_close;
+	}
+
+	/* hand IPA the shared CE/TX/RX ring addresses (all-NULL stubs
+	 * when IPA_OFFLOAD is not compiled in)
+	 */
+	ol_txrx_ipa_uc_get_resource(gp_cds_context->pdev_txrx_ctx,
+				CDS_IPA_CE_SR_BASE_PADDR,
+				CDS_IPA_CE_RING_SIZE,
+				CDS_IPA_CE_REG_PADDR,
+				CDS_IPA_TX_COMP_BASE_PADDR,
+				CDS_IPA_TX_COMP_RING_SIZE,
+				CDS_IPA_TX_NUM_BUFF,
+				CDS_IPA_RX_RDY_RING_BASE_PADDR,
+				CDS_IPA_RX_RDY_RING_SIZE,
+				CDS_IPA_RX_PROC_DONE_IDX_PADDR);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: CDS successfully Opened", __func__);
+
+	*p_cds_context = gp_cds_context;
+
+	return CDF_STATUS_SUCCESS;
+
+	/* error unwind: labels fall through so each failure point tears
+	 * down everything opened before it, in reverse order
+	 */
+err_sme_close:
+	sme_close(gp_cds_context->pMACContext);
+
+err_mac_close:
+	mac_close(gp_cds_context->pMACContext);
+
+err_wma_close:
+	wma_close(gp_cds_context);
+
+	wma_wmi_service_close(gp_cds_context);
+
+err_htc_close:
+	if (gp_cds_context->htc_ctx) {
+		htc_destroy(gp_cds_context->htc_ctx);
+		gp_cds_context->htc_ctx = NULL;
+	}
+
+err_bmi_close:
+	bmi_cleanup(scn);
+
+err_sched_close:
+	cds_sched_close(gp_cds_context);
+
+err_msg_queue:
+	cds_mq_deinit(&gp_cds_context->freeVosMq);
+
+err_wma_complete_event:
+	cdf_event_destroy(&gp_cds_context->wmaCompleteEvent);
+
+err_probe_event:
+	cdf_event_destroy(&gp_cds_context->ProbeEvent);
+
+	return CDF_STATUS_E_FAILURE;
+} /* cds_open() */
+
<br>
+/**
+ * cds_pre_enable() - pre enable cds
+ * @cds_context: CDS context
+ *
+ * Pre-starts WMA, starts HTC, waits for the firmware ready event and
+ * attaches the txrx pdev. On success (and on post-HTC-start failures)
+ * the target is put back to sleep via htc_set_target_to_sleep().
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_pre_enable(v_CONTEXT_t cds_context)
+{
+	CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
+	p_cds_contextType p_cds_context = (p_cds_contextType) cds_context;
+	void *scn;
+	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_INFO, "cds prestart");
+
+	/* only the single global context instance is supported */
+	if (gp_cds_context != p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Context mismatch", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	if (p_cds_context->pMACContext == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: MAC NULL context", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	if (p_cds_context->pWMAContext == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: WMA NULL context", __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	scn = cds_get_context(CDF_MODULE_ID_HIF);
+	if (!scn) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: scn is null!", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Reset wma wait event */
+	cdf_event_reset(&gp_cds_context->wmaCompleteEvent);
+
+	/*call WMA pre start */
+	cdf_status = wma_pre_start(gp_cds_context);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_FATAL,
+			  "Failed to WMA prestart");
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Need to update time out of complete */
+	cdf_status = cdf_wait_single_event(&gp_cds_context->wmaCompleteEvent,
+					   CDS_WMA_TIMEOUT);
+	if (cdf_status != CDF_STATUS_SUCCESS) {
+		if (cdf_status == CDF_STATUS_E_TIMEOUT) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Timeout occurred before WMA complete",
+				  __func__);
+		} else {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: wma_pre_start reporting other error",
+				  __func__);
+		}
+		/* probe the MC thread to determine whether it is stuck */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Test MC thread by posting a probe message to SYS",
+			  __func__);
+		wlan_sys_probe();
+
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_status = htc_start(gp_cds_context->htc_ctx);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_FATAL,
+			  "Failed to Start HTC");
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+	cdf_status = wma_wait_for_ready_event(gp_cds_context->pWMAContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "Failed to get ready event from target firmware");
+		/* HTC was started; let the target sleep before stopping it */
+		htc_set_target_to_sleep(scn);
+		htc_stop(gp_cds_context->htc_ctx);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if (ol_txrx_pdev_attach(gp_cds_context->pdev_txrx_ctx)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			"Failed to attach pdev");
+		htc_set_target_to_sleep(scn);
+		htc_stop(gp_cds_context->htc_ctx);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* pre-enable work is done; allow the target to sleep again */
+	htc_set_target_to_sleep(scn);
+
+	return CDF_STATUS_SUCCESS;
+}
+
<br>
+/**
+ * cds_enable() - start/enable cds module
+ * @cds_context: CDS context
+ *
+ * Starts WMA, MAC and SME in order, then attaches the txrx pdev to the
+ * target. On failure the already-started modules are stopped via the
+ * fall-through error labels (reverse order of starting).
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_enable(v_CONTEXT_t cds_context)
+{
+	CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
+	tSirRetStatus sirStatus = eSIR_SUCCESS;
+	p_cds_contextType p_cds_context = (p_cds_contextType) cds_context;
+	tHalMacStartParameters halStartParams;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: Starting Libra SW", __func__);
+
+	/* We support only one instance for now ... */
+	if (gp_cds_context != p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: mismatch in context", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if ((p_cds_context->pWMAContext == NULL) ||
+	    (p_cds_context->pMACContext == NULL)) {
+		if (p_cds_context->pWMAContext == NULL)
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: WMA NULL context", __func__);
+		else
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: MAC NULL context", __func__);
+
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Start the wma */
+	cdf_status = wma_start(p_cds_context);
+	if (cdf_status != CDF_STATUS_SUCCESS) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to start wma", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: wma correctly started", __func__);
+
+	/* Start the MAC */
+	cdf_mem_zero(&halStartParams,
+		     sizeof(tHalMacStartParameters));
+
+	/* Start the MAC */
+	sirStatus =
+		mac_start(p_cds_context->pMACContext, &halStartParams);
+
+	if (eSIR_SUCCESS != sirStatus) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to start MAC", __func__);
+		goto err_wma_stop;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: MAC correctly started", __func__);
+
+	/* START SME */
+	cdf_status = sme_start(p_cds_context->pMACContext);
+
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Failed to start SME", __func__);
+		goto err_mac_stop;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: SME correctly started", __func__);
+
+	if (ol_txrx_pdev_attach_target
+		       (p_cds_context->pdev_txrx_ctx) != A_OK) {
+	   CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+				"%s: Failed attach target", __func__);
+	   goto err_sme_stop;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "TL correctly started");
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: CDS Start is successful!!", __func__);
+
+	return CDF_STATUS_SUCCESS;
+
+	/* error unwind: stop modules in reverse order of starting */
+err_sme_stop:
+	sme_stop(p_cds_context->pMACContext, HAL_STOP_TYPE_SYS_RESET);
+
+err_mac_stop:
+	mac_stop(p_cds_context->pMACContext, HAL_STOP_TYPE_SYS_RESET);
+
+err_wma_stop:
+	/* wma_stop completion is signalled asynchronously on
+	 * wmaCompleteEvent; if the stop fails or times out, flag WMA for
+	 * a forced shutdown later instead of leaving it half-stopped
+	 */
+	cdf_event_reset(&(gp_cds_context->wmaCompleteEvent));
+	cdf_status = wma_stop(p_cds_context, HAL_STOP_TYPE_RF_KILL);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to stop wma", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+		wma_setneedshutdown(cds_context);
+	} else {
+		cdf_status =
+			cdf_wait_single_event(&(gp_cds_context->wmaCompleteEvent),
+					      CDS_WMA_TIMEOUT);
+		if (cdf_status != CDF_STATUS_SUCCESS) {
+			if (cdf_status == CDF_STATUS_E_TIMEOUT) {
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_FATAL,
+					  "%s: Timeout occurred before WMA_stop complete",
+					  __func__);
+			} else {
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_FATAL,
+					  "%s: WMA_stop reporting other error",
+					  __func__);
+			}
+			CDF_ASSERT(0);
+			wma_setneedshutdown(cds_context);
+		}
+	}
+
+	return CDF_STATUS_E_FAILURE;
+} /* cds_enable() */
+
<br>
+/**
+ * cds_disable() - stop/disable cds module
+ * @cds_context: CDS context
+ *
+ * Stops WMA, disables/resets the HIF SoC, then stops SME and MAC via
+ * sys_stop(). Sub-module stop failures are logged and asserted but do
+ * not change the return value: this function always returns success.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_disable(v_CONTEXT_t cds_context)
+{
+	CDF_STATUS cdf_status;
+
+	/* wma_stop is called before the SYS so that the processing of target
+	 * pending responses will not be handled during uninitialization of
+	 * WLAN driver
+	 */
+	cdf_event_reset(&(gp_cds_context->wmaCompleteEvent));
+
+	cdf_status = wma_stop(cds_context, HAL_STOP_TYPE_RF_KILL);
+
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to stop wma", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+		/* mark WMA for forced shutdown during cds_close() */
+		wma_setneedshutdown(cds_context);
+	}
+
+	/* quiesce the hardware before stopping the upper layers */
+	hif_disable_isr(((cds_context_type *) cds_context)->pHIFContext);
+	hif_reset_soc(((cds_context_type *) cds_context)->pHIFContext);
+
+	/* SYS STOP will stop SME and MAC */
+	cdf_status = sys_stop(cds_context);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to stop SYS", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
<br>
+/**
+ * cds_close() - close cds module
+ * @cds_context: CDS context
+ *
+ * Tears down everything cds_open() created, in roughly reverse order:
+ * HTC, txrx pdev, SME, MAC, WMA, the WMI service, the free message
+ * queue and both completion events. Individual close failures are
+ * logged/asserted but do not abort the teardown; always returns success.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_close(v_CONTEXT_t cds_context)
+{
+	CDF_STATUS cdf_status;
+
+	if (gp_cds_context->htc_ctx) {
+		htc_stop(gp_cds_context->htc_ctx);
+		htc_destroy(gp_cds_context->htc_ctx);
+		gp_cds_context->htc_ctx = NULL;
+	}
+
+	ol_txrx_pdev_detach(gp_cds_context->pdev_txrx_ctx, 1);
+	cds_free_context(cds_context, CDF_MODULE_ID_TXRX,
+			 gp_cds_context->pdev_txrx_ctx);
+
+	cdf_status = sme_close(((p_cds_contextType) cds_context)->pMACContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close SME", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cdf_status = mac_close(((p_cds_contextType) cds_context)->pMACContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close MAC", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	((p_cds_contextType) cds_context)->pMACContext = NULL;
+
+	/* if wma_stop failed earlier, WMA was flagged for forced shutdown
+	 * and a normal wma_close() is skipped here
+	 */
+	if (true == wma_needshutdown(cds_context)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Failed to shutdown wma", __func__);
+	} else {
+		cdf_status = wma_close(cds_context);
+		if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Failed to close wma", __func__);
+			CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+		}
+	}
+
+	cdf_status = wma_wmi_service_close(cds_context);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close wma_wmi_service", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cds_mq_deinit(&((p_cds_contextType) cds_context)->freeVosMq);
+
+	cdf_status = cdf_event_destroy(&gp_cds_context->wmaCompleteEvent);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: failed to destroy wmaCompleteEvent", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cdf_status = cdf_event_destroy(&gp_cds_context->ProbeEvent);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: failed to destroy ProbeEvent", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cds_deinit_log_completion();
+	return CDF_STATUS_SUCCESS;
+}
+
<br>
+/**
+ * cds_get_context() - get context data area
+ *
+ * @moduleId: ID of the module who's context data is being retrieved.
+ *
+ * Each module in the system has a context / data area that is allocated
+ * and managed by CDS.  This API allows any user to get a pointer to its
+ * allocated context data area from the CDS global context.
+ *
+ * Return: pointer to the context data area of the module ID
+ *	   specified, or NULL if the context data is not allocated for
+ *	   the module ID specified
+ */
+void *cds_get_context(CDF_MODULE_ID moduleId)
+{
+	void *pModContext = NULL;
+
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: cds context pointer is null", __func__);
+		return NULL;
+	}
+
+	/* map the module ID onto the matching member of the global context */
+	switch (moduleId) {
+#ifndef WLAN_FEATURE_MBSSID
+	case CDF_MODULE_ID_SAP:
+	{
+		pModContext = gp_cds_context->pSAPContext;
+		break;
+	}
+#endif
+
+	case CDF_MODULE_ID_HDD:
+	{
+		pModContext = gp_cds_context->pHDDContext;
+		break;
+	}
+
+	case CDF_MODULE_ID_SME:
+	case CDF_MODULE_ID_PE:
+	{
+		/* In all these cases, we just return the MAC Context */
+		pModContext = gp_cds_context->pMACContext;
+		break;
+	}
+
+	case CDF_MODULE_ID_WMA:
+	{
+		/* For wma module */
+		pModContext = gp_cds_context->pWMAContext;
+		break;
+	}
+
+	case CDF_MODULE_ID_CDF:
+	{
+		/* For SYS this is CDS itself */
+		pModContext = gp_cds_context;
+		break;
+	}
+
+	case CDF_MODULE_ID_HIF:
+	{
+		pModContext = gp_cds_context->pHIFContext;
+		break;
+	}
+
+	case CDF_MODULE_ID_HTC:
+	{
+		pModContext = gp_cds_context->htc_ctx;
+		break;
+	}
+
+	case CDF_MODULE_ID_CDF_DEVICE:
+	{
+		pModContext = gp_cds_context->cdf_ctx;
+		break;
+	}
+
+	case CDF_MODULE_ID_TXRX:
+	{
+		pModContext = gp_cds_context->pdev_txrx_ctx;
+		break;
+	}
+
+	case CDF_MODULE_ID_CFG:
+	{
+		pModContext = gp_cds_context->cfg_ctx;
+		break;
+	}
+
+	default:
+	{
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i does not have its context maintained by CDS",
+			  __func__, moduleId);
+		CDF_ASSERT(0);
+		return NULL;
+	}
+	}
+
+	/* a NULL member means the module has not been opened yet */
+	if (pModContext == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i context is Null", __func__,
+			  moduleId);
+	}
+
+	return pModContext;
+} /* cds_get_context() */
+
<br>
+/**
+ * cds_get_global_context() - get CDS global Context
+ *
+ * This API allows any user to get the CDS Global Context pointer from a
+ * module context data area.
+ *
+ * Return: pointer to the CDS global context, NULL if the function is
+ *	   unable to retrieve the CDS context.
+ */
+v_CONTEXT_t cds_get_global_context(void)
+{
+	/* log but still return the (NULL) pointer so callers can decide */
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: global cds context is NULL", __func__);
+	}
+
+	return gp_cds_context;
+} /* cds_get_global_context() */
+
<br>
+/**
+ * cds_is_logp_in_progress() - check if ssr/self recovery is going on
+ *
+ * Note: when the global CDS context is NULL this fails safe by
+ * reporting 1 (recovery in progress), so callers back off.
+ *
+ * Return: true if ssr/self recovery is going on else false
+ */
+uint8_t cds_is_logp_in_progress(void)
+{
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: global cds context is NULL", __func__);
+		return 1;
+	}
+
+	return gp_cds_context->isLogpInProgress;
+}
+
+/**
+ * cds_set_logp_in_progress() - set ssr/self recovery in progress
+ * @value: value to set
+ *
+ * Updates the flag in the global cds context and mirrors the same value
+ * into the HDD context (HDD keeps its own copy of this flag).
+ *
+ * Return: none
+ */
+void cds_set_logp_in_progress(uint8_t value)
+{
+	hdd_context_t *pHddCtx = NULL;
+
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: global cds context is NULL", __func__);
+		return;
+	}
+	gp_cds_context->isLogpInProgress = value;
+
+	/* HDD uses it's own context variable to check if SSR in progress,
+	 * instead of modifying all HDD APIs set the HDD context variable
+	 * here
+	 */
+	pHddCtx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (!pHddCtx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: HDD context is Null", __func__);
+		return;
+	}
+	pHddCtx->isLogpInProgress = value;
+}
+
+/**
+ * cds_is_load_unload_in_progress() - check if driver load/unload in progress
+ *
+ * Return: non-zero if load/unload is going on, else 0. A missing global
+ *	   cds context is reported as "not in progress" (returns 0).
+ */
+uint8_t cds_is_load_unload_in_progress(void)
+{
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: global cds context is NULL", __func__);
+		return 0;
+	}
+
+	return gp_cds_context->isLoadUnloadInProgress;
+}
+
+/**
+ * cds_set_load_unload_in_progress() - set load/unload in progress
+ * @value: value to set
+ *
+ * Updates the flag in the global cds context. When CONFIG_CNSS is
+ * enabled, the CNSS platform driver status is updated as well.
+ *
+ * Return: none
+ */
+void cds_set_load_unload_in_progress(uint8_t value)
+{
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: global cds context is NULL", __func__);
+		return;
+	}
+	gp_cds_context->isLoadUnloadInProgress = value;
+
+#ifdef CONFIG_CNSS
+	/* keep the CNSS platform driver in sync with the driver state */
+	if (value)
+		cnss_set_driver_status(CNSS_LOAD_UNLOAD);
+	else
+		cnss_set_driver_status(CNSS_INITIALIZED);
+#endif
+}
+
+/**
+ * cds_alloc_context() - allocate a context within the CDS global Context
+ * @p_cds_context: pointer to the global CDS context; must equal the one
+ *	true global context or the call fails
+ * @moduleID: module ID whose context area is being allocated
+ * @ppModuleContext: pointer to location where the pointer to the
+ *	allocated context is returned. Note this output pointer
+ *	is valid only if the API returns CDF_STATUS_SUCCESS
+ * @size: size of the context area to be allocated.
+ *
+ * This API allows any user to allocate a user context area within the
+ * CDS Global Context. Only the SAP (when MBSSID is disabled), WMA, HIF
+ * and EPPING contexts are owned by CDS; any other module ID is rejected
+ * with CDF_STATUS_E_INVAL.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_alloc_context(void *p_cds_context, CDF_MODULE_ID moduleID,
+			     void **ppModuleContext, uint32_t size)
+{
+	void **pGpModContext = NULL;
+
+	if (p_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: cds context is null", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if ((gp_cds_context != p_cds_context) || (ppModuleContext == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: context mismatch or null param passed",
+			  __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Map the module ID onto the slot it owns inside the global context */
+	switch (moduleID) {
+
+#ifndef WLAN_FEATURE_MBSSID
+	case CDF_MODULE_ID_SAP:
+	{
+		pGpModContext = &(gp_cds_context->pSAPContext);
+		break;
+	}
+#endif
+
+	case CDF_MODULE_ID_WMA:
+	{
+		pGpModContext = &(gp_cds_context->pWMAContext);
+		break;
+	}
+
+	case CDF_MODULE_ID_HIF:
+	{
+		pGpModContext = &(gp_cds_context->pHIFContext);
+		break;
+	}
+
+	case CDF_MODULE_ID_EPPING:
+	{
+		pGpModContext = &(gp_cds_context->epping_ctx);
+		break;
+	}
+	case CDF_MODULE_ID_SME:
+	case CDF_MODULE_ID_PE:
+	case CDF_MODULE_ID_HDD:
+	case CDF_MODULE_ID_HDD_SOFTAP:
+	default:
+	{
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i does not have its context allocated by CDS",
+			  __func__, moduleID);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+	}
+
+	if (NULL != *pGpModContext) {
+		/* Context has already been allocated!
+		 * Prevent double allocation
+		 */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i context has already been allocated",
+			  __func__, moduleID);
+		return CDF_STATUS_E_EXISTS;
+	}
+
+	/* Dynamically allocate the context for module */
+	*ppModuleContext = cdf_mem_malloc(size);
+
+	if (*ppModuleContext == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to allocate Context for module ID %i",
+			  __func__, moduleID);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_NOMEM;
+	}
+
+	/* The former CDF_MODULE_ID_TLSHIM zeroing special-case was removed:
+	 * TLSHIM falls into the default switch arm above and returns
+	 * CDF_STATUS_E_INVAL before the allocation, so it was dead code.
+	 */
+	*pGpModContext = *ppModuleContext;
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_alloc_context() */
+
+/**
+ * cds_free_context() - free an allocated context within the
+ *			CDS global Context
+ * @p_cds_context: pointer to the global CDS context; must equal the one
+ *	true global context or the call fails
+ * @moduleID: module ID whose context area is being freed
+ * @pModuleContext: pointer to module context area to be free'd.
+ *
+ *  This API allows a user to free the user context area within the
+ *  CDS Global Context. The pointer passed must match the pointer that
+ *  was stored by cds_alloc_context() for the same module ID.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_free_context(void *p_cds_context, CDF_MODULE_ID moduleID,
+			    void *pModuleContext)
+{
+	void **pGpModContext = NULL;
+
+	if ((p_cds_context == NULL) || (gp_cds_context != p_cds_context) ||
+	    (pModuleContext == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params or context mismatch", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Map the module ID onto the slot it owns inside the global context */
+	switch (moduleID) {
+#ifndef WLAN_FEATURE_MBSSID
+	case CDF_MODULE_ID_SAP:
+	{
+		pGpModContext = &(gp_cds_context->pSAPContext);
+		break;
+	}
+#endif
+
+	case CDF_MODULE_ID_WMA:
+	{
+		pGpModContext = &(gp_cds_context->pWMAContext);
+		break;
+	}
+
+	case CDF_MODULE_ID_HIF:
+	{
+		pGpModContext = &(gp_cds_context->pHIFContext);
+		break;
+	}
+
+	case CDF_MODULE_ID_EPPING:
+	{
+		pGpModContext = &(gp_cds_context->epping_ctx);
+		break;
+	}
+
+	case CDF_MODULE_ID_TXRX:
+	{
+		pGpModContext = &(gp_cds_context->pdev_txrx_ctx);
+		break;
+	}
+
+	case CDF_MODULE_ID_HDD:
+	case CDF_MODULE_ID_SME:
+	case CDF_MODULE_ID_PE:
+	case CDF_MODULE_ID_HDD_SOFTAP:
+	default:
+	{
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i does not have its context allocated by CDS",
+			  __func__, moduleID);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_INVAL;
+	}
+	}
+
+	if (NULL == *pGpModContext) {
+		/* Context has not been allocated or freed already! */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Module ID %i context has not been allocated or freed already",
+			  __func__, moduleID);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if (*pGpModContext != pModuleContext) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pGpModContext != pModuleContext", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* pModuleContext was validated non-NULL at entry; the redundant
+	 * re-check before freeing was removed.
+	 */
+	cdf_mem_free(pModuleContext);
+
+	*pGpModContext = NULL;
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_free_context() */
+
+/**
+ * cds_mq_post_message() - post a message to a message queue
+ * @msgQueueId: identifies the message queue upon which the message
+ *	will be posted.
+ * @pMsg: a pointer to a message buffer. Memory for this message
+ *	buffer is allocated by the caller and free'd by the CDF after the
+ *	message is posted to the message queue.  If the consumer of the
+ *	message needs anything in this message, it needs to copy the contents
+ *	before returning from the message queue handler.
+ *
+ * Copies the message into a free wrapper, enqueues it on the target MC
+ * queue, and wakes the MC thread.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_mq_post_message(CDS_MQ_ID msgQueueId, cds_msg_t *pMsg)
+{
+	p_cds_mq_type pTargetMq = NULL;
+	p_cds_msg_wrapper pMsgWrapper = NULL;
+	uint32_t debug_count = 0;
+
+	if ((gp_cds_context == NULL) || (pMsg == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params or global cds context is null",
+			  __func__);
+		CDF_ASSERT(0);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	switch (msgQueueId) {
+	/* Message Queue ID for messages bound for SME */
+	case CDS_MQ_ID_SME:
+	{
+		pTargetMq = &(gp_cds_context->cdf_sched.smeMcMq);
+		break;
+	}
+
+	/* Message Queue ID for messages bound for PE */
+	case CDS_MQ_ID_PE:
+	{
+		pTargetMq = &(gp_cds_context->cdf_sched.peMcMq);
+		break;
+	}
+
+	/* Message Queue ID for messages bound for wma */
+	case CDS_MQ_ID_WMA:
+	{
+		pTargetMq = &(gp_cds_context->cdf_sched.wmaMcMq);
+		break;
+	}
+
+	/* Message Queue ID for messages bound for the SYS module */
+	case CDS_MQ_ID_SYS:
+	{
+		pTargetMq = &(gp_cds_context->cdf_sched.sysMcMq);
+		break;
+	}
+
+	default:
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("%s: Trying to queue msg into unknown MC Msg queue ID %d"),
+			  __func__, msgQueueId);
+
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Defensive only: every switch arm above either sets pTargetMq
+	 * or returns, so this cannot trigger.
+	 */
+	CDF_ASSERT(NULL != pTargetMq);
+	if (pTargetMq == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pTargetMq == NULL", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Try and get a free Msg wrapper */
+	pMsgWrapper = cds_mq_get(&gp_cds_context->freeVosMq);
+
+	if (NULL == pMsgWrapper) {
+		/* Rate-limit the log: complain on first failure, bug-check
+		 * after CDS_WRAPPER_MAX_FAIL_COUNT consecutive failures.
+		 */
+		debug_count = atomic_inc_return(&cds_wrapper_empty_count);
+		if (1 == debug_count)
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				"%s: CDS Core run out of message wrapper %d",
+				__func__, debug_count);
+
+		if (CDS_WRAPPER_MAX_FAIL_COUNT == debug_count)
+			CDF_BUG(0);
+
+		return CDF_STATUS_E_RESOURCES;
+	}
+
+	/* A wrapper was obtained, so reset the consecutive-failure counter */
+	atomic_set(&cds_wrapper_empty_count, 0);
+
+	/* Copy the message now */
+	cdf_mem_copy((void *)pMsgWrapper->pVosMsg,
+		     (void *)pMsg, sizeof(cds_msg_t));
+
+	cds_mq_put(pTargetMq, pMsgWrapper);
+
+	/* Signal the MC thread that a message is pending */
+	set_bit(MC_POST_EVENT_MASK, &gp_cds_context->cdf_sched.mcEventFlag);
+	wake_up_interruptible(&gp_cds_context->cdf_sched.mcWaitQueue);
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_mq_post_message() */
+
+/**
+ * cds_sys_probe_thread_cback() -  probe mc thread callback
+ * @pUserData: pointer to user data; must be the global cds context
+ *
+ * Signals ProbeEvent so the thread waiting on MC-thread probe completes.
+ *
+ * Return: none
+ */
+void cds_sys_probe_thread_cback(void *pUserData)
+{
+	if (gp_cds_context != pUserData) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: gp_cds_context != pUserData", __func__);
+		return;
+	}
+
+	if (cdf_event_set(&gp_cds_context->ProbeEvent) != CDF_STATUS_SUCCESS) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: cdf_event_set failed", __func__);
+		return;
+	}
+} /* cds_sys_probe_thread_cback() */
+
+/**
+ * cds_wma_complete_cback() - wma complete callback
+ * @pUserData: pointer to user data; must be the global cds context
+ *
+ * Signals wmaCompleteEvent so a thread waiting on WMA completion resumes.
+ *
+ * Return: none
+ */
+void cds_wma_complete_cback(void *pUserData)
+{
+	if (gp_cds_context != pUserData) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: gp_cds_context != pUserData", __func__);
+		return;
+	}
+
+	if (cdf_event_set(&gp_cds_context->wmaCompleteEvent) !=
+	    CDF_STATUS_SUCCESS) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: cdf_event_set failed", __func__);
+		return;
+	}
+} /* cds_wma_complete_cback() */
+
+/**
+ * cds_core_return_msg() - return a message wrapper to the free queue
+ * @pVContext: pointer to cds context; must be the global cds context
+ * @pMsgWrapper: pointer to message wrapper being returned
+ *
+ * Re-initializes the wrapper's list node and puts it back on freeVosMq
+ * so it can be reused by cds_mq_post_message().
+ *
+ * Return: none
+ */
+void cds_core_return_msg(void *pVContext, p_cds_msg_wrapper pMsgWrapper)
+{
+	p_cds_contextType p_cds_context = (p_cds_contextType) pVContext;
+
+	CDF_ASSERT(gp_cds_context == p_cds_context);
+
+	if (gp_cds_context != p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: gp_cds_context != p_cds_context", __func__);
+		return;
+	}
+
+	CDF_ASSERT(NULL != pMsgWrapper);
+
+	if (pMsgWrapper == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pMsgWrapper == NULL in function", __func__);
+		return;
+	}
+
+	/*
+	** Return the message on the free message queue
+	*/
+	INIT_LIST_HEAD(&pMsgWrapper->msgNode);
+	cds_mq_put(&p_cds_context->freeVosMq, pMsgWrapper);
+} /* cds_core_return_msg() */
+
+
+/**
+ * cds_shutdown() - shutdown CDS
+ * @cds_context: global cds context
+ *
+ * Tears down the driver stack in order: TXRX pdev, SME, the SAP DFS CAC
+ * timer, MAC, WMA (unless a full shutdown is needed), HTC, WMI services,
+ * the free message queue and the probe/WMA-complete events.
+ *
+ * NOTE(review): pmac is dereferenced below without a NULL check --
+ * confirm callers guarantee pMACContext is valid when this runs.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_shutdown(v_CONTEXT_t cds_context)
+{
+	CDF_STATUS cdf_status;
+	tpAniSirGlobal pmac = (((p_cds_contextType)cds_context)->pMACContext);
+
+	ol_txrx_pdev_detach(gp_cds_context->pdev_txrx_ctx, 1);
+	cds_free_context(cds_context, CDF_MODULE_ID_TXRX,
+			 gp_cds_context->pdev_txrx_ctx);
+
+	cdf_status = sme_close(((p_cds_contextType) cds_context)->pMACContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close SME", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+	/*
+	 * CAC timer will be initiated and started only when SAP starts on
+	 * DFS channel and it will be stopped and destroyed immediately once the
+	 * radar detected or timedout. So as per design CAC timer should be
+	 * destroyed after stop
+	 */
+	if (pmac->sap.SapDfsInfo.is_dfs_cac_timer_running) {
+		cdf_mc_timer_stop(&pmac->sap.SapDfsInfo.sap_dfs_cac_timer);
+		pmac->sap.SapDfsInfo.is_dfs_cac_timer_running = 0;
+		cdf_mc_timer_destroy(&pmac->sap.SapDfsInfo.sap_dfs_cac_timer);
+	}
+
+	cdf_status = mac_close(((p_cds_contextType) cds_context)->pMACContext);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close MAC", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	((p_cds_contextType) cds_context)->pMACContext = NULL;
+
+	/* If a full WMA shutdown is required, wma_close is skipped here */
+	if (false == wma_needshutdown(cds_context)) {
+
+		cdf_status = wma_close(cds_context);
+		if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Failed to close wma!", __func__);
+			CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+		}
+	}
+
+	if (gp_cds_context->htc_ctx) {
+		htc_stop(gp_cds_context->htc_ctx);
+		htc_destroy(gp_cds_context->htc_ctx);
+		gp_cds_context->htc_ctx = NULL;
+	}
+
+	cdf_status = wma_wmi_service_close(cds_context);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to close wma_wmi_service!", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cds_mq_deinit(&((p_cds_contextType) cds_context)->freeVosMq);
+
+	cdf_status = cdf_event_destroy(&gp_cds_context->wmaCompleteEvent);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: failed to destroy wmaCompleteEvent", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	cdf_status = cdf_event_destroy(&gp_cds_context->ProbeEvent);
+	if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: failed to destroy ProbeEvent", __func__);
+		CDF_ASSERT(CDF_IS_STATUS_SUCCESS(cdf_status));
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_vdev_types() - map a connection mode to WMI vdev type/subtype
+ * @mode: connection mode
+ * @type: out param; receives the WMI vdev type
+ * @sub_type: out param; receives the WMI vdev subtype (0 if none)
+ *
+ * Return: CDF_STATUS_SUCCESS and fills *type/*sub_type on a known mode,
+ *	   CDF_STATUS_E_INVAL on an unknown mode (outputs left zeroed)
+ */
+CDF_STATUS cds_get_vdev_types(tCDF_CON_MODE mode, uint32_t *type,
+			      uint32_t *sub_type)
+{
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+	*type = 0;
+	*sub_type = 0;
+
+	switch (mode) {
+	case CDF_STA_MODE:
+		*type = WMI_VDEV_TYPE_STA;
+		break;
+	case CDF_SAP_MODE:
+		*type = WMI_VDEV_TYPE_AP;
+		break;
+	case CDF_P2P_DEVICE_MODE:
+		*type = WMI_VDEV_TYPE_AP;
+		*sub_type = WMI_UNIFIED_VDEV_SUBTYPE_P2P_DEVICE;
+		break;
+	case CDF_P2P_CLIENT_MODE:
+		*type = WMI_VDEV_TYPE_STA;
+		*sub_type = WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT;
+		break;
+	case CDF_P2P_GO_MODE:
+		*type = WMI_VDEV_TYPE_AP;
+		*sub_type = WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO;
+		break;
+	case CDF_OCB_MODE:
+		*type = WMI_VDEV_TYPE_OCB;
+		break;
+	default:
+		hddLog(CDF_TRACE_LEVEL_ERROR, "Invalid device mode %d", mode);
+		status = CDF_STATUS_E_INVAL;
+		break;
+	}
+	return status;
+}
+
+/**
+ * cds_flush_work() - flush pending works
+ * @work: pointer to work (struct work_struct, passed as void *)
+ *
+ * Dispatches to cnss_flush_work() on CNSS platforms, otherwise
+ * cancel_work_sync() on open-source builds; no-op if neither is set.
+ *
+ * Return: none
+ */
+void cds_flush_work(void *work)
+{
+#if defined (CONFIG_CNSS)
+	cnss_flush_work(work);
+#elif defined (WLAN_OPEN_SOURCE)
+	cancel_work_sync(work);
+#endif
+}
+
+/**
+ * cds_flush_delayed_work() - flush delayed works
+ * @dwork: pointer to delayed work (struct delayed_work, passed as void *)
+ *
+ * Dispatches to cnss_flush_delayed_work() on CNSS platforms, otherwise
+ * cancel_delayed_work_sync() on open-source builds; no-op if neither.
+ *
+ * Return: none
+ */
+void cds_flush_delayed_work(void *dwork)
+{
+#if defined (CONFIG_CNSS)
+	cnss_flush_delayed_work(dwork);
+#elif defined (WLAN_OPEN_SOURCE)
+	cancel_delayed_work_sync(dwork);
+#endif
+}
+
+/**
+ * cds_is_packet_log_enabled() - check if packet log is enabled
+ *
+ * Reads the enablePacketLog setting from the HDD config.
+ *
+ * Return: true if packet log is enabled, false otherwise (including when
+ *	   the cds/HDD context or config is not available)
+ */
+bool cds_is_packet_log_enabled(void)
+{
+	hdd_context_t *pHddCtx;
+
+	/* Guard added: sibling accessors NULL-check gp_cds_context before
+	 * dereferencing it; this function previously did not.
+	 */
+	if (gp_cds_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: global cds context is NULL", __func__);
+		return false;
+	}
+
+	pHddCtx = (hdd_context_t *) (gp_cds_context->pHDDContext);
+	if ((NULL == pHddCtx) || (NULL == pHddCtx->config)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Hdd Context is Null", __func__);
+		return false;
+	}
+
+	return pHddCtx->config->enablePacketLog;
+}
+
+/**
+ * cds_trigger_recovery() - trigger self recovery
+ *
+ * Sends a self-recovery crash-inject to the firmware via WMA and blocks
+ * up to WMA_CRASH_INJECT_TIMEOUT for the recovery event. On timeout with
+ * CONFIG_CNSS, schedules CNSS recovery work instead (unless an SSR is
+ * already in progress).
+ *
+ * Return: none
+ */
+void cds_trigger_recovery(void)
+{
+	tp_wma_handle wma_handle = cds_get_context(CDF_MODULE_ID_WMA);
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+
+	if (!wma_handle) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			"WMA context is invald!");
+		return;
+	}
+
+	wma_crash_inject(wma_handle, RECOVERY_SIM_SELF_RECOVERY, 0);
+
+	/* Blocking wait for firmware to acknowledge the crash inject */
+	status = cdf_wait_single_event(&wma_handle->recovery_event,
+		WMA_CRASH_INJECT_TIMEOUT);
+
+	if (CDF_STATUS_SUCCESS != status) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			"CRASH_INJECT command is timed out!");
+ #ifdef CONFIG_CNSS
+		if (cds_is_logp_in_progress()) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				"LOGP is in progress, ignore!");
+			return;
+		}
+		cds_set_logp_in_progress(true);
+		cnss_schedule_recovery_work();
+ #endif
+
+		return;
+	}
+}
+
+/**
+ * cds_get_monotonic_boottime() - Get kernel boot time.
+ *
+ * On CNSS platforms this uses cnss_get_monotonic_boottime(); otherwise it
+ * falls back to system ticks, which only has millisecond resolution even
+ * though the value is scaled to microseconds.
+ *
+ * Return: Time in microseconds
+ */
+
+uint64_t cds_get_monotonic_boottime(void)
+{
+#ifdef CONFIG_CNSS
+	struct timespec ts;
+
+	cnss_get_monotonic_boottime(&ts);
+	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+#else
+	return ((uint64_t)cdf_system_ticks_to_msecs(cdf_system_ticks()) *
+			 1000);
+#endif
+}
+
+/**
+ * cds_set_wakelock_logging() - Logging of wakelock enabled/disabled
+ * @value: Boolean value
+ *
+ * This function is used to set the flag which will indicate whether
+ * logging of wakelock is enabled or not
+ *
+ * Return: None
+ */
+void cds_set_wakelock_logging(bool value)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"cds context is Invald");
+		return;
+	}
+	p_cds_context->is_wakelock_log_enabled = value;
+}
+
+/**
+ * cds_is_wakelock_enabled() - Check if logging of wakelock is enabled/disabled
+ *
+ * This function is used to check whether logging of wakelock is enabled or not
+ *
+ * Return: true if logging of wakelock is enabled, false otherwise
+ *	   (including when the global cds context is unavailable)
+ */
+bool cds_is_wakelock_enabled(void)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"cds context is Invald");
+		return false;
+	}
+	return p_cds_context->is_wakelock_log_enabled;
+}
+
+/**
+ * cds_set_ring_log_level() - Sets the log level of a particular ring
+ * @ring_id: ring_id
+ * @log_level: Log level specified (HLOS LOG_LEVEL_* value)
+ *
+ * This function converts HLOS values to driver log levels and sets the log
+ * level of a particular ring accordingly. Unknown ring IDs are ignored.
+ *
+ * Return: None
+ */
+void cds_set_ring_log_level(uint32_t ring_id, uint32_t log_level)
+{
+	p_cds_contextType p_cds_context;
+	uint32_t log_val;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invald", __func__);
+		return;
+	}
+
+	/* Translate the HLOS level to the driver's WLAN_LOG_LEVEL_* enum;
+	 * unknown values default to ACTIVE.
+	 */
+	switch (log_level) {
+	case LOG_LEVEL_NO_COLLECTION:
+		log_val = WLAN_LOG_LEVEL_OFF;
+		break;
+	case LOG_LEVEL_NORMAL_COLLECT:
+		log_val = WLAN_LOG_LEVEL_NORMAL;
+		break;
+	case LOG_LEVEL_ISSUE_REPRO:
+		log_val = WLAN_LOG_LEVEL_REPRO;
+		break;
+	case LOG_LEVEL_ACTIVE:
+	default:
+		log_val = WLAN_LOG_LEVEL_ACTIVE;
+		break;
+	}
+
+	if (ring_id == RING_ID_WAKELOCK) {
+		p_cds_context->wakelock_log_level = log_val;
+		return;
+	} else if (ring_id == RING_ID_CONNECTIVITY) {
+		p_cds_context->connectivity_log_level = log_val;
+		return;
+	} else if (ring_id == RING_ID_PER_PACKET_STATS) {
+		p_cds_context->packet_stats_log_level = log_val;
+		return;
+	} else if (ring_id == RIND_ID_DRIVER_DEBUG) {
+		p_cds_context->driver_debug_log_level = log_val;
+		return;
+	} else if (ring_id == RING_ID_FIRMWARE_DEBUG) {
+		p_cds_context->fw_debug_log_level = log_val;
+		return;
+	}
+}
+
+/**
+ * cds_get_ring_log_level() - Get the a ring id's log level
+ * @ring_id: Ring id
+ *
+ * Fetch and return the log level corresponding to a ring id
+ * (RIND_ID_DRIVER_DEBUG is the project constant's spelling).
+ *
+ * Return: Log level corresponding to the ring ID, or WLAN_LOG_LEVEL_OFF
+ *	   for an unknown ring ID or missing cds context
+ */
+enum wifi_driver_log_level cds_get_ring_log_level(uint32_t ring_id)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invald", __func__);
+		return WLAN_LOG_LEVEL_OFF;
+	}
+
+	if (ring_id == RING_ID_WAKELOCK)
+		return p_cds_context->wakelock_log_level;
+	else if (ring_id == RING_ID_CONNECTIVITY)
+		return p_cds_context->connectivity_log_level;
+	else if (ring_id == RING_ID_PER_PACKET_STATS)
+		return p_cds_context->packet_stats_log_level;
+	else if (ring_id == RIND_ID_DRIVER_DEBUG)
+		return p_cds_context->driver_debug_log_level;
+	else if (ring_id == RING_ID_FIRMWARE_DEBUG)
+		return p_cds_context->fw_debug_log_level;
+
+	return WLAN_LOG_LEVEL_OFF;
+}
+
+/**
+ * cds_set_multicast_logging() - Set multicast logging value
+ * @value: Value of multicast logging
+ *
+ * Set the multicast logging value which will indicate
+ * whether to multicast host and fw messages even
+ * without any registration by userspace entity
+ *
+ * Return: None
+ */
+void cds_set_multicast_logging(uint8_t value)
+{
+	cds_multicast_logging = value;
+}
+
+/**
+ * cds_is_multicast_logging() - Get multicast logging value
+ *
+ * Get the multicast logging value which will indicate
+ * whether to multicast host and fw messages even
+ * without any registration by userspace entity
+ *
+ * Return: 0 - Multicast logging disabled, 1 - Multicast logging enabled
+ */
+uint8_t cds_is_multicast_logging(void)
+{
+	return cds_multicast_logging;
+}
+
+/**
+ * cds_init_log_completion() - Initialize log param structure
+ *
+ * This function is used to initialize the logging related
+ * parameters (bug-report state) and the spinlock guarding them.
+ *
+ * Return: None
+ */
+void cds_init_log_completion(void)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return;
+	}
+
+	p_cds_context->log_complete.is_fatal = WLAN_LOG_TYPE_NON_FATAL;
+	p_cds_context->log_complete.indicator = WLAN_LOG_INDICATOR_UNUSED;
+	p_cds_context->log_complete.reason_code = WLAN_LOG_REASON_CODE_UNUSED;
+	p_cds_context->log_complete.is_report_in_progress = false;
+	/* Attempting to initialize an already initialized lock
+	 * results in a failure. This must be ok here.
+	 */
+	cdf_spinlock_init(&p_cds_context->bug_report_lock);
+}
+
+/**
+ * cds_deinit_log_completion() - Deinitialize log param structure
+ *
+ * This function is used to deinitialize the logging related
+ * parameters (destroys the bug-report spinlock)
+ *
+ * Return: None
+ */
+void cds_deinit_log_completion(void)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return;
+	}
+
+	cdf_spinlock_destroy(&p_cds_context->bug_report_lock);
+}
+
+/**
+ * cds_set_log_completion() - Store the logging params
+ * @is_fatal: Indicates if the event triggering bug report is fatal or not
+ * @indicator: Source which triggered the bug report
+ * @reason_code: Reason for triggering bug report
+ *
+ * This function is used to set the logging parameters based on the
+ * caller. Also marks a bug report as in progress, under bug_report_lock.
+ *
+ * Return: CDF_STATUS_SUCCESS if setting of params is successful
+ */
+CDF_STATUS cds_set_log_completion(uint32_t is_fatal,
+		uint32_t indicator,
+		uint32_t reason_code)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cdf_spinlock_acquire(&p_cds_context->bug_report_lock);
+	p_cds_context->log_complete.is_fatal = is_fatal;
+	p_cds_context->log_complete.indicator = indicator;
+	p_cds_context->log_complete.reason_code = reason_code;
+	p_cds_context->log_complete.is_report_in_progress = true;
+	cdf_spinlock_release(&p_cds_context->bug_report_lock);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_log_completion() - Get the logging related params
+ * @is_fatal: out param; whether the triggering event was fatal
+ * @indicator: out param; source which triggered the bug report
+ * @reason_code: out param; reason for triggering bug report
+ *
+ * This function is used to get the logging related parameters.
+ * Note: it also clears is_report_in_progress as a side effect.
+ *
+ * Return: None
+ */
+void cds_get_log_completion(uint32_t *is_fatal,
+		uint32_t *indicator,
+		uint32_t *reason_code)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return;
+	}
+
+	cdf_spinlock_acquire(&p_cds_context->bug_report_lock);
+	*is_fatal =  p_cds_context->log_complete.is_fatal;
+	*indicator = p_cds_context->log_complete.indicator;
+	*reason_code = p_cds_context->log_complete.reason_code;
+	p_cds_context->log_complete.is_report_in_progress = false;
+	cdf_spinlock_release(&p_cds_context->bug_report_lock);
+}
+
+/**
+ * cds_is_log_report_in_progress() - Check if bug reporting is in progress
+ *
+ * This function is used to check if the bug reporting is already in progress
+ *
+ * Return: true if the bug reporting is in progress (also true when the
+ *	   cds context is unavailable, to suppress new reports)
+ */
+bool cds_is_log_report_in_progress(void)
+{
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return true;
+	}
+	return p_cds_context->log_complete.is_report_in_progress;
+}
+
+/**
+ * cds_flush_logs() - Report fatal event to userspace
+ * @is_fatal: Indicates if the event triggering bug report is fatal or not
+ * @indicator: Source which triggered the bug report
+ * @reason_code: Reason for triggering bug report
+ *
+ * This function sets the log related params and send the WMI command to the
+ * FW to flush its logs. On receiving the flush completion event from the FW
+ * the same will be conveyed to userspace. Drops the request if another
+ * bug report is already in progress.
+ *
+ * Return: CDF_STATUS_SUCCESS on success
+ */
+CDF_STATUS cds_flush_logs(uint32_t is_fatal,
+		uint32_t indicator,
+		uint32_t reason_code)
+{
+	uint32_t ret;
+	CDF_STATUS status;
+
+	p_cds_contextType p_cds_context;
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: cds context is Invalid", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if (cds_is_log_report_in_progress() == true) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d",
+				__func__, is_fatal, indicator, reason_code);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	status = cds_set_log_completion(is_fatal, indicator, reason_code);
+	if (CDF_STATUS_SUCCESS != status) {
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+			"%s: Failed to set log trigger params", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
+			"%s: Triggering bug report: type:%d, indicator=%d reason_code=%d",
+			__func__, is_fatal, indicator, reason_code);
+
+	ret = sme_send_flush_logs_cmd_to_fw(p_cds_context->pMACContext);
+	if (0 != ret) {
+		/* Reset the in-progress state so a later report can run */
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+				"%s: Failed to send flush FW log", __func__);
+		cds_init_log_completion();
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_logging_set_fw_flush_complete() - Wrapper for FW log flush completion
+ *
+ * This function is used to send signal to the logger thread to indicate
+ * that the flushing of FW logs is complete by the FW
+ *
+ * Return: None
+ *
+ */
+void cds_logging_set_fw_flush_complete(void)
+{
+	wlan_logging_set_fw_flush_complete();
+}

+ 6823 - 0
core/cds/src/cds_concurrency.c

@@ -0,0 +1,6823 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cds_concurrency.c
+ *
+ * WLAN Concurrent Connection Management functions
+ *
+ */
+
+/* Include files */
+
+#include <cds_api.h>
+#include <cds_sched.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <wlan_hdd_tx_rx.h>
+#include <wni_api.h>
+#include "wlan_hdd_trace.h"
+#include "wlan_hdd_hostapd.h"
+#include "cds_concurrency.h"
+#include "cdf_types.h"
+#include "cdf_trace.h"
+
+#include <net/addrconf.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/rtnetlink.h>
+#include "sap_api.h"
+#include <linux/semaphore.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include "cfg_api.h"
+#include "qwlan_version.h"
+#include "wma_types.h"
+#include "wma.h"
+#include "wma_api.h"
+#include "cds_utils.h"
+#include "cds_reg_service.h"
+#include "wlan_hdd_ipa.h"
+
+#define CDS_MAX_FEATURE_SET   8
+
+/* Global table tracking every concurrent connection known to CDS */
+static struct cds_conc_connection_info
+	conc_connection_list[MAX_NUMBER_OF_CONC_CONNECTIONS];
+
+/*
+ * True when @index is a valid slot in conc_connection_list and that
+ * slot is currently in use.  The macro argument is parenthesized so
+ * that expression arguments (e.g. "i + 1") expand with the intended
+ * operator precedence.
+ */
+#define CONC_CONNECTION_LIST_VALID_INDEX(index) \
+		((MAX_NUMBER_OF_CONC_CONNECTIONS > (index)) && \
+			(conc_connection_list[(index)].in_use))
+
+#define CDS_MAX_CON_STRING_LEN   50
+/**
+ * first_connection_pcl_table - table which provides PCL for the
+ * very first connection in the system
+ *
+ * Indexed as [connection mode][concurrency priority]; each entry is the
+ * preferred channel list (PCL) type to use when that mode comes up as
+ * the only connection.  CDS_NONE means no channel preference is applied.
+ */
+static const enum cds_pcl_type
+first_connection_pcl_table[CDS_MAX_NUM_OF_MODE]
+			[CDS_MAX_CONC_PRIORITY_MODE] = {
+	[CDS_STA_MODE] = {CDS_NONE, CDS_NONE, CDS_NONE},
+	[CDS_SAP_MODE] = {CDS_5G,   CDS_5G,   CDS_5G  },
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,   CDS_5G,   CDS_5G  },
+	[CDS_P2P_GO_MODE] = {CDS_5G,   CDS_5G,   CDS_5G  },
+	[CDS_IBSS_MODE] = {CDS_NONE, CDS_NONE, CDS_NONE},
+};
+
+/**
+ * second_connection_pcl_dbs_table - table which provides PCL
+ * for the 2nd connection, when we have a connection already in
+ * the system (with DBS supported by HW)
+ *
+ * Indexed as [existing-connection profile][new-connection mode]
+ * [concurrency priority].  Entries of CDS_MAX_PCL_TYPE mark
+ * combinations for which no PCL is defined (unsupported pairing).
+ */
+static const enum cds_pcl_type
+second_connection_pcl_dbs_table[CDS_MAX_ONE_CONNECTION_MODE]
+			[CDS_MAX_NUM_OF_MODE][CDS_MAX_CONC_PRIORITY_MODE] = {
+	[CDS_STA_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {CDS_5G,        CDS_5G,        CDS_5G       } },
+
+	[CDS_STA_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {CDS_5G,        CDS_5G,        CDS_5G       } },
+
+	[CDS_STA_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {CDS_24G,        CDS_24G,        CDS_24G       } },
+
+	[CDS_STA_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {CDS_24G,        CDS_24G,        CDS_24G       } },
+
+	[CDS_P2P_CLI_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_SCC_CH_24G, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_24G_SCC_CH, CDS_24G_SCC_CH, CDS_24G_SCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_24G_SCC_CH, CDS_24G_SCC_CH, CDS_24G_SCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] =	{
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+};
+
+/**
+ * second_connection_pcl_nodbs_table - table which provides PCL
+ * for the 2nd connection, when we have a connection already in
+ * the system (with DBS not supported by HW)
+ *
+ * Indexed as [existing-connection profile][new-connection mode]
+ * [concurrency priority].  Entries of CDS_MAX_PCL_TYPE mark
+ * combinations for which no PCL is defined (unsupported pairing).
+ */
+static const enum cds_pcl_type
+second_connection_pcl_nodbs_table[CDS_MAX_ONE_CONNECTION_MODE]
+			[CDS_MAX_NUM_OF_MODE][CDS_MAX_CONC_PRIORITY_MODE] = {
+	[CDS_STA_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_CLI_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH,    CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_SAP_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_24_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_24_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_IBSS_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+};
+
+/**
+ * third_connection_pcl_dbs_table - table which provides PCL for
+ * the 3rd connection, when we have two connections already in
+ * the system (with DBS supported by HW)
+ */
+static const enum cds_pcl_type
+third_connection_pcl_dbs_table[CDS_MAX_TWO_CONNECTION_MODE]
+			[CDS_MAX_NUM_OF_MODE][CDS_MAX_CONC_PRIORITY_MODE] = {
+	[CDS_STA_SAP_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#ifndef QCA_WIFI_3_0_EMU
+	[CDS_STA_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#else
+	[CDS_STA_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#endif
+	[CDS_STA_P2P_GO_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#ifndef QCA_WIFI_3_0_EMU
+	[CDS_STA_P2P_GO_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#else
+	[CDS_STA_P2P_GO_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#endif
+	[CDS_STA_P2P_CLI_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_IBSS_MODE] = {CDS_NONE, CDS_NONE, CDS_NONE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_MCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#ifndef QCA_WIFI_3_0_EMU
+	[CDS_STA_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#else
+	[CDS_STA_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#endif
+	[CDS_P2P_GO_P2P_CLI_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_5G_SCC_CH, CDS_5G_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_5G, CDS_5G_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH_24G, CDS_24G_SCC_CH, CDS_SCC_CH_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {CDS_24G,        CDS_24G, CDS_24G},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#ifndef QCA_WIFI_3_0_EMU
+	[CDS_P2P_GO_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24_5G, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#else
+	[CDS_P2P_GO_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_CLIENT_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_P2P_GO_MODE] = {
+		CDS_SCC_ON_5_SCC_ON_24, CDS_NONE, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#endif
+	[CDS_P2P_GO_SAP_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#ifndef QCA_WIFI_3_0_EMU
+	[CDS_P2P_GO_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_ON_5_SCC_ON_24_5G,
+	CDS_SCC_ON_5_SCC_ON_24_5G, CDS_SCC_ON_5_SCC_ON_24_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#else
+	[CDS_P2P_GO_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_ON_5_SCC_ON_24,
+	CDS_SCC_ON_5_SCC_ON_24, CDS_SCC_ON_5_SCC_ON_24},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+#endif
+};
+
+/**
+ * third_connection_pcl_nodbs_table - preferred channel list (PCL) lookup
+ * for the 3rd connection, when two connections already exist in the
+ * system and the HW does not support DBS (dual band simultaneous).
+ *
+ * Indexing: [existing two-connection combination]
+ *           [mode of the new (3rd) connection]
+ *           [concurrency priority mode]
+ * Value: the cds_pcl_type to use for the new connection;
+ * CDS_MAX_PCL_TYPE marks combinations for which no PCL is defined
+ * (unsupported/invalid 3rd-connection mode for that combination).
+ */
+static const enum cds_pcl_type
+third_connection_pcl_nodbs_table[CDS_MAX_TWO_CONNECTION_MODE]
+			[CDS_MAX_NUM_OF_MODE][CDS_MAX_CONC_PRIORITY_MODE] = {
+	[CDS_STA_SAP_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_SAP_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	/* DBS combinations carry no PCL in this table: the HW this
+	 * (nodbs) variant serves does not support DBS. */
+	[CDS_STA_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_GO_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	/* STA+P2P-CLI combinations: a 3rd STA is not supported
+	 * (CDS_MAX_PCL_TYPE in every CDS_STA_MODE row below). */
+	[CDS_STA_P2P_CLI_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_5G, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH_5G, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_STA_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_5G,        CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_5G_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {
+			CDS_5G_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_CLIENT_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_P2P_GO_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_P2P_CLI_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	/* P2P-GO+SAP combinations: only a 3rd STA gets a PCL here. */
+	[CDS_P2P_GO_SAP_SCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_SCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	/* NOTE(review): the SCC_5 rows below use CDS_MCC_CH for the STA
+	 * PCL, whereas every other SCC_* combination in this table uses
+	 * CDS_SCC_CH -- confirm this asymmetry is intentional. */
+	[CDS_P2P_GO_SAP_SCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_SCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH, CDS_MCC_CH, CDS_MCC_CH},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_24G, CDS_24G, CDS_24G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_5_1x1] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_MCC_24_5_2x2] = {
+	[CDS_STA_MODE] = {CDS_MCC_CH_5G, CDS_5G, CDS_5G},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+
+	[CDS_P2P_GO_SAP_DBS_1x1] = {
+	[CDS_STA_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_SAP_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_CLIENT_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_P2P_GO_MODE] = {
+			CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE},
+	[CDS_IBSS_MODE] = {
+		CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
+};
+
+/**
+ * next_action_two_connection_table - table which provides next
+ * action while a new connection is coming up, with one
+ * connection already in the system
+ *
+ * Rows are indexed by the existing connection's mode/band/chain
+ * configuration (CDS_MAX_ONE_CONNECTION_MODE entries).  Columns are
+ * indexed by band (CDS_MAX_BAND entries); judging by the data,
+ * column 0 corresponds to an incoming 2.4 GHz connection and
+ * column 1 to an incoming 5 GHz connection -- TODO confirm against
+ * the band enum.  CDS_DBS_DOWNGRADE presumably means DBS is only
+ * reachable after downgrading the existing 2x2 link to 1x1 --
+ * verify with the consumer of this table.
+ */
+static const enum cds_conc_next_action
+next_action_two_connection_table[CDS_MAX_ONE_CONNECTION_MODE][CDS_MAX_BAND] = {
+	[CDS_STA_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_CLI_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_CLI_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_CLI_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_CLI_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_GO_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_GO_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_GO_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_SAP_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_SAP_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_SAP_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_SAP_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	/* An existing IBSS connection never triggers a HW mode change */
+	[CDS_IBSS_24_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_IBSS_24_2x2] = {CDS_NOP,             CDS_NOP},
+	[CDS_IBSS_5_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_IBSS_5_2x2] = {CDS_NOP,             CDS_NOP},
+};
+
+/**
+ * next_action_three_connection_table - table which provides next
+ * action while a new connection is coming up, with two
+ * connections already in the system
+ *
+ * Rows are indexed by the existing pair's mode/concurrency/band/chain
+ * configuration (CDS_MAX_TWO_CONNECTION_MODE entries).  Columns are
+ * indexed by band (CDS_MAX_BAND entries); from the data, column 0
+ * appears to be an incoming 2.4 GHz connection and column 1 an
+ * incoming 5 GHz connection -- TODO confirm against the band enum.
+ * *_DBS_1x1 rows are already in DBS, so no further action is needed;
+ * 2x2 rows require CDS_DBS_DOWNGRADE before DBS can be entered --
+ * presumably; verify with the consumer of this table.
+ */
+static const enum cds_conc_next_action
+next_action_three_connection_table[CDS_MAX_TWO_CONNECTION_MODE]
+				[CDS_MAX_BAND] = {
+	[CDS_STA_SAP_SCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_SAP_SCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_SAP_MCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_SAP_MCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_SAP_SCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_SAP_SCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_SAP_MCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_SAP_MCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_SAP_MCC_24_5_1x1] = {CDS_DBS,             CDS_DBS},
+	[CDS_STA_SAP_MCC_24_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_DBS_DOWNGRADE},
+	[CDS_STA_SAP_DBS_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_STA_P2P_GO_SCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_P2P_GO_SCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_GO_MCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_P2P_GO_MCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_GO_SCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_P2P_GO_SCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_P2P_GO_MCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_P2P_GO_MCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_P2P_GO_MCC_24_5_1x1] = {CDS_DBS,             CDS_DBS},
+	[CDS_STA_P2P_GO_MCC_24_5_2x2] = {
+			CDS_DBS_DOWNGRADE,   CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_GO_DBS_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_STA_P2P_CLI_SCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_P2P_CLI_SCC_24_2x2] = {
+			CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_CLI_MCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_STA_P2P_CLI_MCC_24_2x2] = {
+			CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_CLI_SCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_P2P_CLI_SCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_P2P_CLI_MCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_STA_P2P_CLI_MCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_STA_P2P_CLI_MCC_24_5_1x1] = {CDS_DBS,             CDS_DBS},
+	[CDS_STA_P2P_CLI_MCC_24_5_2x2] = {
+			CDS_DBS_DOWNGRADE,   CDS_DBS_DOWNGRADE},
+	[CDS_STA_P2P_CLI_DBS_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_P2P_GO_P2P_CLI_SCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_GO_P2P_CLI_SCC_24_2x2] = {
+			CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_P2P_CLI_MCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_GO_P2P_CLI_MCC_24_2x2] = {
+			CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_P2P_CLI_SCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_GO_P2P_CLI_SCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_GO_P2P_CLI_MCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_GO_P2P_CLI_MCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1] = {CDS_DBS,             CDS_DBS},
+	[CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2] = {
+			CDS_DBS_DOWNGRADE,   CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_P2P_CLI_DBS_1x1] = {CDS_NOP,             CDS_NOP},
+	[CDS_P2P_GO_SAP_SCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_GO_SAP_SCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_SAP_MCC_24_1x1] = {CDS_NOP,             CDS_DBS},
+	[CDS_P2P_GO_SAP_MCC_24_2x2] = {CDS_NOP,             CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_SAP_SCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_GO_SAP_SCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_GO_SAP_MCC_5_1x1] = {CDS_DBS,             CDS_NOP},
+	[CDS_P2P_GO_SAP_MCC_5_2x2] = {CDS_DBS_DOWNGRADE,   CDS_NOP},
+	[CDS_P2P_GO_SAP_MCC_24_5_1x1] = {CDS_DBS,             CDS_DBS},
+	[CDS_P2P_GO_SAP_MCC_24_5_2x2] = {
+			CDS_DBS_DOWNGRADE,   CDS_DBS_DOWNGRADE},
+	[CDS_P2P_GO_SAP_DBS_1x1] = {CDS_NOP,             CDS_NOP},
+};
+
+/**
+ * cds_update_conc_list() - Update the concurrent connection list
+ * @conn_index: Connection index
+ * @mode: Mode
+ * @chan: Channel
+ * @mac: Mac id
+ * @chain_mask: Chain mask
+ * @tx_spatial_stream: Tx spatial stream
+ * @rx_spatial_stream: Rx spatial stream
+ * @original_nss: Original NSS value recorded for the connection
+ * @vdev_id: vdev id
+ * @in_use: Flag to indicate if the index is in use or not
+ *
+ * Updates the index value of the concurrent connection list.
+ * Out-of-range indices are rejected with an error log; no locking is
+ * done here, so the caller is expected to serialize access to
+ * conc_connection_list.
+ *
+ * Return: None
+ */
+static void cds_update_conc_list(uint32_t conn_index,
+		enum cds_con_mode mode,
+		uint8_t chan,
+		uint8_t mac,
+		enum cds_chain_mode chain_mask,
+		uint8_t tx_spatial_stream,
+		uint8_t rx_spatial_stream,
+		uint32_t original_nss,
+		uint32_t vdev_id,
+		bool in_use)
+{
+	/* Guard against writing past the fixed-size connection table */
+	if (conn_index >= MAX_NUMBER_OF_CONC_CONNECTIONS) {
+		cds_err("Number of connections exceeded conn_index: %d",
+			conn_index);
+		return;
+	}
+	conc_connection_list[conn_index].mode = mode;
+	conc_connection_list[conn_index].chan = chan;
+	conc_connection_list[conn_index].mac = mac;
+	conc_connection_list[conn_index].chain_mask = chain_mask;
+	conc_connection_list[conn_index].tx_spatial_stream = tx_spatial_stream;
+	conc_connection_list[conn_index].rx_spatial_stream = rx_spatial_stream;
+	conc_connection_list[conn_index].original_nss = original_nss;
+	conc_connection_list[conn_index].vdev_id = vdev_id;
+	conc_connection_list[conn_index].in_use = in_use;
+}
+
+/**
+ * cds_mode_specific_connection_count() - provides the
+ * count of connections of specific mode
+ * @hdd_ctx:	HDD Context
+ * @mode: type of connection
+ * @list: To provide the indices on conc_connection_list
+ *	(optional)
+ *
+ * Walks the in-use entries of conc_connection_list and counts those
+ * whose mode matches; when @list is non-NULL the matching indices are
+ * written into it in ascending order.
+ *
+ * Return: connection count of specific type
+ */
+static uint32_t cds_mode_specific_connection_count(hdd_context_t *hdd_ctx,
+						 enum cds_con_mode mode,
+						 uint32_t *list)
+{
+	uint32_t idx;
+	uint32_t matches = 0;
+
+	for (idx = 0; idx < MAX_NUMBER_OF_CONC_CONNECTIONS; idx++) {
+		if (!conc_connection_list[idx].in_use)
+			continue;
+		if (conc_connection_list[idx].mode != mode)
+			continue;
+		if (list != NULL)
+			list[matches] = idx;
+		matches++;
+	}
+
+	return matches;
+}
+
+/**
+ * cds_store_and_del_conn_info() - Store and del a connection info
+ * @hdd_ctx: HDD context
+ * @mode: Mode whose entry has to be deleted
+ * @info: Structure pointer where the connection info will be saved
+ *
+ * Saves the first connection entry that matches the provided mode and
+ * then deletes that entry (looked up by its vdev id) from the
+ * connection info structure.  The caller is expected to set the PCL
+ * and later restore the saved entry via
+ * cds_restore_deleted_conn_info().
+ *
+ * Return: None
+ */
+static void cds_store_and_del_conn_info(hdd_context_t *hdd_ctx,
+				      enum cds_con_mode mode,
+				      struct cds_conc_connection_info *info)
+{
+	uint32_t conn_index = 0;
+	bool found = false;
+
+	/* Only the first entry matching @mode is considered */
+	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+		if (mode == conc_connection_list[conn_index].mode) {
+			found = true;
+			break;
+		}
+		conn_index++;
+	}
+
+	if (!found) {
+		cds_err("Mode:%d not available in the conn info", mode);
+		return;
+	}
+
+	/* Storing the STA entry which will be temporarily deleted */
+	*info = conc_connection_list[conn_index];
+
+	/* Deleting the STA entry */
+	cds_decr_connection_count(hdd_ctx, info->vdev_id);
+
+	cds_info("Stored %d (%d), deleted STA entry with vdev id %d, index %d",
+		info->vdev_id, info->mode, info->vdev_id, conn_index);
+
+	/* Caller should set the PCL and restore the STA entry in conn info */
+}
+
+/**
+ * cds_restore_deleted_conn_info() - Restore connection info
+ * @hdd_ctx: HDD context
+ * @info: Saved connection info that is to be restored
+ *
+ * Restores the connection info of STA that was saved before
+ * updating the PCL to the FW (see cds_store_and_del_conn_info()).
+ * The saved entry is appended at the current connection-count index;
+ * restoration is skipped with an error log if the table is full.
+ *
+ * Return: None
+ */
+static void cds_restore_deleted_conn_info(hdd_context_t *hdd_ctx,
+					struct cds_conc_connection_info *info)
+{
+	uint32_t conn_index;
+
+	conn_index = cds_get_connection_count(hdd_ctx);
+	if (MAX_NUMBER_OF_CONC_CONNECTIONS <= conn_index) {
+		cds_err("Failed to restore the deleted information %d/%d",
+			conn_index, MAX_NUMBER_OF_CONC_CONNECTIONS);
+		return;
+	}
+
+	conc_connection_list[conn_index] = *info;
+
+	/* Fixed typo in log message ("deleleted" -> "deleted") */
+	cds_info("Restored the deleted conn info, vdev:%d, index:%d",
+		info->vdev_id, conn_index);
+}
+
+/**
+ * cds_update_hw_mode_conn_info() - Update connection info based on HW mode
+ * @num_vdev_mac_entries: Number of vdev-mac id entries that follow
+ * @vdev_mac_map: Mapping of vdev-mac id
+ * @hw_mode: HW mode
+ *
+ * For every vdev listed in @vdev_mac_map, looks up the matching entry
+ * in conc_connection_list (by vdev id) and refreshes its mac id and
+ * tx/rx spatial stream counts from @hw_mode.  Vdevs with no matching
+ * entry are silently skipped.  The list is updated while holding
+ * hdd_conc_list_lock.
+ *
+ * Return: None
+ */
+static void cds_update_hw_mode_conn_info(uint32_t num_vdev_mac_entries,
+				       struct sir_vdev_mac_map *vdev_mac_map,
+				       struct sir_hw_mode_params hw_mode)
+{
+	uint32_t i, conn_index, found;
+	hdd_context_t *hdd_ctx;
+
+	hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (0 != wlan_hdd_validate_context(hdd_ctx)) {
+		cds_err("Invalid HDD Context");
+		return;
+	}
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	for (i = 0; i < num_vdev_mac_entries; i++) {
+		conn_index = 0;
+		found = 0;
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			if (vdev_mac_map[i].vdev_id ==
+				conc_connection_list[conn_index].vdev_id) {
+				found = 1;
+				break;
+			}
+			conn_index++;
+		}
+		if (found) {
+			conc_connection_list[conn_index].mac =
+				vdev_mac_map[i].mac_id;
+			/* mac0 and mac1 can carry different SS configs */
+			if (vdev_mac_map[i].mac_id == 0) {
+				conc_connection_list[conn_index].
+					tx_spatial_stream = hw_mode.mac0_tx_ss;
+				conc_connection_list[conn_index].
+					rx_spatial_stream = hw_mode.mac0_rx_ss;
+			} else {
+				conc_connection_list[conn_index].
+					tx_spatial_stream = hw_mode.mac1_tx_ss;
+				conc_connection_list[conn_index].
+					rx_spatial_stream = hw_mode.mac1_rx_ss;
+			}
+			/* Fixed format-string typo ("rx ss;" -> "rx ss:") */
+			cds_info("vdev:%d, mac:%d, tx ss:%d, rx ss:%d",
+			  conc_connection_list[conn_index].vdev_id,
+			  conc_connection_list[conn_index].mac,
+			  conc_connection_list[conn_index].tx_spatial_stream,
+			  conc_connection_list[conn_index].rx_spatial_stream);
+		}
+	}
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+}
+
+/**
+ * cds_soc_set_dual_mac_cfg_cb() - Callback for set dual mac config
+ * @status: Status of set dual mac config
+ * @scan_config: Current scan config whose status is the first param
+ * @fw_mode_config: Current FW mode config whose status is the first param
+ *
+ * Invoked once the dual MAC configuration request completes; this
+ * handler only logs the reported outcome.
+ *
+ * Return: None
+ */
+void cds_soc_set_dual_mac_cfg_cb(enum set_hw_mode_status status,
+		uint32_t scan_config,
+		uint32_t fw_mode_config)
+{
+	cds_info("Status:%d for scan_config:%x fw_mode_config:%x",
+		 status, scan_config, fw_mode_config);
+}
+
+/**
+ * cds_set_dual_mac_scan_config() - Set the dual MAC scan config
+ * @hdd_ctx: HDD context
+ * @dbs_val: Value of DBS bit
+ * @dbs_plus_agile_scan_val: Value of DBS plus agile scan bit
+ * @single_mac_scan_with_dbs_val: Value of Single MAC scan with DBS
+ *
+ * Pushes a new scan configuration down to SME while keeping the
+ * currently configured FW mode bits unchanged (they are re-read from
+ * WMA below).
+ *
+ * Return: None
+ */
+void cds_set_dual_mac_scan_config(hdd_context_t *hdd_ctx,
+		uint8_t dbs_val,
+		uint8_t dbs_plus_agile_scan_val,
+		uint8_t single_mac_scan_with_dbs_val)
+{
+	struct sir_dual_mac_config dual_mac_cfg;
+	CDF_STATUS ret;
+
+	if (!hdd_ctx) {
+		cds_err("HDD context is NULL");
+		return;
+	}
+
+	/* Normalize every non-zero input to 1 */
+	dbs_val = dbs_val ? 1 : 0;
+	dbs_plus_agile_scan_val = dbs_plus_agile_scan_val ? 1 : 0;
+	single_mac_scan_with_dbs_val = single_mac_scan_with_dbs_val ? 1 : 0;
+
+	ret = wma_get_updated_scan_config(&dual_mac_cfg.scan_config,
+					  dbs_val,
+					  dbs_plus_agile_scan_val,
+					  single_mac_scan_with_dbs_val);
+	if (ret != CDF_STATUS_SUCCESS) {
+		cds_err("wma_get_updated_scan_config failed %d", ret);
+		return;
+	}
+
+	/* Re-fetch the FW mode bits so they are carried over unchanged */
+	ret = wma_get_updated_fw_mode_config(&dual_mac_cfg.fw_mode_config,
+					     wma_get_dbs_config(),
+					     wma_get_agile_dfs_config());
+	if (ret != CDF_STATUS_SUCCESS) {
+		cds_err("wma_get_updated_fw_mode_config failed %d", ret);
+		return;
+	}
+
+	dual_mac_cfg.set_dual_mac_cb = (void *)cds_soc_set_dual_mac_cfg_cb;
+
+	cds_info("scan_config:%x fw_mode_config:%x",
+		 dual_mac_cfg.scan_config, dual_mac_cfg.fw_mode_config);
+
+	ret = sme_soc_set_dual_mac_config(hdd_ctx->hHal, dual_mac_cfg);
+	if (ret != CDF_STATUS_SUCCESS)
+		cds_err("sme_soc_set_dual_mac_config failed %d", ret);
+}
+
+/**
+ * cds_set_dual_mac_fw_mode_config() - Set the dual mac FW mode config
+ * @hdd_ctx: HDD context
+ * @dbs: DBS bit
+ * @dfs: Agile DFS bit
+ *
+ * Set the values of fw mode config. For scan config, the existing
+ * values will be retained (they are re-read from WMA below).
+ *
+ * Return: None
+ */
+void cds_set_dual_mac_fw_mode_config(hdd_context_t *hdd_ctx,
+		uint8_t dbs,
+		uint8_t dfs)
+{
+	struct sir_dual_mac_config cfg;
+	CDF_STATUS status;
+
+	if (!hdd_ctx) {
+		cds_err("HDD context is NULL");
+		return;
+	}
+
+	/* Any non-zero positive value is treated as 1 */
+	if (dbs != 0)
+		dbs = 1;
+	if (dfs != 0)
+		dfs = 1;
+
+	/* NOTE(review): this getter is named ..._with_dfs_config() while
+	 * the sibling setter cds_set_dual_mac_scan_config() takes a
+	 * ..._with_dbs_val parameter -- confirm the WMA API naming is
+	 * intentional and the right knob is being carried over. */
+	status = wma_get_updated_scan_config(&cfg.scan_config,
+			wma_get_dbs_scan_config(),
+			wma_get_dbs_plus_agile_scan_config(),
+			wma_get_single_mac_scan_with_dfs_config());
+	if (status != CDF_STATUS_SUCCESS) {
+		cds_err("wma_get_updated_scan_config failed %d", status);
+		return;
+	}
+
+	status = wma_get_updated_fw_mode_config(&cfg.fw_mode_config,
+			dbs, dfs);
+	if (status != CDF_STATUS_SUCCESS) {
+		cds_err("wma_get_updated_fw_mode_config failed %d", status);
+		return;
+	}
+
+	cfg.set_dual_mac_cb = (void *)cds_soc_set_dual_mac_cfg_cb;
+
+	cds_info("scan_config:%x fw_mode_config:%x",
+			cfg.scan_config, cfg.fw_mode_config);
+
+	status = sme_soc_set_dual_mac_config(hdd_ctx->hHal, cfg);
+	if (status != CDF_STATUS_SUCCESS) {
+		cds_err("sme_soc_set_dual_mac_config failed %d", status);
+		return;
+	}
+}
+
+/**
+ * cds_soc_set_hw_mode_cb() - Callback for set hw mode
+ * @status: Status
+ * @cfgd_hw_mode_index: Configured HW mode index
+ * @num_vdev_mac_entries: Number of vdev-mac id mapping that follows
+ * @vdev_mac_map: vdev-mac id map. This memory will be freed by the
+ * caller, so make a local copy if needed.
+ *
+ * Handles the FW response to a set-hw-mode request: logs the applied
+ * configuration, refreshes conc_connection_list and signals the
+ * connection-update-done event.
+ *
+ * Return: None
+ */
+static void cds_soc_set_hw_mode_cb(uint32_t status,
+				 uint32_t cfgd_hw_mode_index,
+				 uint32_t num_vdev_mac_entries,
+				 struct sir_vdev_mac_map *vdev_mac_map)
+{
+	CDF_STATUS ret;
+	struct sir_hw_mode_params hw_mode;
+	uint32_t entry;
+	p_cds_contextType cds_context;
+
+	cds_context = cds_get_global_context();
+	if (!cds_context) {
+		cds_err("Invalid CDS context");
+		return;
+	}
+
+	if (status != SET_HW_MODE_STATUS_OK) {
+		cds_err("Set HW mode failed with status %d", status);
+		return;
+	}
+
+	if (!vdev_mac_map) {
+		cds_err("vdev_mac_map is NULL");
+		return;
+	}
+
+	cds_info("cfgd_hw_mode_index=%d", cfgd_hw_mode_index);
+	for (entry = 0; entry < num_vdev_mac_entries; entry++)
+		cds_info("vdev_id:%d mac_id:%d",
+			 vdev_mac_map[entry].vdev_id,
+			 vdev_mac_map[entry].mac_id);
+
+	ret = wma_get_hw_mode_from_idx(cfgd_hw_mode_index, &hw_mode);
+	if (ret != CDF_STATUS_SUCCESS) {
+		cds_err("Get HW mode failed: %d", ret);
+		return;
+	}
+
+	cds_info("MAC0: TxSS:%d, RxSS:%d, Bw:%d",
+		hw_mode.mac0_tx_ss, hw_mode.mac0_rx_ss, hw_mode.mac0_bw);
+	cds_info("MAC1: TxSS:%d, RxSS:%d, Bw:%d",
+		hw_mode.mac1_tx_ss, hw_mode.mac1_rx_ss, hw_mode.mac1_bw);
+	cds_info("DBS:%d, Agile DFS:%d",
+		hw_mode.dbs_cap, hw_mode.agile_dfs_cap);
+
+	/* Reflect the new HW mode in conc_connection_list */
+	cds_update_hw_mode_conn_info(num_vdev_mac_entries, vdev_mac_map,
+				     hw_mode);
+
+	/* Unblock any waiter of the connection-update transaction */
+	ret = cdf_event_set(&cds_context->connection_update_done_evt);
+	if (!CDF_IS_STATUS_SUCCESS(ret))
+		cds_err("ERROR: set connection_update_done event failed");
+}
+
+/**
+ * cds_hw_mode_transition_cb() - Callback for HW mode transition from FW
+ * @old_hw_mode_index: Old HW mode index
+ * @new_hw_mode_index: New HW mode index
+ * @num_vdev_mac_entries: Number of vdev-mac id mapping that follows
+ * @vdev_mac_map: vdev-mac id map. This memory will be freed by the
+ * caller, so make a local copy if needed.
+ *
+ * Logs the FW-driven HW mode change and refreshes
+ * conc_connection_list with the parameters of the new mode.
+ *
+ * Return: None
+ */
+static void cds_hw_mode_transition_cb(uint32_t old_hw_mode_index,
+				 uint32_t new_hw_mode_index,
+				 uint32_t num_vdev_mac_entries,
+				 struct sir_vdev_mac_map *vdev_mac_map)
+{
+	CDF_STATUS ret;
+	struct sir_hw_mode_params hw_mode;
+	uint32_t entry;
+
+	if (!vdev_mac_map) {
+		cds_err("vdev_mac_map is NULL");
+		return;
+	}
+
+	cds_info("old_hw_mode_index=%d, new_hw_mode_index=%d",
+		old_hw_mode_index, new_hw_mode_index);
+	for (entry = 0; entry < num_vdev_mac_entries; entry++)
+		cds_info("vdev_id:%d mac_id:%d",
+			vdev_mac_map[entry].vdev_id,
+			vdev_mac_map[entry].mac_id);
+
+	ret = wma_get_hw_mode_from_idx(new_hw_mode_index, &hw_mode);
+	if (ret != CDF_STATUS_SUCCESS) {
+		cds_err("Get HW mode failed: %d", ret);
+		return;
+	}
+
+	cds_info("MAC0: TxSS:%d, RxSS:%d, Bw:%d",
+		hw_mode.mac0_tx_ss, hw_mode.mac0_rx_ss, hw_mode.mac0_bw);
+	cds_info("MAC1: TxSS:%d, RxSS:%d, Bw:%d",
+		hw_mode.mac1_tx_ss, hw_mode.mac1_rx_ss, hw_mode.mac1_bw);
+	cds_info("DBS:%d, Agile DFS:%d",
+		hw_mode.dbs_cap, hw_mode.agile_dfs_cap);
+
+	/* Reflect the transition in conc_connection_list */
+	cds_update_hw_mode_conn_info(num_vdev_mac_entries, vdev_mac_map,
+				     hw_mode);
+}
+
+/**
+ * cds_soc_set_hw_mode() - Set HW mode command to SME
+ * @hdd_ctx: HDD context
+ * @mac0_ss: MAC0 spatial stream configuration
+ * @mac0_bw: MAC0 bandwidth configuration
+ * @mac1_ss: MAC1 spatial stream configuration
+ * @mac1_bw: MAC1 bandwidth configuration
+ * @dbs: HW DBS capability
+ * @dfs: HW Agile DFS capability
+ *
+ * Looks up the index of the requested HW mode in the DBS HW mode list
+ * and hands a set-hw-mode request to SME, which forwards it to WMA.
+ *
+ * e.g.: To configure 2x2_80
+ *       mac0_ss = HW_MODE_SS_2x2, mac0_bw = HW_MODE_80_MHZ
+ *       mac1_ss = HW_MODE_SS_0x0, mac1_bw = HW_MODE_BW_NONE
+ *       dbs = HW_MODE_DBS_NONE, dfs = HW_MODE_AGILE_DFS_NONE
+ * e.g.: To configure 1x1_80_1x1_40 (DBS)
+ *       mac0_ss = HW_MODE_SS_1x1, mac0_bw = HW_MODE_80_MHZ
+ *       mac1_ss = HW_MODE_SS_1x1, mac1_bw = HW_MODE_40_MHZ
+ *       dbs = HW_MODE_DBS, dfs = HW_MODE_AGILE_DFS_NONE
+ * e.g.: To configure 1x1_80_1x1_40 (Agile DFS)
+ *       same as above with dfs = HW_MODE_AGILE_DFS
+ *
+ * Return: Success if the message made it down to the next layer
+ */
+CDF_STATUS cds_soc_set_hw_mode(hdd_context_t *hdd_ctx,
+		enum hw_mode_ss_config mac0_ss,
+		enum hw_mode_bandwidth mac0_bw,
+		enum hw_mode_ss_config mac1_ss,
+		enum hw_mode_bandwidth mac1_bw,
+		enum hw_mode_dbs_capab dbs,
+		enum hw_mode_agile_dfs_capab dfs)
+{
+	int8_t mode_idx;
+	struct sir_hw_mode cmd;
+	CDF_STATUS ret;
+
+	if (!hdd_ctx) {
+		cds_err("Invalid HDD context");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	mode_idx = wma_get_hw_mode_idx_from_dbs_hw_list(mac0_ss, mac0_bw,
+			mac1_ss, mac1_bw, dbs, dfs);
+	if (mode_idx < 0) {
+		cds_err("Invalid HW mode index obtained");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	cmd.hw_mode_index = mode_idx;
+	cmd.set_hw_mode_cb = (void *)cds_soc_set_hw_mode_cb;
+
+	cds_info("set hw mode to sme: hw_mode_index: %d",
+		cmd.hw_mode_index);
+
+	ret = sme_soc_set_hw_mode(hdd_ctx->hHal, cmd);
+	if (ret != CDF_STATUS_SUCCESS)
+		cds_err("Failed to set hw mode to SME");
+
+	return ret;
+}
+
+/**
+ * cds_is_connection_in_progress() - check if connection is in progress
+ * @hdd_ctx - HDD context
+ *
+ * Go through each adapter and check if Connection is in progress.
+ * A "connection in progress" here means any of: BTCoex mode being
+ * set, a STA/P2P client mid-association, a STA mid-roam, a STA/P2P
+ * client associated but not yet authenticated (WPS/EAPOL), a SAP/GO
+ * peer stuck in the connecting TL state, or the HDD
+ * connection_in_progress flag for AP/GO.
+ *
+ * Return: true if connection is in progress else false
+ */
+bool cds_is_connection_in_progress(hdd_context_t *hdd_ctx)
+{
+	hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+	hdd_station_ctx_t *hdd_sta_ctx = NULL;
+	hdd_adapter_t *adapter = NULL;
+	CDF_STATUS status = 0;
+	uint8_t sta_id = 0;
+	uint8_t *sta_mac = NULL;
+
+	if (true == hdd_ctx->btCoexModeSet) {
+		cds_info("BTCoex Mode operation in progress");
+		return true;
+	}
+	status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
+	while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
+		adapter = adapter_node->pAdapter;
+		if (!adapter)
+			goto end;
+
+		cds_info("Adapter with device mode %s(%d) exists",
+			hdd_device_mode_to_string(adapter->device_mode),
+			adapter->device_mode);
+		/* STA-like adapter still in the Connecting state */
+		if (((WLAN_HDD_INFRA_STATION == adapter->device_mode)
+			|| (WLAN_HDD_P2P_CLIENT == adapter->device_mode)
+			|| (WLAN_HDD_P2P_DEVICE == adapter->device_mode))
+			&& (eConnectionState_Connecting ==
+				(WLAN_HDD_GET_STATION_CTX_PTR(adapter))->
+					conn_info.connState)) {
+			cds_err("%p(%d) Connection is in progress",
+				WLAN_HDD_GET_STATION_CTX_PTR(adapter),
+				adapter->sessionId);
+			return true;
+		}
+		/* STA mid-roam per SME's neighbor-roaming state machine */
+		if ((WLAN_HDD_INFRA_STATION == adapter->device_mode) &&
+				sme_neighbor_middle_of_roaming(
+					WLAN_HDD_GET_HAL_CTX(adapter),
+					adapter->sessionId)) {
+			cds_err("%p(%d) Reassociation in progress",
+				WLAN_HDD_GET_STATION_CTX_PTR(adapter),
+				adapter->sessionId);
+			return true;
+		}
+		if ((WLAN_HDD_INFRA_STATION == adapter->device_mode) ||
+			(WLAN_HDD_P2P_CLIENT == adapter->device_mode) ||
+			(WLAN_HDD_P2P_DEVICE == adapter->device_mode)) {
+			hdd_sta_ctx =
+				WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+			/* Associated but key exchange not yet complete */
+			if ((eConnectionState_Associated ==
+				hdd_sta_ctx->conn_info.connState)
+				&& (false ==
+				hdd_sta_ctx->conn_info.uIsAuthenticated)) {
+				sta_mac = (uint8_t *)
+					&(adapter->macAddressCurrent.bytes[0]);
+				cds_err("client " MAC_ADDRESS_STR
+					" is in middle of WPS/EAPOL exchange.",
+					MAC_ADDR_ARRAY(sta_mac));
+				return true;
+			}
+		} else if ((WLAN_HDD_SOFTAP == adapter->device_mode) ||
+				(WLAN_HDD_P2P_GO == adapter->device_mode)) {
+			/* Any SAP/GO peer still in the conn TL state means
+			 * its WPS/EAPOL exchange has not finished */
+			for (sta_id = 0; sta_id < WLAN_MAX_STA_COUNT;
+				sta_id++) {
+				if (!((adapter->aStaInfo[sta_id].isUsed)
+				    && (ol_txrx_peer_state_conn ==
+				    adapter->aStaInfo[sta_id].tlSTAState)))
+					continue;
+
+				sta_mac = (uint8_t *)
+						&(adapter->aStaInfo[sta_id].
+							macAddrSTA.bytes[0]);
+				cds_err("client " MAC_ADDRESS_STR
+				" of SAP/GO is in middle of WPS/EAPOL exchange",
+				MAC_ADDR_ARRAY(sta_mac));
+				return true;
+			}
+			if (hdd_ctx->connection_in_progress) {
+				cds_err("AP/GO: connection is in progress");
+				return true;
+			}
+		}
+end:
+		status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next);
+		adapter_node = next;
+	}
+	return false;
+}
+
+/**
+ * cds_dump_current_concurrency_one_connection() - To dump the
+ * current concurrency info with one connection
+ * @hdd_ctx: HDD context
+ * @cc_mode: connection string
+ * @length: Maximum size of the string
+ *
+ * Appends the textual name of connection 0's mode to @cc_mode.
+ *
+ * Return: length of the string
+ */
+static uint32_t cds_dump_current_concurrency_one_connection(
+			hdd_context_t *hdd_ctx, char *cc_mode, uint32_t length)
+{
+	const char *tag;
+
+	switch (conc_connection_list[0].mode) {
+	case CDS_STA_MODE:
+		tag = "STA";
+		break;
+	case CDS_SAP_MODE:
+		tag = "SAP";
+		break;
+	case CDS_P2P_CLIENT_MODE:
+		tag = "P2P CLI";
+		break;
+	case CDS_P2P_GO_MODE:
+		tag = "P2P GO";
+		break;
+	case CDS_IBSS_MODE:
+		tag = "IBSS";
+		break;
+	default:
+		cds_err("unexpected mode %d", conc_connection_list[0].mode);
+		return 0;
+	}
+
+	return strlcat(cc_mode, tag, length);
+}
+
+/**
+ * cds_dump_current_concurrency_two_connection() - To dump the
+ * current concurrency info with two connections
+ * @hdd_ctx: HDD context
+ * @cc_mode: connection string
+ * @length: Maximum size of the string
+ *
+ * Builds the string for connection 0 and appends connection 1's mode
+ * (prefixed with '+') to it.
+ *
+ * Return: length of the string
+ */
+static uint32_t cds_dump_current_concurrency_two_connection(
+			hdd_context_t *hdd_ctx, char *cc_mode, uint32_t length)
+{
+	const char *tag;
+	uint32_t count;
+
+	switch (conc_connection_list[1].mode) {
+	case CDS_STA_MODE:
+		tag = "+STA";
+		break;
+	case CDS_SAP_MODE:
+		tag = "+SAP";
+		break;
+	case CDS_P2P_CLIENT_MODE:
+		tag = "+P2P CLI";
+		break;
+	case CDS_P2P_GO_MODE:
+		tag = "+P2P GO";
+		break;
+	case CDS_IBSS_MODE:
+		tag = "+IBSS";
+		break;
+	default:
+		cds_err("unexpected mode %d", conc_connection_list[1].mode);
+		return 0;
+	}
+
+	count = cds_dump_current_concurrency_one_connection(hdd_ctx, cc_mode,
+							    length);
+	count += strlcat(cc_mode, tag, length);
+	return count;
+}
+
+/**
+ * cds_dump_current_concurrency_three_connection() - To dump the
+ * current concurrency info with three connections
+ * @hdd_ctx: HDD context
+ * @cc_mode: connection string
+ * @length: Maximum size of the string
+ *
+ * Builds the string for connections 0 and 1, then appends connection
+ * 2's mode (prefixed with '+') to it.
+ *
+ * Return: length of the string
+ */
+static uint32_t cds_dump_current_concurrency_three_connection(
+			hdd_context_t *hdd_ctx, char *cc_mode, uint32_t length)
+{
+	const char *tag;
+	uint32_t count;
+
+	switch (conc_connection_list[2].mode) {
+	case CDS_STA_MODE:
+		tag = "+STA";
+		break;
+	case CDS_SAP_MODE:
+		tag = "+SAP";
+		break;
+	case CDS_P2P_CLIENT_MODE:
+		tag = "+P2P CLI";
+		break;
+	case CDS_P2P_GO_MODE:
+		tag = "+P2P GO";
+		break;
+	case CDS_IBSS_MODE:
+		tag = "+IBSS";
+		break;
+	default:
+		cds_err("unexpected mode %d", conc_connection_list[2].mode);
+		return 0;
+	}
+
+	count = cds_dump_current_concurrency_two_connection(hdd_ctx, cc_mode,
+							    length);
+	count += strlcat(cc_mode, tag, length);
+	return count;
+}
+
+/**
+ * cds_dump_dbs_concurrency() - To dump the dbs concurrency
+ * combination
+ * @cc_mode: connection string
+ * @length: Maximum size of the string
+ *
+ * Appends " DBS" to @cc_mode and then, for every pair of connections
+ * that shares a MAC, appends whether that pair is SCC (same channel)
+ * or MCC (different channels) and on which MAC.
+ *
+ * Return: None
+ */
+static void cds_dump_dbs_concurrency(char *cc_mode, uint32_t length)
+{
+	/* The pairs (0,1), (0,2) and (1,2) checked by the original code */
+	static const uint8_t pair[][2] = { {0, 1}, {0, 2}, {1, 2} };
+	uint32_t i;
+
+	strlcat(cc_mode, " DBS", length);
+	for (i = 0; i < sizeof(pair) / sizeof(pair[0]); i++) {
+		uint8_t first = pair[i][0];
+		uint8_t second = pair[i][1];
+		const char *note;
+
+		if (conc_connection_list[first].mac !=
+		    conc_connection_list[second].mac)
+			continue;
+
+		if (conc_connection_list[first].chan ==
+		    conc_connection_list[second].chan)
+			note = (0 == conc_connection_list[first].mac) ?
+				" with SCC on mac0" : " with SCC on mac1";
+		else
+			note = (0 == conc_connection_list[first].mac) ?
+				" with MCC on mac0" : " with MCC on mac1";
+
+		strlcat(cc_mode, note, length);
+	}
+}
+
+/**
+ * cds_dump_current_concurrency() - To dump the current
+ * concurrency combination
+ * @hdd_ctx: HDD context
+ *
+ * This routine is called to dump the concurrency info.  It builds a
+ * human-readable mode string for the 1-3 active connections and tags
+ * it with SCC/MCC/DBS based on channel and mac equality, then emits
+ * it via cds_err().
+ *
+ * Return: None
+ */
+static void cds_dump_current_concurrency(hdd_context_t *hdd_ctx)
+{
+	uint32_t num_connections = 0;
+	char cc_mode[CDS_MAX_CON_STRING_LEN] = {0};
+	/* Running strlcat length; currently informational only */
+	uint32_t count = 0;
+
+	num_connections = cds_get_connection_count(hdd_ctx);
+
+	switch (num_connections) {
+	case 1:
+		cds_dump_current_concurrency_one_connection(hdd_ctx, cc_mode,
+					sizeof(cc_mode));
+		cds_err("%s Standalone", cc_mode);
+		break;
+	case 2:
+		count = cds_dump_current_concurrency_two_connection(
+			hdd_ctx, cc_mode, sizeof(cc_mode));
+		/* Same channel => SCC; same mac, different chan => MCC;
+		 * otherwise the two links are split across MACs (DBS) */
+		if (conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) {
+			strlcat(cc_mode, " SCC", sizeof(cc_mode));
+		} else if (conc_connection_list[0].mac ==
+					conc_connection_list[1].mac) {
+			strlcat(cc_mode, " MCC", sizeof(cc_mode));
+		} else
+			strlcat(cc_mode, " DBS", sizeof(cc_mode));
+		cds_err("%s", cc_mode);
+		break;
+	case 3:
+		count = cds_dump_current_concurrency_three_connection(
+			hdd_ctx, cc_mode, sizeof(cc_mode));
+		if ((conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) &&
+			(conc_connection_list[0].chan ==
+				conc_connection_list[2].chan)){
+				strlcat(cc_mode, " SCC",
+						sizeof(cc_mode));
+		} else if ((conc_connection_list[0].mac ==
+				conc_connection_list[1].mac)
+				&& (conc_connection_list[0].mac ==
+					conc_connection_list[2].mac)) {
+					strlcat(cc_mode, " MCC on single MAC",
+						sizeof(cc_mode));
+		} else {
+			/* Mixed MAC assignment: describe per-pair SCC/MCC */
+			cds_dump_dbs_concurrency(cc_mode, sizeof(cc_mode));
+		}
+		cds_err("%s", cc_mode);
+		break;
+	default:
+		/* err msg */
+		cds_err("unexpected num_connections value %d",
+			num_connections);
+		break;
+	}
+
+	return;
+}
+
+/**
+ * cds_current_concurrency_is_scc() - To check the current
+ * concurrency combination if it is doing SCC
+ * @hdd_ctx: HDD context
+ *
+ * Checks whether all current connections share the same operating
+ * channel (SCC).
+ *
+ * Return: True - SCC, False - Otherwise
+ */
+static bool cds_current_concurrency_is_scc(hdd_context_t *hdd_ctx)
+{
+	uint32_t connection_count = cds_get_connection_count(hdd_ctx);
+	bool scc = false;
+
+	switch (connection_count) {
+	case 1:
+		/* a single connection is trivially on one channel */
+		scc = true;
+		break;
+	case 2:
+		scc = (conc_connection_list[0].chan ==
+		       conc_connection_list[1].chan);
+		break;
+	case 3:
+		scc = ((conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) &&
+		       (conc_connection_list[0].chan ==
+			conc_connection_list[2].chan));
+		break;
+	default:
+		cds_err("unexpected num_connections value %d",
+			connection_count);
+		break;
+	}
+
+	return scc;
+}
+
+/**
+ * cds_dump_legacy_concurrency() - To dump the current
+ * concurrency combination
+ * @hdd_ctx: HDD context
+ * @sta_channel: Channel STA connection has come up
+ * @ap_channel: Channel SAP connection has come up
+ * @p2p_channel: Channel P2P connection has come up
+ * @sta_bssid: BSSID to which STA is connected to
+ * @p2p_bssid: BSSID to which P2P is connected to
+ * @ap_bssid: BSSID of the AP
+ * @p2p_mode: P2P Client or GO
+ *
+ * This routine is called to dump the concurrency info. A channel value
+ * of 0 means the corresponding interface is not active. As a side
+ * effect it refreshes hdd_ctx->mcc_mode, which is consumed by IPA.
+ *
+ * Return: None
+ */
+static void cds_dump_legacy_concurrency(hdd_context_t *hdd_ctx,
+		uint8_t sta_channel, uint8_t ap_channel, uint8_t p2p_channel,
+		struct cdf_mac_addr sta_bssid, struct cdf_mac_addr p2p_bssid,
+		struct cdf_mac_addr ap_bssid, const char *p2p_mode)
+{
+	const char *cc_mode = "Standalone";
+
+	if (sta_channel > 0) {
+		if (ap_channel > 0) {
+			if (p2p_channel > 0) {
+				/* STA + AP + P2P */
+				if (p2p_channel == sta_channel
+					&& ap_channel == sta_channel) {
+					cc_mode = "STA+AP+P2P SCC";
+				} else {
+					/* at most one pair shares a channel;
+					 * the third interface is MCC
+					 */
+					if (p2p_channel == sta_channel)
+						cc_mode =
+							"STA+P2P SCC, SAP MCC";
+					else if (ap_channel == sta_channel)
+						cc_mode =
+							"STA+SAP SCC, P2P MCC";
+					else if (ap_channel == p2p_channel)
+						cc_mode =
+							"P2P+SAP SCC, STA MCC";
+				}
+			} else {
+				/* STA + AP */
+				cc_mode = (ap_channel == sta_channel) ?
+						"SCC" : "MCC";
+			}
+		} else {
+			if (p2p_channel > 0) {
+				/* STA + P2P */
+				cc_mode = (p2p_channel == sta_channel) ?
+						"SCC" : "MCC";
+			}
+		}
+	} else {
+		if (ap_channel > 0) {
+			if (p2p_channel > 0) {
+				/* AP + P2P */
+				cc_mode = (p2p_channel == ap_channel) ?
+						"SCC" : "MCC";
+			}
+		}
+	}
+	/* one log line per active interface, all tagged with cc_mode */
+	if (sta_channel > 0)
+		cds_err("wlan(%d) " MAC_ADDRESS_STR " %s",
+			sta_channel, MAC_ADDR_ARRAY(sta_bssid.bytes), cc_mode);
+
+	if (p2p_channel > 0)
+		cds_err("p2p-%s(%d) " MAC_ADDRESS_STR " %s",
+			p2p_mode, p2p_channel, MAC_ADDR_ARRAY(p2p_bssid.bytes),
+			cc_mode);
+
+	if (ap_channel > 0)
+		cds_err("AP(%d) " MAC_ADDRESS_STR " %s",
+			ap_channel, MAC_ADDR_ARRAY(ap_bssid.bytes), cc_mode);
+
+	/* Non-zero strcmp() result (true) marks MCC. NOTE(review): only the
+	 * exact string "SCC" compares equal, so the three-way
+	 * "STA+AP+P2P SCC" case is still recorded as MCC -- confirm intended.
+	 */
+	hdd_ctx->mcc_mode = strcmp(cc_mode, "SCC");
+}
+
+/**
+ * cds_dump_concurrency_info() - To dump concurrency info
+ * @hdd_ctx: HDD context
+ *
+ * This routine is called to dump the concurrency info. It walks the
+ * adapter list capturing the operating channel and BSSID of each active
+ * STA/P2P/SAP interface; when QCA_LL_LEGACY_TX_FLOW_CONTROL is compiled
+ * in, it also reprograms per-adapter TX flow-control watermarks and
+ * pause-queue depths depending on whether the adapters are SCC or MCC.
+ * Finally it logs the concurrency combination and refreshes
+ * hdd_ctx->mcc_mode.
+ *
+ * Return: None
+ */
+void cds_dump_concurrency_info(hdd_context_t *hdd_ctx)
+{
+	hdd_adapter_list_node_t *adapterNode = NULL, *pNext = NULL;
+	CDF_STATUS status;
+	hdd_adapter_t *adapter;
+	hdd_station_ctx_t *pHddStaCtx;
+	hdd_ap_ctx_t *hdd_ap_ctx;
+	hdd_hostapd_state_t *hostapd_state;
+	/* channel == 0 below means that interface type is not active */
+	struct cdf_mac_addr staBssid = CDF_MAC_ADDR_ZERO_INITIALIZER;
+	struct cdf_mac_addr p2pBssid = CDF_MAC_ADDR_ZERO_INITIALIZER;
+	struct cdf_mac_addr apBssid = CDF_MAC_ADDR_ZERO_INITIALIZER;
+	uint8_t staChannel = 0, p2pChannel = 0, apChannel = 0;
+	const char *p2pMode = "DEV";
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+	uint8_t targetChannel = 0;
+	uint8_t preAdapterChannel = 0;
+	uint8_t channel24;
+	uint8_t channel5;
+	hdd_adapter_t *preAdapterContext = NULL;
+	hdd_adapter_t *adapter2_4 = NULL;
+	hdd_adapter_t *adapter5 = NULL;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+	status = hdd_get_front_adapter(hdd_ctx, &adapterNode);
+	while (NULL != adapterNode && CDF_STATUS_SUCCESS == status) {
+		adapter = adapterNode->pAdapter;
+		switch (adapter->device_mode) {
+		case WLAN_HDD_INFRA_STATION:
+			pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+			if (eConnectionState_Associated ==
+			    pHddStaCtx->conn_info.connState) {
+				staChannel =
+					pHddStaCtx->conn_info.operationChannel;
+				cdf_copy_macaddr(&staBssid,
+						 &pHddStaCtx->conn_info.bssId);
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+				targetChannel = staChannel;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+			}
+			break;
+		case WLAN_HDD_P2P_CLIENT:
+			pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+			if (eConnectionState_Associated ==
+			    pHddStaCtx->conn_info.connState) {
+				p2pChannel =
+					pHddStaCtx->conn_info.operationChannel;
+				cdf_copy_macaddr(&p2pBssid,
+						&pHddStaCtx->conn_info.bssId);
+				p2pMode = "CLI";
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+				targetChannel = p2pChannel;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+			}
+			break;
+		case WLAN_HDD_P2P_GO:
+			hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter);
+			hostapd_state = WLAN_HDD_GET_HOSTAP_STATE_PTR(adapter);
+			if (hostapd_state->bssState == BSS_START
+			    && hostapd_state->cdf_status ==
+			    CDF_STATUS_SUCCESS) {
+				p2pChannel = hdd_ap_ctx->operatingChannel;
+				cdf_copy_macaddr(&p2pBssid,
+						 &adapter->macAddressCurrent);
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+				targetChannel = p2pChannel;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+			}
+			p2pMode = "GO";
+			break;
+		case WLAN_HDD_SOFTAP:
+			hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter);
+			hostapd_state = WLAN_HDD_GET_HOSTAP_STATE_PTR(adapter);
+			if (hostapd_state->bssState == BSS_START
+			    && hostapd_state->cdf_status ==
+			    CDF_STATUS_SUCCESS) {
+				apChannel = hdd_ap_ctx->operatingChannel;
+				cdf_copy_macaddr(&apBssid,
+						&adapter->macAddressCurrent);
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+				targetChannel = apChannel;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+			}
+			break;
+		case WLAN_HDD_IBSS:
+			/* NOTE(review): this returns from the whole function,
+			 * also skipping the concurrency dump at the end --
+			 * confirm that is intended for IBSS.
+			 */
+			return; /* skip printing station message below */
+		default:
+			break;
+		}
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+		if (targetChannel) {
+			/*
+			 * This is first adapter detected as active
+			 * set as default for none concurrency case
+			 */
+			if (!preAdapterChannel) {
+				/* If IPA UC data path is enabled,
+				 * target should reserve extra tx descriptors
+				 * for IPA data path.
+				 * Then host data path should allow less TX
+				 * packet pumping in case IPA
+				 * data path enabled
+				 */
+				if (hdd_ipa_uc_is_enabled(hdd_ctx) &&
+				    (WLAN_HDD_SOFTAP == adapter->device_mode)) {
+					adapter->tx_flow_low_watermark =
+					hdd_ctx->config->TxFlowLowWaterMark +
+					WLAN_TFC_IPAUC_TX_DESC_RESERVE;
+				} else {
+					adapter->tx_flow_low_watermark =
+						hdd_ctx->config->
+							TxFlowLowWaterMark;
+				}
+				adapter->tx_flow_high_watermark_offset =
+				   hdd_ctx->config->TxFlowHighWaterMarkOffset;
+				ol_txrx_ll_set_tx_pause_q_depth(
+					adapter->sessionId,
+					hdd_ctx->config->TxFlowMaxQueueDepth);
+				/* Temporary set log level as error
+				 * TX Flow control feature settled down,
+				 * will lower log level
+				 */
+				cds_err("MODE %d,CH %d,LWM %d,HWM %d,TXQDEP %d",
+				    adapter->device_mode,
+				    targetChannel,
+				    adapter->tx_flow_low_watermark,
+				    adapter->tx_flow_low_watermark +
+				    adapter->tx_flow_high_watermark_offset,
+				    hdd_ctx->config->TxFlowMaxQueueDepth);
+				/* remember this adapter for SCC/MCC checks
+				 * against the next active one
+				 */
+				preAdapterChannel = targetChannel;
+				preAdapterContext = adapter;
+			} else {
+				/*
+				 * SCC, disable TX flow control for both
+				 * SCC each adapter cannot reserve dedicated
+				 * channel resource, as a result, if any adapter
+				 * blocked OS Q by flow control,
+				 * blocked adapter will lost chance to recover
+				 */
+				if (preAdapterChannel == targetChannel) {
+					/* Current adapter */
+					adapter->tx_flow_low_watermark = 0;
+					adapter->
+					tx_flow_high_watermark_offset = 0;
+					ol_txrx_ll_set_tx_pause_q_depth(
+						adapter->sessionId,
+						hdd_ctx->config->
+						TxHbwFlowMaxQueueDepth);
+					cds_err("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
+					       hdd_device_mode_to_string(
+							adapter->device_mode),
+					       adapter->device_mode,
+					       targetChannel,
+					       adapter->tx_flow_low_watermark,
+					       adapter->tx_flow_low_watermark +
+					       adapter->
+					       tx_flow_high_watermark_offset,
+					       hdd_ctx->config->
+					       TxHbwFlowMaxQueueDepth);
+
+					if (!preAdapterContext) {
+						cds_err("SCC: Previous adapter context NULL");
+						continue;
+					}
+
+					/* Previous adapter */
+					preAdapterContext->
+					tx_flow_low_watermark = 0;
+					preAdapterContext->
+					tx_flow_high_watermark_offset = 0;
+					ol_txrx_ll_set_tx_pause_q_depth(
+						preAdapterContext->sessionId,
+						hdd_ctx->config->
+						TxHbwFlowMaxQueueDepth);
+					/*
+					 * Temporary set log level as error
+					 * TX Flow control feature settled down,
+					 * will lower log level
+					 */
+					cds_err("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
+					       hdd_device_mode_to_string(
+						preAdapterContext->device_mode
+							  ),
+					       preAdapterContext->device_mode,
+					       targetChannel,
+					       preAdapterContext->
+					       tx_flow_low_watermark,
+					       preAdapterContext->
+					       tx_flow_low_watermark +
+					       preAdapterContext->
+					       tx_flow_high_watermark_offset,
+					       hdd_ctx->config->
+					       TxHbwFlowMaxQueueDepth);
+				}
+				/*
+				 * MCC, each adapter will have dedicated
+				 * resource
+				 */
+				else {
+					/* current channel is 2.4 */
+					if (targetChannel <=
+				     WLAN_HDD_TX_FLOW_CONTROL_MAX_24BAND_CH) {
+						channel24 = targetChannel;
+						channel5 = preAdapterChannel;
+						adapter2_4 = adapter;
+						adapter5 = preAdapterContext;
+					} else {
+						/* Current channel is 5 */
+						channel24 = preAdapterChannel;
+						channel5 = targetChannel;
+						adapter2_4 = preAdapterContext;
+						adapter5 = adapter;
+					}
+
+					if (!adapter5) {
+						cds_err("MCC: 5GHz adapter context NULL");
+						continue;
+					}
+					/* high-bandwidth (5 GHz) settings */
+					adapter5->tx_flow_low_watermark =
+						hdd_ctx->config->
+						TxHbwFlowLowWaterMark;
+					adapter5->
+					tx_flow_high_watermark_offset =
+						hdd_ctx->config->
+						TxHbwFlowHighWaterMarkOffset;
+					ol_txrx_ll_set_tx_pause_q_depth(
+						adapter5->sessionId,
+						hdd_ctx->config->
+						TxHbwFlowMaxQueueDepth);
+					/*
+					 * Temporary set log level as error
+					 * TX Flow control feature settled down,
+					 * will lower log level
+					 */
+					cds_err("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
+					    hdd_device_mode_to_string(
+						    adapter5->device_mode),
+					    adapter5->device_mode,
+					    channel5,
+					    adapter5->tx_flow_low_watermark,
+					    adapter5->
+					    tx_flow_low_watermark +
+					    adapter5->
+					    tx_flow_high_watermark_offset,
+					    hdd_ctx->config->
+					    TxHbwFlowMaxQueueDepth);
+
+					if (!adapter2_4) {
+						cds_err("MCC: 2.4GHz adapter context NULL");
+						continue;
+					}
+					/* low-bandwidth (2.4 GHz) settings */
+					adapter2_4->tx_flow_low_watermark =
+						hdd_ctx->config->
+						TxLbwFlowLowWaterMark;
+					adapter2_4->
+					tx_flow_high_watermark_offset =
+						hdd_ctx->config->
+						TxLbwFlowHighWaterMarkOffset;
+					ol_txrx_ll_set_tx_pause_q_depth(
+						adapter2_4->sessionId,
+						hdd_ctx->config->
+						TxLbwFlowMaxQueueDepth);
+					/*
+					 * Temporary set log level as error
+					 * TX Flow control feature settled down,
+					 * will lower log level
+					 */
+					cds_err("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
+						hdd_device_mode_to_string(
+						    adapter2_4->device_mode),
+						adapter2_4->device_mode,
+						channel24,
+						adapter2_4->
+						tx_flow_low_watermark,
+						adapter2_4->
+						tx_flow_low_watermark +
+						adapter2_4->
+						tx_flow_high_watermark_offset,
+						hdd_ctx->config->
+						TxLbwFlowMaxQueueDepth);
+
+				}
+			}
+		}
+		/* reset before examining the next adapter in the list */
+		targetChannel = 0;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+		status = hdd_get_next_adapter(hdd_ctx, adapterNode, &pNext);
+		adapterNode = pNext;
+	}
+	if (hdd_ctx->config->policy_manager_enabled) {
+		cds_dump_current_concurrency(hdd_ctx);
+		hdd_ctx->mcc_mode = !cds_current_concurrency_is_scc(hdd_ctx);
+	} else {
+		/* hdd_ctx->mcc_mode gets updated inside below function, which
+		 *  gets used by IPA
+		 */
+		cds_dump_legacy_concurrency(hdd_ctx,
+			staChannel, apChannel, p2pChannel,
+			staBssid, p2pBssid, apBssid, p2pMode);
+	}
+}
+
+/**
+ * cds_set_concurrency_mode() - To set concurrency mode
+ * @hdd_ctx: HDD context
+ * @mode: Concurrency mode
+ *
+ * Marks @mode active in the concurrency bitmap and bumps the
+ * per-mode open-session counter.
+ *
+ * Return: NONE
+ */
+void cds_set_concurrency_mode(hdd_context_t *hdd_ctx, tCDF_CON_MODE mode)
+{
+	/* only STA/P2P/SAP modes participate in concurrency tracking */
+	if ((CDF_STA_MODE == mode) || (CDF_P2P_CLIENT_MODE == mode) ||
+	    (CDF_P2P_GO_MODE == mode) || (CDF_SAP_MODE == mode)) {
+		hdd_ctx->concurrency_mode |= (1 << mode);
+		hdd_ctx->no_of_open_sessions[mode]++;
+	}
+	cds_info("concurrency_mode = 0x%x Number of open sessions for mode %d = %d",
+		hdd_ctx->concurrency_mode, mode,
+		hdd_ctx->no_of_open_sessions[mode]);
+}
+
+/**
+ * cds_clear_concurrency_mode() - To clear concurrency mode
+ * @hdd_ctx: HDD context
+ * @mode: Concurrency mode
+ *
+ * Drops the per-mode open-session counter and clears @mode from the
+ * concurrency bitmap once its last session is gone.
+ *
+ * Return: NONE
+ */
+void cds_clear_concurrency_mode(hdd_context_t *hdd_ctx,
+				     tCDF_CON_MODE mode)
+{
+	/* only STA/P2P/SAP modes participate in concurrency tracking */
+	if ((CDF_STA_MODE == mode) || (CDF_P2P_CLIENT_MODE == mode) ||
+	    (CDF_P2P_GO_MODE == mode) || (CDF_SAP_MODE == mode)) {
+		hdd_ctx->no_of_open_sessions[mode]--;
+		if (0 == hdd_ctx->no_of_open_sessions[mode])
+			hdd_ctx->concurrency_mode &= ~(1 << mode);
+	}
+	cds_info("concurrency_mode = 0x%x Number of open sessions for mode %d = %d",
+		hdd_ctx->concurrency_mode, mode,
+		hdd_ctx->no_of_open_sessions[mode]);
+}
+
+/**
+ * cds_soc_set_pcl() - Sets PCL to FW
+ * @hdd_ctx: HDD context
+ * @mode: Connection mode
+ *
+ * Fetches the PCL and sends the PCL to SME
+ * module which in turn will send the WMI
+ * command WMI_SOC_SET_PCL_CMDID to the fw
+ *
+ * Return: None
+ */
+static void cds_soc_set_pcl(hdd_context_t *hdd_ctx, tCDF_CON_MODE mode)
+{
+	CDF_STATUS status;
+	enum cds_con_mode con_mode;
+	struct sir_pcl_list pcl;
+	pcl.pcl_len = 0;
+
+	/* translate the HDD-level mode into the policy-mgr mode */
+	switch (mode) {
+	case CDF_STA_MODE:
+		con_mode = CDS_STA_MODE;
+		break;
+	case CDF_P2P_CLIENT_MODE:
+		con_mode = CDS_P2P_CLIENT_MODE;
+		break;
+	case CDF_P2P_GO_MODE:
+		con_mode = CDS_P2P_GO_MODE;
+		break;
+	case CDF_SAP_MODE:
+		con_mode = CDS_SAP_MODE;
+		break;
+	case CDF_IBSS_MODE:
+		con_mode = CDS_IBSS_MODE;
+		break;
+	default:
+		cds_err("Unable to set PCL to FW: %d", mode);
+		return;
+	}
+
+	cds_debug("get pcl to set it to the FW");
+
+	/* cds_get_pcl() fills pcl.pcl_list and pcl.pcl_len for this mode */
+	status = cds_get_pcl(hdd_ctx, con_mode,
+			pcl.pcl_list, &pcl.pcl_len);
+	if (status != CDF_STATUS_SUCCESS) {
+		cds_err("Unable to set PCL to FW, Get PCL failed");
+		return;
+	}
+
+	/* hand the list to SME, which issues the WMI command to FW */
+	status = sme_soc_set_pcl(hdd_ctx->hHal, pcl);
+	if (status != CDF_STATUS_SUCCESS)
+		cds_err("Send soc set PCL to SME failed");
+	else
+		cds_info("Set PCL to FW for mode:%d", mode);
+}
+
+/**
+ * cds_incr_active_session() - increments the number of active sessions
+ * @hdd_ctx:	HDD Context
+ * @mode:	Device mode
+ * @session_id: session ID for the connection session
+ *
+ * This function increments the number of active sessions maintained per device
+ * mode. In the case of STA/P2P CLI/IBSS upon connection indication it is
+ * incremented; In the case of SAP/P2P GO upon bss start it is incremented
+ *
+ * Return: None
+ */
+void cds_incr_active_session(hdd_context_t *hdd_ctx, tCDF_CON_MODE mode,
+				  uint8_t session_id)
+{
+	switch (mode) {
+	case CDF_STA_MODE:
+	case CDF_P2P_CLIENT_MODE:
+	case CDF_P2P_GO_MODE:
+	case CDF_SAP_MODE:
+		hdd_ctx->no_of_active_sessions[mode]++;
+		break;
+	default:
+		/* other modes are not tracked in the session counters */
+		break;
+	}
+	cds_info("No.# of active sessions for mode %d = %d",
+		mode, hdd_ctx->no_of_active_sessions[mode]);
+	/*
+	 * Get PCL logic makes use of the connection info structure.
+	 * Let us set the PCL to the FW before updating the connection
+	 * info structure about the new connection.
+	 */
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	if (mode == CDF_STA_MODE) {
+		/* Set PCL of STA to the FW */
+		cds_soc_set_pcl(hdd_ctx, mode);
+		cds_info("Set PCL of STA to FW");
+	}
+	/* record the new connection in conc_connection_list, under lock */
+	cds_incr_connection_count(hdd_ctx, session_id);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+}
+
+/**
+ * cds_need_opportunistic_upgrade() - Tells us if we really
+ * need an upgrade to 2x2
+ * @hdd_ctx: HDD context
+ *
+ * This function returns if upgrade to 2x2 is needed
+ *
+ * Return: CDS_NOP = upgrade is not needed, otherwise upgrade is
+ * needed
+ */
+enum cds_conc_next_action cds_need_opportunistic_upgrade(
+		hdd_context_t *hdd_ctx)
+{
+	uint32_t conn_index;
+	enum cds_conc_next_action upgrade = CDS_NOP;
+	uint8_t mac = 0;	/* bitmask: bit0 = mac0 in use, bit1 = mac1 */
+
+	if (wma_is_hw_dbs_capable() == false) {
+		cds_err("driver isn't dbs capable, no further action needed");
+		return upgrade;
+	}
+
+	/* Are both mac's still in use*/
+	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
+		conn_index++) {
+		if ((conc_connection_list[conn_index].mac == 0) &&
+			conc_connection_list[conn_index].in_use) {
+			mac |= 1;
+			if (3 == mac)	/* both MACs busy: nothing to do */
+				goto done;
+		} else if ((conc_connection_list[conn_index].mac == 1) &&
+			conc_connection_list[conn_index].in_use) {
+			mac |= 2;
+			if (3 == mac)
+				goto done;
+		}
+	}
+#ifdef QCA_WIFI_3_0_EMU
+	/* For emulation only: if we have a connection on 2.4, stay in DBS */
+	if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan))
+		goto done;
+#endif
+	/* Let's request for single MAC mode */
+	upgrade = CDS_MCC;
+	/* Is there any connection had an initial connection with 2x2.
+	 * NOTE(review): the check is original_nss == 1 while this comment
+	 * says 2x2 -- confirm the encoding of the original_nss field.
+	 */
+	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
+		conn_index++) {
+		if ((conc_connection_list[conn_index].original_nss == 1) &&
+			conc_connection_list[conn_index].in_use) {
+			upgrade = CDS_MCC_UPGRADE;
+			goto done;
+		}
+	}
+
+done:
+	return upgrade;
+}
+
+
+/**
+ * cds_set_pcl_for_existing_combo() - Set PCL for existing connection
+ * @hdd_ctx: HDD context
+ * @mode: Connection mode of type 'cds_con_mode'
+ *
+ * Set the PCL for an existing connection. The mode's entry is
+ * temporarily removed from the connection table so the PCL is computed
+ * as if that connection were about to come up, then restored.
+ *
+ * Return: None
+ */
+static void cds_set_pcl_for_existing_combo(hdd_context_t *hdd_ctx,
+		enum cds_con_mode mode)
+{
+	struct cds_conc_connection_info info;
+	tCDF_CON_MODE pcl_mode;
+
+	/* map the policy-mgr mode back to the HDD-level mode */
+	switch (mode) {
+	case CDS_STA_MODE:
+		pcl_mode = CDF_STA_MODE;
+		break;
+	case CDS_SAP_MODE:
+		pcl_mode = CDF_SAP_MODE;
+		break;
+	case CDS_P2P_CLIENT_MODE:
+		pcl_mode = CDF_P2P_CLIENT_MODE;
+		break;
+	case CDS_P2P_GO_MODE:
+		pcl_mode = CDF_P2P_GO_MODE;
+		break;
+	case CDS_IBSS_MODE:
+		pcl_mode = CDF_IBSS_MODE;
+		break;
+	default:
+		cds_err("Invalid mode to set PCL");
+		return;
+	};
+
+	if (cds_mode_specific_connection_count(
+				hdd_ctx, mode, NULL) > 0) {
+		/* Check, store and temp delete the mode's parameter */
+		cds_store_and_del_conn_info(hdd_ctx, mode,
+				&info);
+		/* Set the PCL to the FW since connection got updated */
+		cds_soc_set_pcl(hdd_ctx, pcl_mode);
+		cds_info("Set PCL to FW for mode:%d", mode);
+		/* Restore the connection info */
+		cds_restore_deleted_conn_info(hdd_ctx, &info);
+	}
+}
+
+/**
+ * cds_decr_session_set_pcl() - Decrement session count and set PCL
+ * @hdd_ctx: HDD context
+ * @mode: Connection mode
+ * @session_id: Session id
+ *
+ * Decrements the active session count and sets the PCL if a STA connection
+ * exists. All of this happens under hdd_conc_list_lock; it may also arm
+ * the DBS opportunistic timer if a HW-mode downgrade is possible.
+ *
+ * Return: None
+ */
+void cds_decr_session_set_pcl(hdd_context_t *hdd_ctx,
+						tCDF_CON_MODE mode,
+						uint8_t session_id)
+{
+	CDF_STATUS cdf_status;
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	cds_decr_active_session(hdd_ctx, mode, session_id);
+	/*
+	 * After the removal of this connection, we need to check if
+	 * a STA connection still exists. The reason for this is that
+	 * if one or more STA exists, we need to provide the updated
+	 * PCL to the FW for cases like LFR.
+	 *
+	 * Since cds_get_pcl provides PCL list based on the new
+	 * connection that is going to come up, we will find the
+	 * existing STA entry, save it and delete it temporarily.
+	 * After this we will get PCL as though as new STA connection
+	 * is coming up. This will give the exact PCL that needs to be
+	 * given to the FW. After setting the PCL, we need to restore
+	 * the entry that we have saved before.
+	 */
+	cds_set_pcl_for_existing_combo(hdd_ctx, CDS_STA_MODE);
+	/* do we need to change the HW mode */
+	if (cds_need_opportunistic_upgrade(hdd_ctx)) {
+		/* let's start the timer */
+		/* stop any pending instance before re-arming */
+		cdf_mc_timer_stop(&hdd_ctx->dbs_opportunistic_timer);
+		cdf_status = cdf_mc_timer_start(
+					&hdd_ctx->dbs_opportunistic_timer,
+					DBS_OPPORTUNISTIC_TIME *
+						1000);
+		if (!CDF_IS_STATUS_SUCCESS(cdf_status))
+			cds_err("Failed to start dbs opportunistic timer");
+	}
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+
+	return;
+}
+
+
+/**
+ * cds_decr_active_session() - decrements the number of active sessions
+ * @hdd_ctx:	HDD Context
+ * @mode:	Device mode
+ * @session_id: session ID for the connection session
+ *
+ * Drops the per-mode active session counter (STA/P2P CLI on disconnect,
+ * SAP/P2P GO on bss stop) and removes the matching entry from the
+ * concurrency connection table.
+ *
+ * Return: None
+ */
+void cds_decr_active_session(hdd_context_t *hdd_ctx, tCDF_CON_MODE mode,
+				  uint8_t session_id)
+{
+	bool tracked = (CDF_STA_MODE == mode) ||
+		       (CDF_P2P_CLIENT_MODE == mode) ||
+		       (CDF_P2P_GO_MODE == mode) ||
+		       (CDF_SAP_MODE == mode);
+
+	/* only decrement when the counter is non-zero (underflow guard) */
+	if (tracked && hdd_ctx->no_of_active_sessions[mode])
+		hdd_ctx->no_of_active_sessions[mode]--;
+
+	cds_info("No.# of active sessions for mode %d = %d",
+		mode, hdd_ctx->no_of_active_sessions[mode]);
+	cds_decr_connection_count(hdd_ctx, session_id);
+}
+
+/**
+ * cds_dbs_opportunistic_timer_handler() - handler of
+ * dbs_opportunistic_timer
+ * @data: HDD context
+ *
+ * Fired when the DBS opportunistic timer expires: re-checks whether an
+ * opportunistic HW-mode change is still required and, if so, kicks off
+ * the corresponding action.
+ *
+ * Return: None
+ */
+void cds_dbs_opportunistic_timer_handler(void *data)
+{
+	hdd_context_t *hdd_ctx = (hdd_context_t *) data;
+	enum cds_conc_next_action next_action;
+
+	if (NULL == hdd_ctx) {
+		cds_err("hdd_ctx is NULL");
+		return;
+	}
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	/* the need for an upgrade may have gone away meanwhile */
+	next_action = cds_need_opportunistic_upgrade(hdd_ctx);
+	if (next_action)
+		cds_next_actions(hdd_ctx, next_action);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+}
+
+/**
+ * cds_init_policy_mgr() - Initialize the policy manager
+ * related data structures
+ * @hdd_ctx:	HDD Context
+ *
+ * Initialize the policy manager related data structures: the
+ * connection table, its lock, the SME HW-mode transition callback,
+ * the DBS opportunistic timer and the connection-update event.
+ *
+ * Return: Success if the policy manager is initialized completely
+ */
+CDF_STATUS cds_init_policy_mgr(hdd_context_t *hdd_ctx)
+{
+	CDF_STATUS status;
+	p_cds_contextType p_cds_context;
+
+	cds_debug("Initializing the policy manager");
+
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		cds_err("Invalid CDS context");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* init conc_connection_list */
+	cdf_mem_zero(conc_connection_list, sizeof(conc_connection_list));
+
+	if (!CDF_IS_STATUS_SUCCESS(cdf_mutex_init(
+					&hdd_ctx->hdd_conc_list_lock))) {
+		cds_err("Failed to init hdd_conc_list_lock");
+		/* Lets us not proceed further */
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	sme_register_hw_mode_trans_cb(hdd_ctx->hHal,
+				cds_hw_mode_transition_cb);
+	status = cdf_mc_timer_init(&hdd_ctx->dbs_opportunistic_timer,
+				   CDF_TIMER_TYPE_SW,
+				   cds_dbs_opportunistic_timer_handler,
+				   (void *)hdd_ctx);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		cds_err("Failed to init DBS opportunistic timer");
+		/* NOTE(review): the already-initialized mutex is not
+		 * destroyed on this error path -- confirm cleanup is
+		 * handled by the caller.
+		 */
+		return status;
+	}
+
+	status = cdf_event_init(&p_cds_context->connection_update_done_evt);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		cds_err("connection_update_done_evt init failed");
+		return status;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_connection_for_vdev_id() - find the connection table entry
+ * for the requested vdev id
+ * @hdd_ctx:	HDD Context
+ * @vdev_id: vdev id of the connection
+ *
+ * Scans conc_connection_list for an in-use entry whose vdev id matches
+ * @vdev_id.
+ *
+ * Return: index of the matching entry in the connection table, or
+ * MAX_NUMBER_OF_CONC_CONNECTIONS when no entry matches
+ */
+uint32_t cds_get_connection_for_vdev_id(hdd_context_t *hdd_ctx,
+						uint32_t vdev_id)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < MAX_NUMBER_OF_CONC_CONNECTIONS; idx++) {
+		if (conc_connection_list[idx].in_use &&
+		    (conc_connection_list[idx].vdev_id == vdev_id))
+			return idx;
+	}
+
+	return MAX_NUMBER_OF_CONC_CONNECTIONS;
+}
+
+
+/**
+ * cds_get_connection_count() - provides the count of
+ * current connections
+ * @hdd_ctx:	HDD Context
+ *
+ * Counts the in-use entries of conc_connection_list.
+ *
+ * Return: connection count
+ */
+uint32_t cds_get_connection_count(hdd_context_t *hdd_ctx)
+{
+	uint32_t idx;
+	uint32_t active = 0;
+
+	for (idx = 0; idx < MAX_NUMBER_OF_CONC_CONNECTIONS; idx++)
+		if (conc_connection_list[idx].in_use)
+			active++;
+
+	return active;
+}
+
+/**
+ * cds_get_mode() - Get mode from type and subtype
+ * @type: type
+ * @subtype: subtype
+ *
+ * Translates the WMI vdev type/subtype pair of an interface into the
+ * policy-manager concurrency mode.
+ *
+ * Return: cds_con_mode, CDS_MAX_NUM_OF_MODE on an unknown combination
+ */
+enum cds_con_mode cds_get_mode(uint8_t type, uint8_t subtype)
+{
+	enum cds_con_mode mode = CDS_MAX_NUM_OF_MODE;
+
+	switch (type) {
+	case WMI_VDEV_TYPE_AP:
+		if (0 == subtype)
+			mode = CDS_SAP_MODE;
+		else if (WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO == subtype)
+			mode = CDS_P2P_GO_MODE;
+		else
+			cds_err("Unknown subtype %d for type %d",
+				subtype, type);
+		break;
+	case WMI_VDEV_TYPE_STA:
+		if (0 == subtype)
+			mode = CDS_STA_MODE;
+		else if (WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT == subtype)
+			mode = CDS_P2P_CLIENT_MODE;
+		else
+			cds_err("Unknown subtype %d for type %d",
+				subtype, type);
+		break;
+	case WMI_VDEV_TYPE_IBSS:
+		mode = CDS_IBSS_MODE;
+		break;
+	default:
+		cds_err("Unknown type %d", type);
+		break;
+	}
+
+	return mode;
+}
+
+/**
+ * cds_incr_connection_count() - adds the new connection to
+ * the current connections list
+ * @hdd_ctx:	HDD Context
+ *
+ *
+ * This function adds the new connection to the current
+ * connections list, looking its parameters up in the WMA
+ * interface table by vdev id.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_incr_connection_count(hdd_context_t *hdd_ctx,
+					  uint32_t vdev_id)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t conn_index;
+	struct wma_txrx_node *wma_conn_table_entry;
+
+	/* the next free slot equals the number of in-use entries because
+	 * cds_decr_connection_count() keeps the list compacted
+	 */
+	conn_index = cds_get_connection_count(hdd_ctx);
+	/* NOTE(review): when conn_index equals the configured maximum this
+	 * '<' check is false and the connection is still admitted --
+	 * confirm whether '<=' was intended.
+	 */
+	if (hdd_ctx->config->gMaxConcurrentActiveSessions < conn_index) {
+		/* err msg */
+		cds_err("exceeded max connection limit %d",
+			hdd_ctx->config->gMaxConcurrentActiveSessions);
+		return status;
+	}
+
+	wma_conn_table_entry = wma_get_interface_by_vdev_id(vdev_id);
+
+	if (NULL == wma_conn_table_entry) {
+		/* err msg*/
+		cds_err("can't find vdev_id %d in WMA table", vdev_id);
+		return status;
+	}
+
+	/* add the entry */
+	cds_update_conc_list(conn_index,
+			cds_get_mode(wma_conn_table_entry->type,
+					wma_conn_table_entry->sub_type),
+			cds_freq_to_chan(wma_conn_table_entry->mhz),
+			wma_conn_table_entry->mac_id,
+			wma_conn_table_entry->chain_mask,
+			wma_conn_table_entry->tx_streams,
+			wma_conn_table_entry->rx_streams,
+			wma_conn_table_entry->nss, vdev_id, true);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_update_connection_info() - updates the existing
+ * connection in the current connections list
+ * @hdd_ctx:	HDD Context
+ *
+ *
+ * This function refreshes an existing entry of the current
+ * connections list (matched by vdev id) with the latest
+ * parameters from the WMA interface table. The whole update
+ * runs under hdd_conc_list_lock.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_update_connection_info(hdd_context_t *hdd_ctx,
+					   uint32_t vdev_id)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t conn_index = 0;
+	bool found = false;
+	struct wma_txrx_node *wma_conn_table_entry;
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	/* locate the entry belonging to this vdev */
+	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+		if (vdev_id == conc_connection_list[conn_index].vdev_id) {
+			/* debug msg */
+			found = true;
+			break;
+		}
+		conn_index++;
+	}
+	if (!found) {
+		/* err msg */
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		cds_err("can't find vdev_id %d in conc_connection_list",
+			vdev_id);
+		return status;
+	}
+
+	wma_conn_table_entry = wma_get_interface_by_vdev_id(vdev_id);
+
+	if (NULL == wma_conn_table_entry) {
+		/* err msg*/
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		cds_err("can't find vdev_id %d in WMA table", vdev_id);
+		return status;
+	}
+
+	/* overwrite the entry in place with the fresh WMA parameters */
+	cds_update_conc_list(conn_index,
+			cds_get_mode(wma_conn_table_entry->type,
+					wma_conn_table_entry->sub_type),
+			cds_freq_to_chan(wma_conn_table_entry->mhz),
+			wma_conn_table_entry->mac_id,
+			wma_conn_table_entry->chain_mask,
+			wma_conn_table_entry->tx_streams,
+			wma_conn_table_entry->rx_streams,
+			wma_conn_table_entry->nss, vdev_id, true);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_decr_connection_count() - remove the old connection
+ * from the current connections list
+ * @hdd_ctx:	HDD Context
+ * @vdev_id: vdev id of the old connection
+ *
+ *
+ * This function removes the old connection from the current
+ * connections list and compacts the list so in-use entries stay
+ * contiguous at the front. NOTE(review): unlike
+ * cds_update_connection_info() no lock is taken here -- callers
+ * such as cds_decr_session_set_pcl() appear to hold
+ * hdd_conc_list_lock already; confirm.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_decr_connection_count(hdd_context_t *hdd_ctx,
+					  uint32_t vdev_id)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t conn_index = 0, next_conn_index = 0;
+	bool found = false;
+
+	/* locate the entry to remove */
+	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+		if (vdev_id == conc_connection_list[conn_index].vdev_id) {
+			/* debug msg */
+			found = true;
+			break;
+		}
+		conn_index++;
+	}
+	if (!found) {
+		/* err msg */
+		cds_err("can't find vdev_id %d in conc_connection_list",
+			vdev_id);
+		return status;
+	}
+	/* compact the list: shift every later entry down one slot */
+	next_conn_index = conn_index + 1;
+	while (CONC_CONNECTION_LIST_VALID_INDEX(next_conn_index)) {
+		conc_connection_list[conn_index].vdev_id =
+			conc_connection_list[next_conn_index].vdev_id;
+		conc_connection_list[conn_index].tx_spatial_stream =
+			conc_connection_list[next_conn_index].tx_spatial_stream;
+		conc_connection_list[conn_index].rx_spatial_stream =
+			conc_connection_list[next_conn_index].rx_spatial_stream;
+		conc_connection_list[conn_index].mode =
+			conc_connection_list[next_conn_index].mode;
+		conc_connection_list[conn_index].mac =
+			conc_connection_list[next_conn_index].mac;
+		conc_connection_list[conn_index].chan =
+			conc_connection_list[next_conn_index].chan;
+		conc_connection_list[conn_index].chain_mask =
+			conc_connection_list[next_conn_index].chain_mask;
+		conc_connection_list[conn_index].original_nss =
+			conc_connection_list[next_conn_index].original_nss;
+		conc_connection_list[conn_index].in_use =
+			conc_connection_list[next_conn_index].in_use;
+		conn_index++;
+		next_conn_index++;
+	}
+
+	/* clean up the entry: the last slot shifted from (or, when the
+	 * removed entry was the final one, the entry itself) is now stale
+	 */
+	cdf_mem_zero(&conc_connection_list[next_conn_index - 1],
+		sizeof(*conc_connection_list));
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_connection_channels() - provides the channel(s)
+ * on which current connection(s) is
+ * @hdd_ctx:	HDD Context
+ * @channels:	the channel(s) on which current connection(s) is
+ * @len:	Number of channels
+ * @order:	no order OR 2.4 Ghz channel followed by 5 Ghz
+ *	channel OR 5 Ghz channel followed by 2.4 Ghz channel
+ *
+ *
+ * This function provides the channel(s) on which current
+ * connection(s) is/are
+ *
+ * NOTE(review): @channels must be able to hold one entry per valid
+ * connection-list slot - confirm callers size it for
+ * MAX_NUMBER_OF_CONC_CONNECTIONS entries.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_get_connection_channels(hdd_context_t *hdd_ctx,
+			uint8_t *channels, uint32_t *len, uint8_t order)
+{
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+	uint32_t conn_index = 0, num_channels = 0;
+
+	if (NULL == hdd_ctx) {
+		/* err msg*/
+		cds_err("hdd_ctx is NULL");
+		status = CDF_STATUS_E_FAILURE;
+		return status;
+	}
+
+	if ((NULL == channels) || (NULL == len)) {
+		/* err msg*/
+		cds_err("channels or len is NULL");
+		status = CDF_STATUS_E_FAILURE;
+		return status;
+	}
+
+	if (0 == order) {
+		/* order 0: emit channels in connection-list order */
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			channels[num_channels++] =
+				conc_connection_list[conn_index++].chan;
+		}
+		*len = num_channels;
+	} else if (1 == order) {
+		/* order 1: all 2.4 GHz channels first, then 5 GHz */
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			if (CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[conn_index].chan)) {
+				channels[num_channels++] =
+					conc_connection_list[conn_index++].chan;
+			} else
+				conn_index++;
+		}
+		conn_index = 0;
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			if (CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[conn_index].chan)) {
+				channels[num_channels++] =
+					conc_connection_list[conn_index++].chan;
+			} else
+				conn_index++;
+		}
+		*len = num_channels;
+	} else if (2 == order) {
+		/* order 2: all 5 GHz channels first, then 2.4 GHz */
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			if (CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[conn_index].chan)) {
+				channels[num_channels++] =
+					conc_connection_list[conn_index++].chan;
+			} else
+				conn_index++;
+		}
+		conn_index = 0;
+		while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+			if (CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[conn_index].chan)) {
+				channels[num_channels++] =
+					conc_connection_list[conn_index++].chan;
+			} else
+				conn_index++;
+		}
+		*len = num_channels;
+	} else {
+		cds_err("unknown order %d", order);
+		status = CDF_STATUS_E_FAILURE;
+	}
+	return status;
+}
+
+/**
+ * cds_update_with_safe_channel_list() - provides the safe
+ * channel list
+ * @hdd_ctx:	HDD Context
+ * @pcl_channels: channel list
+ * @len: length of the list
+ *
+ * This function provides the safe channel list from the list
+ * provided after consulting the channel avoidance list
+ *
+ * Return: None
+ */
+#ifdef CONFIG_CNSS
+void cds_update_with_safe_channel_list(hdd_context_t *hdd_ctx,
+			uint8_t *pcl_channels, uint32_t *len)
+{
+	uint16_t unsafe_channel_list[MAX_NUM_CHAN];
+	uint8_t current_channel_list[MAX_NUM_CHAN];
+	uint16_t unsafe_channel_count = 0;
+	uint8_t is_unsafe = 1;
+	uint8_t i, j;
+	uint32_t safe_channel_count = 0, current_channel_count = 0;
+
+	/* Clamp to MAX_NUM_CHAN so the local copy below cannot overflow */
+	if (len) {
+		current_channel_count = CDF_MIN(*len, MAX_NUM_CHAN);
+	} else {
+		cds_err("invalid number of channel length");
+		return;
+	}
+
+	/* Third argument is the capacity of the output buffer in bytes
+	 * (sizeof of the uint16_t array) - presumably per the cnss
+	 * platform-driver API; confirm against its prototype.
+	 */
+	cnss_get_wlan_unsafe_channel(unsafe_channel_list,
+				     &unsafe_channel_count,
+				     sizeof(unsafe_channel_list));
+
+	if (unsafe_channel_count) {
+		/* Rebuild pcl_channels in place, keeping only channels
+		 * absent from the unsafe list; *len shrinks accordingly.
+		 */
+		cdf_mem_copy(current_channel_list, pcl_channels,
+			current_channel_count);
+		cdf_mem_zero(pcl_channels,
+			sizeof(*pcl_channels)*current_channel_count);
+
+		for (i = 0; i < current_channel_count; i++) {
+			is_unsafe = 0;
+			for (j = 0; j < unsafe_channel_count; j++) {
+				if (current_channel_list[i] ==
+					unsafe_channel_list[j]) {
+					/* Found unsafe channel, update it */
+					is_unsafe = 1;
+					cds_warn("CH %d is not safe",
+						current_channel_list[i]);
+					break;
+				}
+			}
+			if (!is_unsafe) {
+				pcl_channels[safe_channel_count++] =
+					current_channel_list[i];
+			}
+		}
+		*len = safe_channel_count;
+	}
+	return;
+}
+#else
+/* Builds without the CNSS platform driver have no channel-avoidance
+ * source, so the list is returned unchanged.
+ */
+void cds_update_with_safe_channel_list(hdd_context_t *hdd_ctx,
+			uint8_t *pcl_channels, uint32_t *len)
+{
+	return;
+}
+#endif
+/**
+ * cds_get_channel_list() - provides the channel list
+ * suggestion for new connection
+ * @hdd_ctx:	HDD Context
+ * @pcl:	The preferred channel list enum
+ * @pcl_channels: PCL channels
+ * @len: length of the PCL
+ *
+ * This function provides the actual channel list based on the
+ * current regulatory domain derived using preferred channel
+ * list enum obtained from one of the pcl_table
+ *
+ * Return: Channel List
+ */
+CDF_STATUS cds_get_channel_list(hdd_context_t *hdd_ctx,
+			enum cds_pcl_type pcl,
+			uint8_t *pcl_channels, uint32_t *len)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t num_channels = WNI_CFG_VALID_CHANNEL_LIST_LEN;
+	uint32_t chan_index = 0, chan_index_24 = 0, chan_index_5 = 0;
+	uint8_t channel_list[MAX_NUM_CHAN] = {0};
+	uint8_t channel_list_24[MAX_NUM_CHAN] = {0};
+	uint8_t channel_list_5[MAX_NUM_CHAN] = {0};
+
+	if (NULL == hdd_ctx) {
+		/* err msg*/
+		cds_err("hdd_ctx is NULL");
+		return status;
+	}
+
+	if ((NULL == pcl_channels) || (NULL == len)) {
+		/* err msg*/
+		cds_err("pcl_channels or len is NULL");
+		return status;
+	}
+
+	if (CDS_MAX_PCL_TYPE == pcl) {
+		/* msg */
+		cds_err("pcl is invalid");
+		return status;
+	}
+
+	if (CDS_NONE == pcl) {
+		/* msg */
+		cds_err("pcl is 0");
+		return CDF_STATUS_SUCCESS;
+	}
+	/* get the channel list for current domain */
+	status = sme_get_cfg_valid_channels(hdd_ctx->hHal, channel_list,
+			&num_channels);
+	if (CDF_STATUS_SUCCESS != status) {
+		/* err msg*/
+		cds_err("No valid channel");
+		return status;
+	}
+	/* Let's divide the list in 2.4 & 5 Ghz lists */
+	/* Assumes channel_list is sorted ascending with 2.4 GHz channels
+	 * (1..14) first - TODO confirm sme_get_cfg_valid_channels ordering.
+	 * NOTE(review): this condition reads channel_list[chan_index]
+	 * before bounding chan_index against num_channels; if every valid
+	 * channel is <= 11 the loop walks trailing zero entries (0 <= 11)
+	 * and can read one past the array when num_channels == MAX_NUM_CHAN.
+	 */
+	while ((channel_list[chan_index] <= 11) &&
+		(chan_index_24 < MAX_NUM_CHAN))
+		channel_list_24[chan_index_24++] = channel_list[chan_index++];
+	/* Channels 12-14 are picked up individually so ordering holds */
+	if (channel_list[chan_index] == 12) {
+		channel_list_24[chan_index_24++] = channel_list[chan_index++];
+		if (channel_list[chan_index] == 13) {
+			channel_list_24[chan_index_24++] =
+				channel_list[chan_index++];
+			if (channel_list[chan_index] == 14)
+				channel_list_24[chan_index_24++] =
+					channel_list[chan_index++];
+		}
+	}
+	/* Everything remaining is treated as the 5 GHz portion */
+	while ((chan_index < num_channels) &&
+		(chan_index_5 < MAX_NUM_CHAN))
+		channel_list_5[chan_index_5++] = channel_list[chan_index++];
+
+	/* channel_list/num_channels are reused below as scratch output for
+	 * cds_get_connection_channels()
+	 */
+	num_channels = 0;
+	switch (pcl) {
+	case CDS_24G:
+		cdf_mem_copy(pcl_channels, channel_list_24,
+			chan_index_24);
+		*len = chan_index_24;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_5G:
+		cdf_mem_copy(pcl_channels, channel_list_5,
+			chan_index_5);
+		*len = chan_index_5;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_CH:
+	case CDS_MCC_CH:
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 0);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_CH_24G:
+	case CDS_MCC_CH_24G:
+		/* connection channels first, then the 2.4 GHz band list */
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 0);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_24, chan_index_24);
+		*len += chan_index_24;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_CH_5G:
+	case CDS_MCC_CH_5G:
+		/* connection channels first, then the 5 GHz band list */
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 0);
+		cdf_mem_copy(pcl_channels, channel_list,
+			num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_5, chan_index_5);
+		*len += chan_index_5;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_24G_SCC_CH:
+	case CDS_24G_MCC_CH:
+		/* 2.4 GHz band list first, then connection channels */
+		cdf_mem_copy(pcl_channels, channel_list_24,
+			chan_index_24);
+		*len = chan_index_24;
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 0);
+		cdf_mem_copy(&pcl_channels[chan_index_24],
+			channel_list, num_channels);
+		*len += num_channels;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_5G_SCC_CH:
+	case CDS_5G_MCC_CH:
+		/* 5 GHz band list first, then connection channels */
+		cdf_mem_copy(pcl_channels, channel_list_5,
+			chan_index_5);
+		*len = chan_index_5;
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 0);
+		cdf_mem_copy(&pcl_channels[chan_index_5],
+			channel_list, num_channels);
+		*len += num_channels;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_24_SCC_ON_5:
+		/* order 1: connection channels, 2.4 GHz entries first */
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 1);
+		cdf_mem_copy(pcl_channels, channel_list,
+			num_channels);
+		*len = num_channels;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_5_SCC_ON_24:
+		/* order 2: connection channels, 5 GHz entries first */
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 2);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_24_SCC_ON_5_24G:
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 1);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_24, chan_index_24);
+		*len += chan_index_24;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_24_SCC_ON_5_5G:
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 1);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_5, chan_index_5);
+		*len += chan_index_5;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_5_SCC_ON_24_24G:
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 2);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_24, chan_index_24);
+		*len += chan_index_24;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	case CDS_SCC_ON_5_SCC_ON_24_5G:
+		cds_get_connection_channels(hdd_ctx,
+			channel_list, &num_channels, 2);
+		cdf_mem_copy(pcl_channels, channel_list, num_channels);
+		*len = num_channels;
+		cdf_mem_copy(&pcl_channels[num_channels],
+			channel_list_5, chan_index_5);
+		*len += chan_index_5;
+		status = CDF_STATUS_SUCCESS;
+		break;
+	default:
+		/* err msg */
+		cds_err("unknown pcl value %d", pcl);
+		break;
+	}
+
+	/* check the channel avoidance list */
+	cds_update_with_safe_channel_list(hdd_ctx, pcl_channels, len);
+
+	return status;
+}
+
+/**
+ * cds_map_concurrency_mode() - to map concurrency mode between sme and hdd
+ * @hdd_ctx: hdd context
+ * @old_mode: sme provided concurrency mode
+ * @new_mode: hdd provided concurrency mode
+ *
+ * This routine will map concurrency mode between sme and hdd
+ *
+ * Return: true or false (false when @old_mode has no CDS equivalent;
+ * *new_mode is then set to CDS_MAX_NUM_OF_MODE)
+ */
+bool cds_map_concurrency_mode(hdd_context_t *hdd_ctx,
+	tCDF_CON_MODE *old_mode, enum cds_con_mode *new_mode)
+{
+	bool status = true;
+
+	if (!hdd_ctx) {
+		cds_err("HDD context is NULL");
+		return false;
+	}
+
+	/* One-to-one translation of the CDF device mode to the CDS
+	 * policy-manager mode enum.
+	 */
+	switch (*old_mode) {
+
+	case CDF_STA_MODE:
+		*new_mode = CDS_STA_MODE;
+		break;
+	case CDF_SAP_MODE:
+		*new_mode = CDS_SAP_MODE;
+		break;
+	case CDF_P2P_CLIENT_MODE:
+		*new_mode = CDS_P2P_CLIENT_MODE;
+		break;
+	case CDF_P2P_GO_MODE:
+		*new_mode = CDS_P2P_GO_MODE;
+		break;
+	case CDF_IBSS_MODE:
+		*new_mode = CDS_IBSS_MODE;
+		break;
+	default:
+		*new_mode = CDS_MAX_NUM_OF_MODE;
+		status = false;
+		break;
+	}
+	return status;
+}
+
+/**
+ * cds_get_pcl() - provides the preferred channel list for
+ * new connection
+ * @hdd_ctx:	HDD Context
+ * @mode:	Device mode
+ * @pcl_channels: PCL channels
+ * @len: length of the PCL
+ *
+ * This function provides the preferred channel list on which
+ * policy manager wants the new connection to come up. Various
+ * connection decision making entities will be using this function
+ * to query the PCL info
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_get_pcl(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
+			uint8_t *pcl_channels, uint32_t *len)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t num_connections = 0;
+	enum cds_conc_priority_mode first_index = 0;
+	enum cds_one_connection_mode second_index = 0;
+	enum cds_two_connection_mode third_index = 0;
+	enum cds_pcl_type pcl = CDS_NONE;
+	enum cds_conc_priority_mode conc_system_pref = 0;
+	/* find the current connection state from conc_connection_list*/
+	num_connections = cds_get_connection_count(hdd_ctx);
+	cds_debug("connections:%d pref:%d requested mode:%d",
+		num_connections, hdd_ctx->config->conc_system_pref, mode);
+
+	/* Translate the INI preference (0/1/2) into the CDS priority enum;
+	 * an unknown value is only logged and conc_system_pref keeps its
+	 * initial value of 0.
+	 */
+	switch (hdd_ctx->config->conc_system_pref) {
+	case 0:
+		conc_system_pref = CDS_THROUGHPUT;
+		break;
+	case 1:
+		conc_system_pref = CDS_POWERSAVE;
+		break;
+	case 2:
+		conc_system_pref = CDS_LATENCY;
+		break;
+	default:
+		/* err msg */
+		cds_err("unknown conc_system_pref value %d",
+			hdd_ctx->config->conc_system_pref);
+		break;
+	}
+
+	/* Pick the PCL table keyed by how many connections already exist;
+	 * 1- and 2-connection cases additionally branch on DBS capability.
+	 */
+	switch (num_connections) {
+	case 0:
+		first_index =
+			cds_get_first_connection_pcl_table_index(hdd_ctx);
+		pcl = first_connection_pcl_table[mode][first_index];
+		break;
+	case 1:
+		second_index =
+			cds_get_second_connection_pcl_table_index(hdd_ctx);
+		if (CDS_MAX_ONE_CONNECTION_MODE == second_index) {
+			/* err msg */
+			cds_err("couldn't find index for 2nd connection pcl table");
+			return status;
+		}
+		if (wma_is_hw_dbs_capable() == true) {
+			pcl = second_connection_pcl_dbs_table
+				[second_index][mode][conc_system_pref];
+		} else {
+			pcl = second_connection_pcl_nodbs_table
+				[second_index][mode][conc_system_pref];
+		}
+
+		break;
+	case 2:
+		third_index =
+			cds_get_third_connection_pcl_table_index(hdd_ctx);
+		if (CDS_MAX_TWO_CONNECTION_MODE == third_index) {
+			/* err msg */
+			cds_err("couldn't find index for 3rd connection pcl table");
+			return status;
+		}
+		if (wma_is_hw_dbs_capable() == true) {
+			pcl = third_connection_pcl_dbs_table
+				[third_index][mode][conc_system_pref];
+		} else {
+			pcl = third_connection_pcl_nodbs_table
+				[third_index][mode][conc_system_pref];
+		}
+		break;
+	default:
+		/* err msg */
+		cds_err("unexpected num_connections value %d",
+			num_connections);
+		break;
+	}
+
+	cds_debug("index1:%d index2:%d index3:%d pcl:%d dbs:%d",
+		first_index, second_index, third_index,
+		pcl, wma_is_hw_dbs_capable());
+
+	/* once the PCL enum is obtained find out the exact channel list with
+	 * help from sme_get_cfg_valid_channels
+	 */
+	status = cds_get_channel_list(hdd_ctx, pcl, pcl_channels, len);
+	if (status == CDF_STATUS_SUCCESS) {
+		uint32_t i;
+		cds_debug("pcl len:%d", *len);
+		for (i = 0; i < *len; i++)
+			cds_debug("chan:%d", pcl_channels[i]);
+	}
+
+	return status;
+}
+
+/**
+ * cds_disallow_mcc() - Check for mcc
+ *
+ * @hdd_ctx:	HDD Context
+ * @channel: channel on which new connection is coming up
+ *
+ * When a new connection is about to come up check if current
+ * concurrency combination including the new connection is
+ * causing MCC
+ *
+ * Return: True/False (true when some existing connection is on a
+ * different channel than @channel: any connection on non-DBS
+ * hardware, or only 5 GHz connections when DBS is available)
+ */
+bool cds_disallow_mcc(hdd_context_t *hdd_ctx, uint8_t channel)
+{
+	uint32_t index = 0;
+	bool match = false;
+	while (CONC_CONNECTION_LIST_VALID_INDEX(index)) {
+		if (wma_is_hw_dbs_capable() == false) {
+			/* Single MAC: any other home channel means MCC */
+			if (conc_connection_list[index].chan !=
+				channel) {
+				match = true;
+				break;
+			}
+		} else if (CDS_IS_CHANNEL_5GHZ
+			(conc_connection_list[index].chan)) {
+			/* DBS: 2.4 GHz lives on the other MAC, so only a
+			 * differing 5 GHz home channel causes MCC.
+			 */
+			if (conc_connection_list[index].chan != channel) {
+				match = true;
+				break;
+			}
+		}
+		index++;
+	}
+	return match;
+}
+
+/**
+ * cds_allow_new_home_channel() - Check for allowed number of
+ * home channels
+ *
+ * @hdd_ctx:	HDD Context
+ * @channel: channel on which new connection is coming up
+ * @num_connections: number of current connections
+ *
+ * When a new connection is about to come up check if current
+ * concurrency combination including the new connection is
+ * allowed or not based on the HW capability
+ *
+ * Return: True/False
+ */
+bool cds_allow_new_home_channel(hdd_context_t *hdd_ctx,
+			uint8_t channel, uint32_t num_connections)
+{
+	bool status = true;
+
+	/* Two existing connections already MCC on the same MAC: a third
+	 * distinct home channel would exceed what that MAC can serve.
+	 */
+	if ((num_connections == 2) &&
+		(conc_connection_list[0].chan != conc_connection_list[1].chan)
+		&&
+		(conc_connection_list[0].mac == conc_connection_list[1].mac)) {
+		if (wma_is_hw_dbs_capable() == false) {
+			if ((channel != conc_connection_list[0].chan) &&
+				(channel != conc_connection_list[1].chan)) {
+				/* err msg */
+				cds_err("don't allow 3rd home channel on same MAC");
+				status = false;
+			}
+		} else if (((CDS_IS_CHANNEL_24GHZ(channel)) &&
+				(CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[1].chan))) ||
+				   ((CDS_IS_CHANNEL_5GHZ(channel)) &&
+				(CDS_IS_CHANNEL_5GHZ
+				(conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ
+				(conc_connection_list[1].chan)))) {
+			/* err msg */
+			cds_err("don't allow 3rd home channel on same MAC");
+			status = false;
+		}
+/* Preprocessor trick: on non-emulation builds only the closing brace is
+ * compiled; on QCA_WIFI_3_0_EMU builds the brace instead opens an extra
+ * else-if that also forbids a 2nd same-band home channel. Braces stay
+ * balanced either way.
+ */
+#ifndef QCA_WIFI_3_0_EMU
+	}
+#else
+	} else if ((num_connections == 1) &&
+		(conc_connection_list[0].chan != channel)) {
+		if (((CDS_IS_CHANNEL_24GHZ(channel)) &&
+			(CDS_IS_CHANNEL_24GHZ
+			(conc_connection_list[0].chan))) ||
+			   ((CDS_IS_CHANNEL_5GHZ(channel)) &&
+			(CDS_IS_CHANNEL_5GHZ
+			(conc_connection_list[0].chan)))) {
+			/* err msg */
+			cds_err("don't allow 2nd home channel on same MAC");
+			status = false;
+		}
+	}
+#endif
+	return status;
+}
+
+/**
+ * cds_allow_concurrency() - Check for allowed concurrency
+ * combination
+ *
+ * @hdd_ctx:	HDD Context
+ * @mode:	new connection mode
+ * @channel: channel on which new connection is coming up
+ * @bw: Bandwidth requested by the connection (optional)
+ *
+ * When a new connection is about to come up check if current
+ * concurrency combination including the new connection is
+ * allowed or not based on the HW capability
+ *
+ * Holds hdd_conc_list_lock for the whole evaluation; every
+ * rejection path jumps to the common unlock at "done".
+ *
+ * Return: True/False
+ */
+bool cds_allow_concurrency(hdd_context_t *hdd_ctx, enum cds_con_mode mode,
+				uint8_t channel, enum hw_mode_bandwidth bw)
+{
+	uint32_t num_connections = 0, count = 0, index = 0;
+	bool status = false, match = false;
+	uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	/* find the current connection state from conc_connection_list*/
+	num_connections = cds_get_connection_count(hdd_ctx);
+
+	/* Hard cap from the gMaxConcurrentActiveSessions INI knob */
+	if (cds_max_concurrent_connections_reached()) {
+		cds_err("Reached max concurrent connections: %d",
+			hdd_ctx->config->gMaxConcurrentActiveSessions);
+		goto done;
+	}
+
+	/* channel == 0 means the caller has no channel yet; all
+	 * channel-based checks below are skipped in that case.
+	 */
+	if (channel) {
+		/* don't allow 3rd home channel on same MAC */
+		if (!cds_allow_new_home_channel(hdd_ctx, channel,
+			num_connections))
+				goto done;
+
+		/* don't allow MCC if SAP/GO on DFS channel or about to come up
+		* on DFS channel
+		*/
+		count = cds_mode_specific_connection_count(hdd_ctx,
+				CDS_P2P_GO_MODE, list)
+		while (index < count) {
+			if ((CDS_IS_DFS_CH(
+				conc_connection_list[list[index]].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(channel)) &&
+				(channel !=
+				conc_connection_list[list[index]].chan)) {
+				/* err msg */
+				cds_err("don't allow MCC if SAP/GO on DFS channel");
+				goto done;
+			}
+			index++;
+		}
+
+		/* Same DFS/MCC rule, now against existing SAP sessions */
+		index = 0;
+		count = cds_mode_specific_connection_count(hdd_ctx,
+				CDS_SAP_MODE, list);
+		while (index < count) {
+			if ((CDS_IS_DFS_CH(
+				conc_connection_list[list[index]].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(channel)) &&
+				(channel !=
+				conc_connection_list[list[index]].chan)) {
+				/* err msg */
+				cds_err("don't allow MCC if SAP/GO on DFS channel");
+				goto done;
+			}
+			index++;
+		}
+
+		/* New SAP/GO on a DFS channel must not create MCC either */
+		index = 0;
+		if ((CDS_P2P_GO_MODE == mode) || (CDS_SAP_MODE == mode)) {
+			if (CDS_IS_DFS_CH(channel))
+				match = cds_disallow_mcc(hdd_ctx, channel);
+		}
+		if (true == match) {
+			cds_err("No MCC, SAP/GO about to come up on DFS channel");
+			goto done;
+		}
+	}
+
+	/* don't allow IBSS + STA MCC */
+	/* don't allow IBSS + STA SCC if IBSS is on DFS channel */
+	count = cds_mode_specific_connection_count(hdd_ctx,
+			CDS_STA_MODE, list);
+	if ((CDS_IBSS_MODE == mode) &&
+		(cds_mode_specific_connection_count(hdd_ctx,
+		CDS_IBSS_MODE, list)) && count) {
+		/* err msg */
+		cds_err("No 2nd IBSS, we already have STA + IBSS");
+		goto done;
+	}
+	if ((CDS_IBSS_MODE == mode) &&
+		(CDS_IS_DFS_CH(channel)) && count) {
+		/* err msg */
+		cds_err("No IBSS + STA SCC/MCC, IBSS is on DFS channel");
+		goto done;
+	}
+	if (CDS_IBSS_MODE == mode) {
+		if (wma_is_hw_dbs_capable() == true) {
+			/* DBS: IBSS tolerated only next to a single STA on
+			 * the same channel or a different band.
+			 */
+			if (num_connections > 1) {
+				/* err msg */
+				cds_err("No IBSS, we have concurrent connections already");
+				goto done;
+			}
+			if (CDS_STA_MODE != conc_connection_list[0].mode) {
+				/* err msg */
+				cds_err("No IBSS, we have a non STA connection");
+				goto done;
+			}
+			if (channel &&
+				(conc_connection_list[0].chan != channel) &&
+				CDS_IS_SAME_BAND_CHANNELS(
+				conc_connection_list[0].chan, channel)) {
+				/* err msg */
+				cds_err("No IBSS + STA MCC");
+				goto done;
+			}
+		} else if (num_connections) {
+			/* err msg */
+			cds_err("No IBSS, we have one connection already");
+			goto done;
+		}
+	}
+	/* Mirror checks for a new STA joining an existing IBSS */
+	count = cds_mode_specific_connection_count(hdd_ctx,
+			CDS_STA_MODE, list);
+	if ((CDS_STA_MODE == mode) &&
+		(cds_mode_specific_connection_count(hdd_ctx,
+		CDS_IBSS_MODE, list)) && count) {
+		/* err msg */
+		cds_err("No 2nd STA, we already have STA + IBSS");
+		goto done;
+	}
+
+	if ((CDS_STA_MODE == mode) &&
+		(cds_mode_specific_connection_count(hdd_ctx,
+		CDS_IBSS_MODE, list))) {
+		if (wma_is_hw_dbs_capable() == true) {
+			if (num_connections > 1) {
+				/* err msg */
+				cds_err("No 2nd STA, we already have IBSS concurrency");
+				goto done;
+			}
+			if (channel &&
+				(CDS_IS_DFS_CH(conc_connection_list[0].chan))
+				&& (CDS_IS_CHANNEL_5GHZ(channel))) {
+				/* err msg */
+				cds_err("No IBSS + STA SCC/MCC, IBSS is on DFS channel");
+				goto done;
+			}
+			if ((conc_connection_list[0].chan != channel) &&
+				CDS_IS_SAME_BAND_CHANNELS(
+				conc_connection_list[0].chan, channel)) {
+				/* err msg */
+				cds_err("No IBSS + STA MCC");
+				goto done;
+			}
+		} else {
+			/* err msg */
+			cds_err("No STA, we have IBSS connection already");
+			goto done;
+		}
+	}
+
+	/* can we allow vht160 */
+	if (num_connections &&
+		((bw == HW_MODE_80_PLUS_80_MHZ) || (bw == HW_MODE_160_MHZ))) {
+		/* err msg */
+		cds_err("No VHT160, we have one connection already");
+		goto done;
+	}
+	status = true;
+
+done:
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+	return status;
+}
+
+/**
+ * cds_get_first_connection_pcl_table_index() - provides the
+ * row index to firstConnectionPclTable to get to the correct
+ * pcl
+ * @hdd_ctx:	HDD Context
+ *
+ * This function provides the row index to
+ * firstConnectionPclTable. The index is the preference config.
+ *
+ * Return: table index
+ */
+enum cds_conc_priority_mode cds_get_first_connection_pcl_table_index(
+					hdd_context_t *hdd_ctx)
+{
+	/* Out-of-range INI values are clamped to the throughput row */
+	if (hdd_ctx->config->conc_system_pref >= CDS_MAX_CONC_PRIORITY_MODE)
+		return CDS_THROUGHPUT;
+	return hdd_ctx->config->conc_system_pref;
+}
+
+/**
+ * cds_get_second_connection_pcl_table_index() - provides the
+ * row index to secondConnectionPclTable to get to the correct
+ * pcl
+ * @hdd_ctx:	HDD Context
+ *
+ * This function provides the row index to
+ * secondConnectionPclTable. The index is derived based on
+ * current connection, band on which it is on & chain mask it is
+ * using, as obtained from conc_connection_list.
+ *
+ * Return: table index (CDS_MAX_ONE_CONNECTION_MODE when the existing
+ * connection's mode is not one of STA/SAP/P2P-CLI/P2P-GO/IBSS)
+ */
+enum cds_one_connection_mode cds_get_second_connection_pcl_table_index(
+					hdd_context_t *hdd_ctx)
+{
+	enum cds_one_connection_mode index = CDS_MAX_ONE_CONNECTION_MODE;
+
+	/* Index = f(mode of connection 0, its band, its chain mask);
+	 * any chain mask other than CDS_ONE_ONE is treated as 2x2.
+	 */
+	if (CDS_STA_MODE == conc_connection_list[0].mode) {
+		if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan)) {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_STA_24_1x1;
+			else
+				index = CDS_STA_24_2x2;
+		} else {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_STA_5_1x1;
+			else
+				index = CDS_STA_5_2x2;
+		}
+	} else if (CDS_SAP_MODE == conc_connection_list[0].mode) {
+		if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan)) {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_SAP_24_1x1;
+			else
+				index = CDS_SAP_24_2x2;
+		} else {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_SAP_5_1x1;
+			else
+				index = CDS_SAP_5_2x2;
+		}
+	} else if (CDS_P2P_CLIENT_MODE == conc_connection_list[0].mode) {
+		if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan)) {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_P2P_CLI_24_1x1;
+			else
+				index = CDS_P2P_CLI_24_2x2;
+		} else {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_P2P_CLI_5_1x1;
+			else
+				index = CDS_P2P_CLI_5_2x2;
+		}
+	} else if (CDS_P2P_GO_MODE == conc_connection_list[0].mode) {
+		if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan)) {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_P2P_GO_24_1x1;
+			else
+				index = CDS_P2P_GO_24_2x2;
+		} else {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_P2P_GO_5_1x1;
+			else
+				index = CDS_P2P_GO_5_2x2;
+		}
+	} else if (CDS_IBSS_MODE == conc_connection_list[0].mode) {
+		if (CDS_IS_CHANNEL_24GHZ(conc_connection_list[0].chan)) {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_IBSS_24_1x1;
+			else
+				index = CDS_IBSS_24_2x2;
+		} else {
+			if (CDS_ONE_ONE == conc_connection_list[0].chain_mask)
+				index = CDS_IBSS_5_1x1;
+			else
+				index = CDS_IBSS_5_2x2;
+		}
+	}
+
+	cds_debug("mode:%d chan:%d chain:%d index:%d",
+		conc_connection_list[0].mode, conc_connection_list[0].chan,
+		conc_connection_list[0].chain_mask, index);
+
+	return index;
+}
+
+/**
+ * cds_get_third_connection_pcl_table_index() - provides the
+ * row index to thirdConnectionPclTable to get to the correct
+ * pcl
+ * @hdd_ctx:	HDD Context
+ *
+ * This function provides the row index to
+ * thirdConnectionPclTable. The index is derived based on
+ * current connection, band on which it is on & chain mask it is
+ * using, as obtained from conc_connection_list.
+ *
+ * Return: table index
+ */
+enum cds_two_connection_mode cds_get_third_connection_pcl_table_index(
+	hdd_context_t *hdd_ctx)
+{
+	enum cds_one_connection_mode index = CDS_MAX_TWO_CONNECTION_MODE;
+
+	/* STA + SAP */
+	if (((CDS_STA_MODE == conc_connection_list[0].mode) &&
+		(CDS_SAP_MODE == conc_connection_list[1].mode)) ||
+		((CDS_SAP_MODE == conc_connection_list[0].mode) &&
+		(CDS_STA_MODE == conc_connection_list[1].mode))) {
+		/* SCC */
+		if (conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) {
+			if (CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_SAP_SCC_24_1x1;
+				else
+					index = CDS_STA_SAP_SCC_24_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_SAP_SCC_5_1x1;
+				else
+					index = CDS_STA_SAP_SCC_5_2x2;
+			}
+		/* MCC */
+		} else if (conc_connection_list[0].mac ==
+				conc_connection_list[1].mac) {
+			if ((CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_SAP_MCC_24_1x1;
+				else
+					index = CDS_STA_SAP_MCC_24_2x2;
+			} else if ((CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_SAP_MCC_5_1x1;
+				else
+					index = CDS_STA_SAP_MCC_5_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_SAP_MCC_24_5_1x1;
+				else
+					index = CDS_STA_SAP_MCC_24_5_2x2;
+			}
+		/* DBS */
+		} else
+			index = CDS_STA_SAP_DBS_1x1;
+	} else    /* STA + P2P GO */
+	if (((CDS_STA_MODE == conc_connection_list[0].mode) &&
+		(CDS_P2P_GO_MODE == conc_connection_list[1].mode)) ||
+		((CDS_P2P_GO_MODE == conc_connection_list[0].mode) &&
+		(CDS_STA_MODE == conc_connection_list[1].mode))) {
+		/* SCC */
+		if (conc_connection_list[0].chan ==
+		conc_connection_list[1].chan) {
+			if (CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[0].chan)) {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_GO_SCC_24_1x1;
+				else
+					index = CDS_STA_P2P_GO_SCC_24_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_GO_SCC_5_1x1;
+				else
+					index = CDS_STA_P2P_GO_SCC_5_2x2;
+			}
+		/* MCC */
+		} else if (conc_connection_list[0].mac ==
+			conc_connection_list[1].mac) {
+			if ((CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_GO_MCC_24_1x1;
+				else
+					index = CDS_STA_P2P_GO_MCC_24_2x2;
+			} else if ((CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_GO_MCC_5_1x1;
+				else
+					index = CDS_STA_P2P_GO_MCC_5_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_GO_MCC_24_5_1x1;
+				else
+					index = CDS_STA_P2P_GO_MCC_24_5_2x2;
+			}
+		/* DBS */
+		} else
+			index = CDS_STA_P2P_GO_DBS_1x1;
+	} else    /* STA + P2P CLI */
+	if (((CDS_STA_MODE == conc_connection_list[0].mode) &&
+		(CDS_P2P_CLIENT_MODE == conc_connection_list[1].mode)) ||
+		((CDS_P2P_CLIENT_MODE == conc_connection_list[0].mode) &&
+		(CDS_STA_MODE == conc_connection_list[1].mode))) {
+		/* SCC */
+		if (conc_connection_list[0].chan ==
+		conc_connection_list[1].chan) {
+			if (CDS_IS_CHANNEL_24GHZ
+				(conc_connection_list[0].chan)) {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_CLI_SCC_24_1x1;
+				else
+					index = CDS_STA_P2P_CLI_SCC_24_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+				conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_CLI_SCC_5_1x1;
+				else
+					index = CDS_STA_P2P_CLI_SCC_5_2x2;
+			}
+		/* MCC */
+		} else if (conc_connection_list[0].mac ==
+			conc_connection_list[1].mac) {
+			if ((CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_CLI_MCC_24_1x1;
+				else
+					index = CDS_STA_P2P_CLI_MCC_24_2x2;
+			} else if ((CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_CLI_MCC_5_1x1;
+				else
+					index = CDS_STA_P2P_CLI_MCC_5_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_STA_P2P_CLI_MCC_24_5_1x1;
+				else
+					index = CDS_STA_P2P_CLI_MCC_24_5_2x2;
+			}
+		/* DBS */
+		} else
+			index = CDS_STA_P2P_CLI_DBS_1x1;
+	} else    /* P2P GO + P2P CLI */
+	if (((CDS_P2P_GO_MODE == conc_connection_list[0].mode) &&
+		(CDS_P2P_CLIENT_MODE == conc_connection_list[1].mode)) ||
+		((CDS_P2P_CLIENT_MODE == conc_connection_list[0].mode) &&
+		(CDS_P2P_GO_MODE == conc_connection_list[1].mode))) {
+		/* SCC */
+		if (conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) {
+			if (CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_P2P_CLI_SCC_24_1x1;
+				else
+					index = CDS_P2P_GO_P2P_CLI_SCC_24_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_P2P_CLI_SCC_5_1x1;
+				else
+					index = CDS_P2P_GO_P2P_CLI_SCC_5_2x2;
+			}
+		/* MCC */
+		} else if (conc_connection_list[0].mac ==
+			conc_connection_list[1].mac) {
+			if ((CDS_IS_CHANNEL_24GHZ(
+			conc_connection_list[0].chan)) &&
+			(CDS_IS_CHANNEL_24GHZ(
+			conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_P2P_CLI_MCC_24_1x1;
+				else
+					index = CDS_P2P_GO_P2P_CLI_MCC_24_2x2;
+			} else if ((CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_P2P_CLI_MCC_5_1x1;
+				else
+					index = CDS_P2P_GO_P2P_CLI_MCC_5_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_P2P_CLI_MCC_24_5_1x1;
+				else
+					index = CDS_P2P_GO_P2P_CLI_MCC_24_5_2x2;
+			}
+		/* DBS */
+		} else
+			index = CDS_P2P_GO_P2P_CLI_DBS_1x1;
+	} else    /* SAP + P2P GO */
+	if (((CDS_SAP_MODE == conc_connection_list[0].mode) &&
+		(CDS_P2P_GO_MODE == conc_connection_list[1].mode)) ||
+		((CDS_P2P_GO_MODE == conc_connection_list[0].mode) &&
+		(CDS_SAP_MODE == conc_connection_list[1].mode))) {
+		/* SCC */
+		if (conc_connection_list[0].chan ==
+			conc_connection_list[1].chan) {
+			if (CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_SAP_SCC_24_1x1;
+				else
+					index = CDS_P2P_GO_SAP_SCC_24_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_SAP_SCC_5_1x1;
+				else
+					index = CDS_P2P_GO_SAP_SCC_5_2x2;
+			}
+		/* MCC */
+		} else if (conc_connection_list[0].mac ==
+			conc_connection_list[1].mac) {
+			if ((CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_24GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_SAP_MCC_24_1x1;
+				else
+					index = CDS_P2P_GO_SAP_MCC_24_2x2;
+			} else if ((CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[0].chan)) &&
+				(CDS_IS_CHANNEL_5GHZ(
+				conc_connection_list[1].chan))) {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_SAP_MCC_5_1x1;
+				else
+					index = CDS_P2P_GO_SAP_MCC_5_2x2;
+			} else {
+				if (CDS_ONE_ONE ==
+					conc_connection_list[0].chain_mask)
+					index = CDS_P2P_GO_SAP_MCC_24_5_1x1;
+				else
+					index = CDS_P2P_GO_SAP_MCC_24_5_2x2;
+			}
+		/* DBS */
+		} else
+			index = CDS_P2P_GO_SAP_DBS_1x1;
+	}
+
+	cds_debug("mode0:%d mode1:%d chan0:%d chan1:%d chain:%d index:%d",
+		conc_connection_list[0].mode, conc_connection_list[1].mode,
+		conc_connection_list[0].chan, conc_connection_list[1].chan,
+		conc_connection_list[0].chain_mask, index);
+
+	return index;
+}
+
+/**
+ * cds_mode_switch_dbs_to_mcc() - initiates a mode switch
+ * from DBS to MCC
+ * @hdd_ctx:	HDD Context
+ *
+ * This function initiates a mode switch from DBS to MCC if any
+ * change in concurrency scenario or some other external entity
+ * (looking for range, thermal mitigation etc.) made an explicit
+ * request. Notifies FW as well
+ *
+ * Return: CDF_STATUS enum
+ */
+CDF_STATUS cds_mode_switch_dbs_to_mcc(hdd_context_t *hdd_ctx)
+{
+	/* Placeholder: the DBS-to-MCC switch is not implemented yet,
+	 * so unconditionally report success to the caller.
+	 */
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+
+	return status;
+}
+
+/**
+ * cds_mode_switch_mcc_to_dbs() - initiates a mode switch
+ * from MCC to DBS
+ * @hdd_ctx:	HDD Context
+ *
+ * This function initiates a mode switch from MCC to DBS if any
+ * change in concurrency scenario or some other external entity
+ * (powersave, thermal mitigation etc.) made an explicit
+ * request. Notifies FW as well
+ *
+ * Return: CDF_STATUS enum
+ */
+CDF_STATUS cds_mode_switch_mcc_to_dbs(hdd_context_t *hdd_ctx)
+{
+	/* Placeholder: the MCC-to-DBS switch is not implemented yet,
+	 * so unconditionally report success to the caller.
+	 */
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+
+	return status;
+}
+
+/**
+ * cds_current_connections_update() - initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection
+ * @hdd_ctx:	HDD Context
+ * @channel: Channel on which new connection will be
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. Notifies UMAC & FW as well
+ *
+ * Return: CDF_STATUS enum
+ */
+CDF_STATUS cds_current_connections_update(
+				hdd_context_t *hdd_ctx,
+				uint8_t channel)
+{
+	enum cds_conc_next_action action = CDS_NOP;
+	uint32_t active_cnt = 0;
+	enum cds_one_connection_mode idx_2nd = 0;
+	enum cds_two_connection_mode idx_3rd = 0;
+	enum cds_band op_band;
+	CDF_STATUS ret = CDF_STATUS_E_FAILURE;
+
+	/* Without DBS-capable hardware there is nothing to adjust */
+	if (false == wma_is_hw_dbs_capable()) {
+		cds_err("driver isn't dbs capable, no further action needed");
+		return CDF_STATUS_E_NOSUPPORT;
+	}
+
+	op_band = CDS_IS_CHANNEL_24GHZ(channel) ? CDS_BAND_24 : CDS_BAND_5;
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	active_cnt = cds_get_connection_count(hdd_ctx);
+
+	cds_debug("num_connections=%d channel=%d",
+		active_cnt, channel);
+
+	/* Look up the next action from the table matching the number of
+	 * connections that exist before this new one comes up.
+	 */
+	switch (active_cnt) {
+	case 0:
+		action = CDS_NOP;
+#ifdef QCA_WIFI_3_0_EMU
+		/* For emulation only: if it is a connection on 2.4,
+		 * request DBS
+		 */
+		if (CDS_IS_CHANNEL_24GHZ(channel))
+			action = CDS_DBS;
+#endif
+		break;
+	case 1:
+		idx_2nd = cds_get_second_connection_pcl_table_index(hdd_ctx);
+		if (CDS_MAX_ONE_CONNECTION_MODE == idx_2nd) {
+			cds_err("couldn't find index for 2nd connection next action table");
+			goto done;
+		}
+		action = next_action_two_connection_table[idx_2nd][op_band];
+		break;
+	case 2:
+		idx_3rd = cds_get_third_connection_pcl_table_index(hdd_ctx);
+		if (CDS_MAX_TWO_CONNECTION_MODE == idx_3rd) {
+			cds_err("couldn't find index for 3rd connection next action table");
+			goto done;
+		}
+		action = next_action_three_connection_table[idx_3rd][op_band];
+		break;
+	default:
+		cds_err("unexpected num_connections value %d", active_cnt);
+		break;
+	}
+
+	/* CDS_NOP means no HW mode change is required */
+	if (CDS_NOP == action)
+		ret = CDF_STATUS_E_NOSUPPORT;
+	else
+		ret = cds_next_actions(hdd_ctx, action);
+
+	cds_debug("index2=%d index3=%d next_action=%d, band=%d status=%d",
+		idx_2nd, idx_3rd, action, op_band, ret);
+
+done:
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+	return ret;
+}
+
+/**
+ * cds_wait_for_nss_update() - finds out if we need to wait
+ * for all nss update to finish before requesting for HW mode
+ * update
+ * @hdd_ctx:	HDD Context
+ * @action: next action to happen at policy mgr after
+ *		beacon update
+ *
+ * This function finds out if we need to wait
+ * for all nss update to finish before requesting for HW mode
+ * update
+ *
+ * Return: boolean. True = wait for nss update, False = go ahead
+ * with HW mode update
+ */
+bool cds_wait_for_nss_update(hdd_context_t *hdd_ctx, uint8_t action)
+{
+	uint32_t i;
+	uint8_t pending_ss;
+
+	/* For CDS_DBS, wait while any in-use connection that has
+	 * original_nss == 1 still reports 2x2 spatial streams; for
+	 * CDS_MCC, while such a connection still reports 1x1. Any other
+	 * action never waits.
+	 */
+	if (CDS_DBS == action)
+		pending_ss = 2;
+	else if (CDS_MCC == action)
+		pending_ss = 1;
+	else
+		return false;
+
+	for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) {
+		if (conc_connection_list[i].in_use &&
+			(1 == conc_connection_list[i].original_nss) &&
+			(pending_ss ==
+				conc_connection_list[i].tx_spatial_stream) &&
+			(pending_ss ==
+				conc_connection_list[i].rx_spatial_stream))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * cds_nss_update_cb() - callback from SME confirming nss
+ * update
+ * @hdd_ctx:	HDD Context
+ * @tx_status: tx completion status for updated beacon with new
+ *		nss value
+ * @vdev_id: vdev id for the specific connection
+ * @next_action: next action to happen at policy mgr after
+ *		beacon update
+ *
+ * This function is the callback registered with SME at nss
+ * update request time
+ *
+ * Return: None
+ */
+void cds_nss_update_cb(void *context, uint8_t tx_status, uint8_t vdev_id,
+				uint8_t next_action)
+{
+	hdd_context_t *hdd_ctx = (hdd_context_t *)context;
+	uint32_t conn_index = 0;
+	bool wait = true;
+
+	/* The nss/beacon update for this vdev failed; abort here so the
+	 * HW mode change is never requested on its behalf.
+	 */
+	if (CDF_STATUS_E_FAILURE == tx_status) {
+		cds_err("nss update failed for vdev %d", vdev_id);
+		return;
+	}
+	if (NULL == hdd_ctx) {
+		cds_err("NULL hdd_ctx");
+		return;
+	}
+
+	/**
+	 * Check if we are ok to request for HW mode change now
+	 */
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	conn_index = cds_get_connection_for_vdev_id(hdd_ctx, vdev_id);
+	if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) {
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		cds_err("connection not found for vdev %d", vdev_id);
+		return;
+	}
+	switch (next_action) {
+	case CDS_DBS:
+		/* Record the updated spatial stream counts (1x1) for this
+		 * vdev, then check whether other vdevs are still pending.
+		 */
+		conc_connection_list[conn_index].tx_spatial_stream = 1;
+		conc_connection_list[conn_index].rx_spatial_stream = 1;
+		wait = cds_wait_for_nss_update(hdd_ctx, next_action);
+		break;
+	case CDS_MCC:
+		/* Record the updated spatial stream counts (2x2) for this
+		 * vdev, then check whether other vdevs are still pending.
+		 */
+		conc_connection_list[conn_index].tx_spatial_stream = 2;
+		conc_connection_list[conn_index].rx_spatial_stream = 2;
+		wait = cds_wait_for_nss_update(hdd_ctx, next_action);
+		break;
+	default:
+		cds_err("unexpected action %d", next_action);
+		break;
+	}
+	/* No vdev is still pending an nss update: issue the HW mode
+	 * change while the connection list lock is still held.
+	 */
+	if (!wait)
+		cds_next_actions(hdd_ctx, next_action);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+	return;
+}
+
+/**
+ * cds_complete_action() - initiates actions needed on
+ * current connections once channel has been decided for the new
+ * connection
+ * @hdd_ctx:	HDD Context
+ * @new_nss: the new nss value
+ * @next_action: next action to happen at policy mgr after
+ *		beacon update
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. Notifies UMAC & FW as well
+ *
+ * Return: CDF_STATUS enum
+ */
+CDF_STATUS cds_complete_action(hdd_context_t *hdd_ctx,
+				uint8_t  new_nss, uint8_t next_action)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t index = 0, count = 0;
+	uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
+	uint32_t conn_index = 0;
+
+	if (wma_is_hw_dbs_capable() == false) {
+		cds_err("driver isn't dbs capable, no further action needed");
+		return CDF_STATUS_E_NOSUPPORT;
+	}
+
+	/* cds_complete_action() is called by cds_next_actions().
+	 * All other callers of cds_next_actions() have taken mutex
+	 * protection. So, not taking any lock inside cds_complete_action()
+	 * during conc_connection_list access.
+	 */
+
+	/* Request an nss (beacon) update for every P2P GO vdev whose
+	 * original nss was 1; cds_nss_update_cb() drives next_action once
+	 * the updates are confirmed.
+	 */
+	count = cds_mode_specific_connection_count(hdd_ctx,
+			CDS_P2P_GO_MODE, list);
+	while (index < count) {
+		conn_index = cds_get_connection_for_vdev_id(hdd_ctx,
+				conc_connection_list[list[index]].vdev_id);
+		if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) {
+			cds_err("connection not found for vdev %d",
+				conc_connection_list[list[index]].vdev_id);
+			/* Bug fix: advance before continuing. The original
+			 * 'continue' skipped the index++ at the bottom of
+			 * the loop and spun forever on the same entry.
+			 */
+			index++;
+			continue;
+		}
+
+		if (1 == conc_connection_list[list[index]].original_nss) {
+			status = sme_nss_update_request(hdd_ctx->hHal,
+					conc_connection_list
+					[list[index]].vdev_id, new_nss,
+					cds_nss_update_cb,
+					next_action, hdd_ctx);
+			if (!CDF_IS_STATUS_SUCCESS(status)) {
+				cds_err("sme_nss_update_request() failed for vdev %d",
+				conc_connection_list[list[index]].vdev_id);
+			}
+		}
+		index++;
+	}
+
+	/* Repeat the same nss update request for SAP vdevs */
+	index = 0;
+	count = cds_mode_specific_connection_count(hdd_ctx,
+			CDS_SAP_MODE, list);
+	while (index < count) {
+		if (1 == conc_connection_list[list[index]].original_nss) {
+			status = sme_nss_update_request(hdd_ctx->hHal,
+					conc_connection_list
+					[list[index]].vdev_id, new_nss,
+					cds_nss_update_cb,
+					next_action, hdd_ctx);
+			if (!CDF_IS_STATUS_SUCCESS(status)) {
+				cds_err("sme_nss_update_request() failed for vdev %d",
+				conc_connection_list[list[index]].vdev_id);
+			}
+		}
+		index++;
+	}
+	/* No nss update request succeeded (or none was issued): drive
+	 * next_action directly instead of waiting for callbacks that
+	 * will never arrive.
+	 */
+	if (!CDF_IS_STATUS_SUCCESS(status))
+		status = cds_next_actions(hdd_ctx, next_action);
+
+	return status;
+}
+
+/**
+ * cds_next_actions() - initiates actions needed on current
+ * connections once channel has been decided for the new
+ * connection
+ * @hdd_ctx:	HDD Context
+ * @action: action to be executed
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. Notifies UMAC & FW as well
+ *
+ * Return: CDF_STATUS enum
+ */
+CDF_STATUS cds_next_actions(hdd_context_t *hdd_ctx,
+				enum cds_conc_next_action action)
+{
+	CDF_STATUS ret = CDF_STATUS_E_FAILURE;
+	struct sir_hw_mode_params hw_mode;
+	bool wants_dbs, wants_non_dbs;
+
+	if (false == wma_is_hw_dbs_capable()) {
+		cds_err("driver isn't dbs capable, no further action needed");
+		return CDF_STATUS_E_NOSUPPORT;
+	}
+
+	/* Read the current HW mode to see if any action is really needed */
+	ret = wma_get_current_hw_mode(&hw_mode);
+	if (!CDF_IS_STATUS_SUCCESS(ret)) {
+		cds_err("wma_get_current_hw_mode failed");
+		return ret;
+	}
+
+	/* Already in the requested mode: requesting DBS while dbs_cap is
+	 * set, or non-DBS while it is clear, is a no-op. May need to be
+	 * extended once multiple DBS HW modes are available.
+	 */
+	wants_dbs = (CDS_DBS_DOWNGRADE == action) || (CDS_DBS == action);
+	wants_non_dbs = (CDS_MCC_UPGRADE == action) || (CDS_MCC == action);
+	if ((wants_dbs && hw_mode.dbs_cap) ||
+		(wants_non_dbs && !hw_mode.dbs_cap)) {
+		cds_err("driver is already in %s mode, no further action needed",
+				(hw_mode.dbs_cap) ? "dbs" : "non dbs");
+		return CDF_STATUS_E_ALREADY;
+	}
+
+	switch (action) {
+	case CDS_DBS_DOWNGRADE:
+		/* A beaconing entity using 2x2 must first update its beacon
+		 * to 1x1 and have FW confirm it; the HW mode change request
+		 * follows from the nss-update callback path.
+		 */
+		ret = cds_complete_action(hdd_ctx, 1, CDS_DBS);
+		break;
+	case CDS_DBS:
+		ret = cds_soc_set_hw_mode(hdd_ctx, HW_MODE_SS_1x1,
+						HW_MODE_80_MHZ,
+						HW_MODE_SS_1x1, HW_MODE_40_MHZ,
+						HW_MODE_DBS,
+						HW_MODE_AGILE_DFS_NONE);
+		break;
+	case CDS_MCC_UPGRADE:
+		/* Restore any beacon that initially advertised 2x2 before
+		 * requesting the single-MAC HW mode.
+		 */
+		ret = cds_complete_action(hdd_ctx, 0, CDS_MCC);
+		break;
+	case CDS_MCC:
+		ret = cds_soc_set_hw_mode(hdd_ctx, HW_MODE_SS_2x2,
+						HW_MODE_80_MHZ,
+						HW_MODE_SS_0x0, HW_MODE_BW_NONE,
+						HW_MODE_DBS_NONE,
+						HW_MODE_AGILE_DFS_NONE);
+		break;
+	default:
+		cds_err("unexpected action value %d", action);
+		ret = CDF_STATUS_E_FAILURE;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * cds_cfg80211_get_concurrency_matrix() - to retrieve concurrency matrix
+ * @wiphy: pointer phy adapter
+ * @wdev: pointer to wireless device structure
+ * @data: pointer to data buffer
+ * @data_len: length of data
+ *
+ * This routine will give concurrency matrix
+ *
+ * Return: int status code
+ */
+static int __cds_cfg80211_get_concurrency_matrix(struct wiphy *wiphy,
+					 struct wireless_dev *wdev,
+					 const void *data,
+					 int data_len)
+{
+	uint32_t matrix[CDS_MAX_FEATURE_SET] = {0};
+	uint8_t i, num_sets, max_sets;
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX + 1];
+	struct sk_buff *skb;
+	hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
+	int status;
+
+	ENTER();
+
+	/* Vendor command is not supported in factory test mode */
+	if (CDF_FTM_MODE == hdd_get_conparam()) {
+		cds_err("Command not allowed in FTM mode");
+		return -EPERM;
+	}
+
+	status = wlan_hdd_validate_context(hdd_ctx);
+	if (0 != status) {
+		cds_err("HDD context is not valid");
+		return status;
+	}
+
+	if (nla_parse(tb, QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX,
+			data, data_len, NULL)) {
+		cds_err("Invalid ATTR");
+		return -EINVAL;
+	}
+
+	/* The caller must specify how many feature sets it can accept */
+	if (!tb[QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX]) {
+		cds_err("Attr max feature set size failed");
+		return -EINVAL;
+	}
+	max_sets = nla_get_u32(tb[
+		QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX]);
+	cds_info("Max feature set size (%d)", max_sets);
+
+	/* Fill feature combination matrix */
+	num_sets = 0;
+	matrix[num_sets++] = WIFI_FEATURE_INFRA | WIFI_FEATURE_P2P;
+	/* Add more feature combinations here */
+
+	/* Never report more sets than the caller asked for */
+	num_sets = CDF_MIN(num_sets, max_sets);
+	cds_info("Number of feature sets (%d)", num_sets);
+	cds_info("Feature set matrix");
+	for (i = 0; i < num_sets; i++)
+		cds_info("[%d] 0x%02X", i, matrix[i]);
+
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u32) +
+			sizeof(u32) * num_sets + NLMSG_HDRLEN);
+	if (!skb) {
+		cds_err("Feature set matrix: buffer alloc fail");
+		return -ENOMEM;
+	}
+
+	if (nla_put_u32(skb,
+		QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET_SIZE,
+		num_sets) ||
+	    nla_put(skb,
+		QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET,
+		sizeof(u32) * num_sets, matrix)) {
+		cds_err("nla put fail");
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	return cfg80211_vendor_cmd_reply(skb);
+}
+
+/**
+ * cds_cfg80211_get_concurrency_matrix() - get concurrency matrix
+ * @wiphy:   pointer to wireless wiphy structure.
+ * @wdev:    pointer to wireless_dev structure.
+ * @data:    Pointer to the data to be passed via vendor interface
+ * @data_len:Length of the data to be passed
+ *
+ * Return:   Return the Success or Failure code.
+ */
+int
+cds_cfg80211_get_concurrency_matrix(struct wiphy *wiphy,
+					struct wireless_dev *wdev,
+					const void *data, int data_len)
+{
+	int err;
+
+	/* SSR-protected wrapper around the real vendor-command handler */
+	cds_ssr_protect(__func__);
+	err = __cds_cfg80211_get_concurrency_matrix(wiphy, wdev, data,
+			data_len);
+	cds_ssr_unprotect(__func__);
+
+	return err;
+}
+
+/**
+ * cds_get_concurrency_mode() - return concurrency mode
+ *
+ * This routine is used to retrieve concurrency mode
+ *
+ * Return: uint32_t value of concurrency mask
+ */
+uint32_t cds_get_concurrency_mode(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+
+	if (NULL == hdd_ctx) {
+		/* we are in an invalid state :( */
+		cds_err("Invalid context");
+		return CDF_STA_MASK;
+	}
+
+	cds_info("concurrency_mode = 0x%x",
+		hdd_ctx->concurrency_mode);
+	return hdd_ctx->concurrency_mode;
+}
+
+/**
+ * cds_sap_restart_handle() - to handle restarting of SAP
+ * @work: name of the work
+ *
+ * Purpose of this function is to trigger sap start. this function
+ * will be called from workqueue.
+ *
+ * Return: void.
+ */
+static void cds_sap_restart_handle(struct work_struct *work)
+{
+	hdd_adapter_t *sap_adapter;
+	hdd_context_t *hdd_ctx = container_of(work, hdd_context_t,
+					sap_start_work);
+
+	cds_ssr_protect(__func__);
+	if (0 != wlan_hdd_validate_context(hdd_ctx)) {
+		cds_err("HDD context is not valid");
+		goto out;
+	}
+
+	sap_adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP);
+	if (NULL == sap_adapter) {
+		cds_err("sap_adapter is NULL");
+		goto out;
+	}
+
+	/* Bring the SAP back up and clear the pending-restart flag */
+	wlan_hdd_start_sap(sap_adapter);
+	cds_change_sap_restart_required_status(hdd_ctx, false);
+out:
+	cds_ssr_unprotect(__func__);
+}
+
+/**
+ * cds_check_and_restart_sap() - Check and restart sap if required
+ * @hdd_ctx: pointer to HDD context
+ * @roam_result: Roam result
+ * @hdd_sta_ctx: HDD station context
+ *
+ * This routine will restart the SAP if restart is pending
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_check_and_restart_sap(hdd_context_t *hdd_ctx,
+			eCsrRoamResult roam_result,
+			hdd_station_ctx_t *hdd_sta_ctx)
+{
+	hdd_adapter_t *sap_adapter = NULL;
+	hdd_ap_ctx_t *hdd_ap_ctx = NULL;
+	uint8_t default_sap_channel = 6;
+
+	/* Only act when custom concurrency rule1 is enabled AND a SAP
+	 * restart has been flagged as required; otherwise succeed as a
+	 * no-op.
+	 */
+	if (!(hdd_ctx->config->conc_custom_rule1 &&
+			(true == cds_is_sap_restart_required(hdd_ctx))))
+		return CDF_STATUS_SUCCESS;
+
+	sap_adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP);
+	if (sap_adapter == NULL) {
+		cds_err("sap_adapter is NULL");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Nothing to restart if the SAP BSS is already running */
+	if (test_bit(SOFTAP_BSS_STARTED, &sap_adapter->event_flags)) {
+		cds_err("SAP is already in started state");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(sap_adapter);
+	if (hdd_ap_ctx == NULL) {
+		cds_err("HDD sap context is NULL");
+		return CDF_STATUS_E_FAILURE;
+	}
+	/* If the STA associated on a channel below the 11a range, follow
+	 * the STA's channel; otherwise fall back to the default channel.
+	 */
+	if ((eCSR_ROAM_RESULT_ASSOCIATED == roam_result) &&
+			hdd_sta_ctx->conn_info.operationChannel <
+			SIR_11A_CHANNEL_BEGIN) {
+		cds_err("Starting SAP on chnl [%d] after STA assoc complete",
+			hdd_sta_ctx->conn_info.operationChannel);
+		hdd_ap_ctx->operatingChannel =
+			hdd_sta_ctx->conn_info.operationChannel;
+	} else {
+		/* start on default SAP channel */
+		hdd_ap_ctx->operatingChannel =
+			default_sap_channel;
+		cds_err("Starting SAP on channel [%d] after STA assoc failed",
+			default_sap_channel);
+	}
+	/* Recompute channel params for the chosen channel before start */
+	hdd_ap_ctx->sapConfig.ch_params.ch_width =
+		hdd_ap_ctx->sapConfig.ch_width_orig;
+	sme_set_ch_params(WLAN_HDD_GET_HAL_CTX(sap_adapter),
+			hdd_ap_ctx->sapConfig.SapHw_mode,
+			hdd_ap_ctx->operatingChannel,
+			hdd_ap_ctx->sapConfig.sec_ch,
+			&hdd_ap_ctx->sapConfig.ch_params);
+	/*
+	 * Create a workqueue and let the workqueue handle the restart
+	 * of sap task. if we directly call sap restart function without
+	 * creating workqueue then our main thread might go to sleep
+	 * which is not acceptable.
+	 */
+#ifdef CONFIG_CNSS
+	cnss_init_work(&hdd_ctx->sap_start_work,
+			cds_sap_restart_handle);
+#else
+	INIT_WORK(&hdd_ctx->sap_start_work,
+			cds_sap_restart_handle);
+#endif
+	schedule_work(&hdd_ctx->sap_start_work);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_sta_sap_concur_handle() - This function will handle Station and sap
+ * concurrency.
+ * @hdd_ctx: pointer to hdd context.
+ * @sta_adapter: pointer to station adapter.
+ * @roam_profile: pointer to station's roam profile.
+ *
+ * This function will find the AP to which station is likely to make the
+ * the connection, if that AP's channel happens to be different than
+ * SAP's channel then this function will stop the SAP.
+ *
+ * Return: true or false based on function's overall success.
+ */
+static bool cds_sta_sap_concur_handle(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *sta_adapter,
+		tCsrRoamProfile *roam_profile)
+{
+	hdd_adapter_t *ap_adapter = hdd_get_adapter(hdd_ctx,
+			WLAN_HDD_SOFTAP);
+	bool same_chan = false;
+	tScanResultHandle scan_cache = NULL;
+	CDF_STATUS status;
+
+	/* Nothing to reconcile unless a SAP BSS is actually running */
+	if ((NULL == ap_adapter) ||
+		!test_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags))
+		return true;
+
+	status = wlan_hdd_check_custom_con_channel_rules(sta_adapter,
+			ap_adapter, roam_profile, &scan_cache,
+			&same_chan);
+	if (CDF_STATUS_SUCCESS != status) {
+		cds_err("wlan_hdd_check_custom_con_channel_rules failed!");
+		/* Not returning */
+	}
+	status = sme_scan_result_purge(
+			WLAN_HDD_GET_HAL_CTX(sta_adapter),
+			scan_cache);
+	if (CDF_STATUS_SUCCESS != status) {
+		cds_err("sme_scan_result_purge failed!");
+		/* Not returning */
+	}
+	/*
+	 * same_chan is false when the SAP and STA channels differ, or
+	 * when the STA channel is still unknown (zero). In that case stop
+	 * the AP and mark a restart as required, so the SAP can be
+	 * brought back on the STA's channel once it is known.
+	 */
+	if (false == same_chan) {
+		cds_info("Stop AP due to mismatch with STA channel");
+		wlan_hdd_stop_sap(ap_adapter);
+		cds_change_sap_restart_required_status(hdd_ctx, true);
+		return false;
+	}
+	cds_info("sap channels are same");
+
+	return true;
+}
+
+#ifdef FEATURE_WLAN_CH_AVOID
+/**
+ * cds_sta_p2pgo_concur_handle() - This function will handle Station and GO
+ * concurrency.
+ * @hdd_ctx: pointer to hdd context.
+ * @sta_adapter: pointer to station adapter.
+ * @roam_profile: pointer to station's roam profile.
+ * @roam_id: reference to roam_id variable being passed.
+ *
+ * This function will find the AP to which station is likely to make the
+ * the connection, if that AP's channel happens to be different than our
+ * P2PGO's channel then this function will send avoid frequency event to
+ * framework to make P2PGO stop and also caches station's connect request.
+ *
+ * Return: true or false based on function's overall success.
+ */
+static bool cds_sta_p2pgo_concur_handle(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *sta_adapter,
+		tCsrRoamProfile *roam_profile,
+		uint32_t *roam_id)
+{
+	hdd_adapter_t *p2pgo_adapter = hdd_get_adapter(hdd_ctx,
+			WLAN_HDD_P2P_GO);
+	bool are_cc_channels_same = false;
+	tScanResultHandle scan_cache = NULL;
+	uint32_t p2pgo_channel_num, freq;
+	tHddAvoidFreqList hdd_avoid_freq_list;
+	CDF_STATUS status;
+	bool ret;
+
+	/* Only relevant while a P2P GO session is actually running */
+	if ((p2pgo_adapter != NULL) &&
+		test_bit(SOFTAP_BSS_STARTED, &p2pgo_adapter->event_flags)) {
+		status =
+			wlan_hdd_check_custom_con_channel_rules(sta_adapter,
+					p2pgo_adapter, roam_profile,
+					&scan_cache, &are_cc_channels_same);
+		if (CDF_STATUS_SUCCESS != status) {
+			cds_err("wlan_hdd_check_custom_con_channel_rules failed");
+			/* Not returning */
+		}
+		/*
+		 * are_cc_channels_same will be false incase if P2PGO and STA
+		 * channel is different or STA channel is zero.
+		 */
+		if (false == are_cc_channels_same) {
+			/* Drop any previously cached join request before
+			 * storing the new one.
+			 */
+			if (true == cds_is_sta_connection_pending(hdd_ctx)) {
+				MTRACE(cdf_trace(CDF_MODULE_ID_HDD,
+					TRACE_CODE_HDD_CLEAR_JOIN_REQ,
+					sta_adapter->sessionId, *roam_id));
+				ret = sme_clear_joinreq_param(
+					WLAN_HDD_GET_HAL_CTX(sta_adapter),
+					sta_adapter->sessionId);
+				if (true != ret) {
+					cds_err("sme_clear_joinreq_param failed");
+					/* Not returning */
+				}
+				cds_change_sta_conn_pending_status(hdd_ctx,
+						false);
+				cds_info("===>Clear pending join req");
+			}
+			MTRACE(cdf_trace(CDF_MODULE_ID_HDD,
+					TRACE_CODE_HDD_STORE_JOIN_REQ,
+					sta_adapter->sessionId, *roam_id));
+			/* store the scan cache here */
+			ret = sme_store_joinreq_param(
+					WLAN_HDD_GET_HAL_CTX(sta_adapter),
+					roam_profile,
+					scan_cache,
+					roam_id,
+					sta_adapter->sessionId);
+			if (true != ret) {
+				cds_err("sme_store_joinreq_param failed");
+				/* Not returning */
+			}
+			cds_change_sta_conn_pending_status(hdd_ctx, true);
+			/*
+			 * fill frequency avoidance event and send it up, so
+			 * p2pgo stop event should get trigger from upper layer
+			 */
+			p2pgo_channel_num =
+				WLAN_HDD_GET_AP_CTX_PTR(p2pgo_adapter)->
+				operatingChannel;
+			/* Channels 1-14 are 2.4 GHz, everything else 5 GHz */
+			if (p2pgo_channel_num <= 14) {
+				freq = ieee80211_channel_to_frequency(
+						p2pgo_channel_num,
+						IEEE80211_BAND_2GHZ);
+			} else {
+				freq = ieee80211_channel_to_frequency(
+						p2pgo_channel_num,
+						IEEE80211_BAND_5GHZ);
+			}
+			/* Single-frequency avoid range covering only the
+			 * GO's current operating frequency.
+			 */
+			cdf_mem_zero(&hdd_avoid_freq_list,
+					sizeof(hdd_avoid_freq_list));
+			hdd_avoid_freq_list.avoidFreqRangeCount = 1;
+			hdd_avoid_freq_list.avoidFreqRange[0].startFreq = freq;
+			hdd_avoid_freq_list.avoidFreqRange[0].endFreq = freq;
+			wlan_hdd_send_avoid_freq_event(hdd_ctx,
+					&hdd_avoid_freq_list);
+			cds_info("===>Sending chnl_avoid ch[%d] freq[%d]",
+				p2pgo_channel_num, freq);
+			cds_info("=>Stop GO due to mismatch with STA channel");
+			return false;
+		} else {
+			cds_info("===>p2pgo channels are same");
+			/* Same channel: the cached scan results are no
+			 * longer needed, release them.
+			 */
+			status = sme_scan_result_purge(
+					WLAN_HDD_GET_HAL_CTX(sta_adapter),
+					scan_cache);
+			if (CDF_STATUS_SUCCESS != status) {
+				cds_err("sme_scan_result_purge failed");
+				/* Not returning */
+			}
+		}
+	}
+	return true;
+}
+#endif
+
+/**
+ * cds_handle_conc_rule1() - Check if concurrency rule1 is enabled
+ * @hdd_ctx: HDD context
+ * @adapter: HDD adpater
+ * @roam_profile: Profile for connection
+ *
+ * Check if concurrency rule1 is enabled. As per rule1, if station is trying to
+ * connect to some AP in 2.4Ghz and SAP is already in started state then SAP
+ * should restart on the station's channel.
+ *
+ * Return: None
+ */
+void cds_handle_conc_rule1(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile)
+{
+	/*
+	 * Custom concurrency rule1: when a station is connecting to some
+	 * AP in 2.4Ghz while the SAP is already started, the SAP must be
+	 * restarted on the station's channel. Applies only to
+	 * infra-station adapters with the rule enabled in config.
+	 */
+	if (!hdd_ctx->config->conc_custom_rule1 ||
+			(WLAN_HDD_INFRA_STATION != adapter->device_mode))
+		return;
+
+	if (true != cds_sta_sap_concur_handle(hdd_ctx, adapter,
+				roam_profile)) {
+		cds_err("cds_sta_sap_concur_handle failed");
+		/* Nothing to do for now */
+	}
+}
+
+#ifdef FEATURE_WLAN_CH_AVOID
+/**
+ * cds_handle_conc_rule2() - Check if concurrency rule2 is enabled
+ * @hdd_ctx: HDD context
+ * @adapter: HDD adpater
+ * @roam_profile: Profile for connection
+ *
+ * Check if concurrency rule2 is enabled. As per rule2, if station is trying to
+ * connect to some AP in 5Ghz and P2PGO is already in started state then P2PGO
+ * should restart in station's channel
+ *
+ * Return: None
+ */
+bool cds_handle_conc_rule2(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *adapter,
+		tCsrRoamProfile *roam_profile,
+		uint32_t *roam_id)
+{
+	/*
+	 * Custom concurrency rule2: when a station is connecting to some
+	 * AP in 5Ghz while a P2PGO is already started, the P2PGO must be
+	 * restarted on the station's channel. Applies only to
+	 * infra-station adapters with the rule enabled in config.
+	 */
+	if (!hdd_ctx->config->conc_custom_rule2 ||
+		(WLAN_HDD_INFRA_STATION != adapter->device_mode))
+		return true;
+
+	if (false == cds_sta_p2pgo_concur_handle(hdd_ctx,
+				adapter, roam_profile, roam_id)) {
+		cds_err("P2PGO-STA chnl diff, cache join req");
+		return false;
+	}
+	return true;
+}
+#endif
+
+/**
+ * cds_get_channel_from_scan_result() - to get channel from scan result
+ * @adapter: station adapter
+ * @roam_profile: pointer to roam profile
+ * @channel: channel to be filled
+ *
+ * This routine gets channel which most likely a candidate to which STA
+ * will make connection.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_get_channel_from_scan_result(hdd_adapter_t *adapter,
+			tCsrRoamProfile *roam_profile, uint8_t *channel)
+{
+	CDF_STATUS status;
+	tScanResultHandle scan_cache = NULL;
+
+	/* Ask SME for the likely candidate AP's channel; the scan cache
+	 * handle it hands back must always be released afterwards.
+	 */
+	status = sme_get_ap_channel_from_scan_cache(
+				WLAN_HDD_GET_HAL_CTX(adapter),
+				roam_profile, &scan_cache,
+				channel);
+	/* Log (but do not fail on) a purge error, consistent with the
+	 * other sme_scan_result_purge() call sites in this file; the
+	 * original code silently discarded the purge status.
+	 */
+	if (CDF_STATUS_SUCCESS !=
+		sme_scan_result_purge(WLAN_HDD_GET_HAL_CTX(adapter),
+			scan_cache))
+		cds_err("sme_scan_result_purge failed");
+
+	return status;
+}
+
+/**
+ * cds_handle_conc_multiport() - to handle multiport concurrency
+ * @session_id: Session ID
+ * @channel: Channel number
+ *
+ * This routine will handle STA side concurrency when policy manager
+ * is enabled.
+ *
+ * Return: true or false
+ */
+bool cds_handle_conc_multiport(uint8_t session_id,
+		uint8_t channel)
+{
+	CDF_STATUS status;
+	p_cds_contextType cds_context;
+	hdd_adapter_t *adapter;
+	hdd_context_t *hdd_ctx;
+
+	cds_context = cds_get_global_context();
+	if (NULL == cds_context) {
+		cds_err("Invalid CDS context");
+		return false;
+	}
+
+	hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (NULL == hdd_ctx) {
+		cds_err("Invalid HDD context");
+		return false;
+	}
+
+	adapter = hdd_get_adapter_by_vdev(hdd_ctx, session_id);
+	if (NULL == adapter) {
+		cds_err("Invalid HDD adapter");
+		return false;
+	}
+
+	if (0 == channel) {
+		cds_err("Invalid channel number 0");
+		return false;
+	}
+
+	/* Take care of 160MHz and 80+80Mhz later */
+	if (false == cds_allow_concurrency(hdd_ctx,
+			cds_convert_device_mode_to_hdd_type(
+				adapter->device_mode),
+			channel, HW_MODE_20_MHZ)) {
+		cds_err("Connection failed due to conc check fail");
+		return false;
+	}
+
+	/* Arm the completion event before requesting the update */
+	status = cdf_event_reset(&cds_context->connection_update_done_evt);
+	if (!CDF_IS_STATUS_SUCCESS(status))
+		cds_err("clearing event failed");
+
+	status = cds_current_connections_update(hdd_ctx, channel);
+	if (CDF_STATUS_E_FAILURE == status) {
+		cds_err("connections update failed");
+		return false;
+	}
+	/*
+	 * Wait only on success: the connection update API returns
+	 * success exactly when a DBS update is actually required.
+	 */
+	if (CDF_STATUS_SUCCESS == status) {
+		status = cdf_wait_single_event(
+			    &cds_context->connection_update_done_evt, 500);
+		if (!CDF_IS_STATUS_SUCCESS(status)) {
+			cds_err("wait for event failed");
+			return false;
+		}
+	}
+	return true;
+}
+
+#ifdef FEATURE_WLAN_FORCE_SAP_SCC
+/**
+ * cds_restart_softap() - restart SAP on STA channel to support
+ * STA + SAP concurrency.
+ *
+ * @hdd_ctx: pointer to hdd context
+ * @pHostapdAdapter: pointer to hdd adapter
+ *
+ * NOTE(review): despite the name, this function only reports the SAP's
+ * current operating channel as a single avoid-frequency range via a
+ * vendor event; the actual restart is presumably performed by the
+ * recipient of that event — confirm against the userspace handler.
+ *
+ * Return: None
+ */
+void cds_restart_softap(hdd_context_t *hdd_ctx,
+		hdd_adapter_t *pHostapdAdapter)
+{
+	tHddAvoidFreqList hdd_avoid_freq_list;
+
+	/* generate vendor specific event */
+	cdf_mem_zero((void *)&hdd_avoid_freq_list, sizeof(tHddAvoidFreqList));
+	/* one-entry range covering exactly the SAP's current channel */
+	hdd_avoid_freq_list.avoidFreqRange[0].startFreq =
+		cds_chan_to_freq(pHostapdAdapter->sessionCtx.ap.
+				operatingChannel);
+	hdd_avoid_freq_list.avoidFreqRange[0].endFreq =
+		cds_chan_to_freq(pHostapdAdapter->sessionCtx.ap.
+				operatingChannel);
+	hdd_avoid_freq_list.avoidFreqRangeCount = 1;
+	wlan_hdd_send_avoid_freq_event(hdd_ctx, &hdd_avoid_freq_list);
+}
+
+/**
+ * cds_force_sap_on_scc() - Force SAP on SCC
+ * @hdd_ctx: Pointer to HDD context
+ * @roam_result: Roam result
+ *
+ * Restarts SAP on SCC if its operating channel is different from that of the
+ * STA-AP interface
+ *
+ * Return: None
+ */
+void cds_force_sap_on_scc(hdd_context_t *hdd_ctx, eCsrRoamResult roam_result)
+{
+	hdd_adapter_t *hostapd_adapter;
+
+	/* act only on a successful association with the feature enabled */
+	if (!(eCSR_ROAM_RESULT_ASSOCIATED == roam_result &&
+		hdd_ctx->config->SapSccChanAvoidance)) {
+		cds_err("Not able to force SAP on SCC");
+		return;
+	}
+	hostapd_adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP);
+	if (hostapd_adapter != NULL) {
+		/* Restart SAP if its operating channel is different
+		 * from AP channel.
+		 */
+		/* FIXME(review): pRoamInfo is neither a parameter of this
+		 * function nor declared in this scope, so this block cannot
+		 * compile as written; the STA's roam info (pBssDesc) must be
+		 * supplied by the caller — confirm the intended source.
+		 */
+		if (hostapd_adapter->sessionCtx.ap.operatingChannel !=
+				pRoamInfo->pBssDesc->channelId) {
+			cds_err("Restart SAP: SAP channel-%d, STA channel-%d",
+				hostapd_adapter->sessionCtx.ap.operatingChannel,
+				pRoamInfo->pBssDesc->channelId);
+			cds_restart_softap(hdd_ctx, hostapd_adapter);
+		}
+	}
+}
+#endif /* FEATURE_WLAN_FORCE_SAP_SCC */
+
+#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
+
+/**
+ * cds_check_sta_ap_concurrent_ch_intf() - Restart SAP in STA-AP case
+ * @data: Pointer to STA adapter
+ *
+ * Work-queue handler: when the SAP's channel interferes with the STA's,
+ * move the SAP to the non-interfering channel and restart it.
+ *
+ * Return: None
+ */
+static void cds_check_sta_ap_concurrent_ch_intf(void *data)
+{
+	hdd_adapter_t *ap_adapter = NULL, *sta_adapter = (hdd_adapter_t *) data;
+	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(sta_adapter);
+	tHalHandle *hal_handle;
+	hdd_ap_ctx_t *hdd_ap_ctx;
+	uint16_t intf_ch = 0;
+
+	/* NOTE(review): the guard below mixes '||' and '!' in a hard-to-read
+	 * way; verify it really means "bail out unless MCC-to-SCC switching
+	 * is enabled and a STA+SAP pair is running".
+	 */
+	if ((hdd_ctx->config->WlanMccToSccSwitchMode ==
+				CDF_MCC_TO_SCC_SWITCH_DISABLE)
+			|| !(cds_concurrent_open_sessions_running()
+			    || !(cds_get_concurrency_mode() ==
+					(CDF_STA_MASK | CDF_SAP_MASK))))
+		return;
+
+	ap_adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP);
+	if (ap_adapter == NULL)
+		return;
+
+	if (!test_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags))
+		return;
+
+	hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(ap_adapter);
+	hal_handle = WLAN_HDD_GET_HAL_CTX(ap_adapter);
+
+	if (hal_handle == NULL)
+		return;
+
+#ifdef WLAN_FEATURE_MBSSID
+	intf_ch = wlansap_check_cc_intf(hdd_ap_ctx->sapContext);
+#else
+	intf_ch = wlansap_check_cc_intf(hdd_ctx->pcds_context);
+#endif
+	/* zero means no concurrent-channel interference detected */
+	if (intf_ch == 0)
+		return;
+
+	/* retune the SAP config to the new channel and restart the BSS */
+	hdd_ap_ctx->sapConfig.channel = intf_ch;
+	hdd_ap_ctx->sapConfig.ch_params.ch_width =
+		hdd_ap_ctx->sapConfig.ch_width_orig;
+	sme_set_ch_params(hal_handle,
+			hdd_ap_ctx->sapConfig.SapHw_mode,
+			hdd_ap_ctx->sapConfig.channel,
+			hdd_ap_ctx->sapConfig.sec_ch,
+			&hdd_ap_ctx->sapConfig.ch_params);
+	cds_restart_sap(ap_adapter);
+}
+/**
+ * cds_check_concurrent_intf_and_restart_sap() - Check concurrent change intf
+ * @hdd_ctx: Pointer to HDD context
+ * @hdd_sta_ctx: Pointer to HDD STA context
+ * @adapter: Pointer to the STA adapter handed to the check work item
+ *
+ * Schedules cds_check_sta_ap_concurrent_ch_intf() to restart the SAP
+ * when MCC-to-SCC switching is enabled and no custom concurrency rule
+ * is in force (and, when DFS-disable is built in, the STA is not on a
+ * DFS channel).
+ *
+ * Return: None
+ */
+void cds_check_concurrent_intf_and_restart_sap(hdd_context_t *hdd_ctx,
+		hdd_station_ctx_t *hdd_sta_ctx, hdd_adapter_t *adapter)
+{
+	if ((hdd_ctx->config->WlanMccToSccSwitchMode
+				!= CDF_MCC_TO_SCC_SWITCH_DISABLE) &&
+			((0 == hdd_ctx->config->conc_custom_rule1) &&
+			 (0 == hdd_ctx->config->conc_custom_rule2))
+#ifdef FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
+			&& !CDS_IS_DFS_CH(hdd_sta_ctx->conn_info.
+				operationChannel)
+#endif
+	   ) {
+		cdf_create_work(0, &hdd_ctx->sta_ap_intf_check_work,
+				cds_check_sta_ap_concurrent_ch_intf,
+				(void *)adapter);
+		cdf_sched_work(0, &hdd_ctx->sta_ap_intf_check_work);
+		cds_info("Checking for Concurrent Change interference");
+	}
+}
+#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH */
+
+/**
+ * cds_is_mcc_in_24G() - Function to check for MCC in 2.4GHz
+ * @hdd_ctx:    Pointer to HDD context
+ *
+ * This function is used to check for MCC operation in 2.4GHz band.
+ * STA, P2P and SAP adapters are only considered.
+ *
+ * Return: Non zero value if MCC is detected in 2.4GHz band
+ *
+ */
+uint8_t cds_is_mcc_in_24G(hdd_context_t *hdd_ctx)
+{
+	CDF_STATUS status;
+	hdd_adapter_t *hdd_adapter = NULL;
+	hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+	uint8_t ret = 0;
+	hdd_station_ctx_t *sta_ctx;
+	hdd_ap_ctx_t *ap_ctx;
+	uint8_t ch1 = 0, ch2 = 0;
+	uint8_t channel = 0;
+	hdd_hostapd_state_t *hostapd_state;
+
+	status =  hdd_get_front_adapter(hdd_ctx, &adapter_node);
+
+	/* loop through all adapters and check MCC for STA,P2P,SAP adapters */
+	while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
+		hdd_adapter = adapter_node->pAdapter;
+
+		/* NOTE(review): for contiguous device-mode enum values,
+		 * (mode >= STA) || (mode <= P2P_GO) is true for every mode,
+		 * so this skip branch never triggers; '&&' was presumably
+		 * intended — confirm against the enum definition.
+		 */
+		if (!((hdd_adapter->device_mode >= WLAN_HDD_INFRA_STATION)
+					|| (hdd_adapter->device_mode
+						<= WLAN_HDD_P2P_GO))) {
+			/* skip for other adapters */
+			status = hdd_get_next_adapter(hdd_ctx,
+					adapter_node, &next);
+			adapter_node = next;
+			continue;
+		}
+		if (WLAN_HDD_INFRA_STATION ==
+				hdd_adapter->device_mode ||
+				WLAN_HDD_P2P_CLIENT ==
+				hdd_adapter->device_mode) {
+			sta_ctx =
+				WLAN_HDD_GET_STATION_CTX_PTR(
+						hdd_adapter);
+			/* count only a STA/CLI that is associated */
+			if (eConnectionState_Associated ==
+					sta_ctx->conn_info.connState)
+				channel =
+					sta_ctx->conn_info.
+					operationChannel;
+		} else if (WLAN_HDD_P2P_GO ==
+				hdd_adapter->device_mode ||
+				WLAN_HDD_SOFTAP ==
+				hdd_adapter->device_mode) {
+			ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(hdd_adapter);
+			hostapd_state =
+				WLAN_HDD_GET_HOSTAP_STATE_PTR(
+						hdd_adapter);
+			/* count only a GO/SAP whose BSS is actually up */
+			if (hostapd_state->bssState == BSS_START &&
+					hostapd_state->cdf_status ==
+					CDF_STATUS_SUCCESS)
+				channel = ap_ctx->operatingChannel;
+		}
+
+		/* NOTE(review): 'channel' is not reset per adapter, and this
+		 * ch1/ch2 bookkeeping can overwrite ch1 with a repeated
+		 * channel before ch2 is ever filled — verify intent.
+		 */
+		if ((ch1 == 0) ||
+				((ch2 != 0) && (ch2 != channel))) {
+			ch1 = channel;
+		} else if ((ch2 == 0) ||
+				((ch1 != 0) && (ch1 != channel))) {
+			ch2 = channel;
+		}
+
+		/* MCC flagged only when both channels are in the 2.4G band */
+		if ((ch1 != 0 && ch2 != 0) && (ch1 != ch2) &&
+				((ch1 <= SIR_11B_CHANNEL_END) &&
+				 (ch2 <= SIR_11B_CHANNEL_END))) {
+			cds_err("MCC in 2.4Ghz on channels %d and %d",
+				ch1, ch2);
+			return 1;
+		}
+		status = hdd_get_next_adapter(hdd_ctx,
+				adapter_node, &next);
+		adapter_node = next;
+	}
+	return ret;
+}
+
+/**
+ * cds_set_mas() - Function to set MAS value to UMAC
+ * @adapter:            Pointer to HDD adapter
+ * @mas_value:          0-Disable, 1-Enable MAS
+ *
+ * This function passes down the value of MAS to UMAC. When Miracast is
+ * turned on, MCC adaptive scheduling is disabled first and then a fixed
+ * P2P time quota is configured; when it is turned off, the quota is
+ * reset before adaptive scheduling is re-enabled.
+ *
+ * Return: Configuration message posting status, SUCCESS or Fail
+ *
+ */
+int32_t cds_set_mas(hdd_adapter_t *adapter, uint8_t mas_value)
+{
+	hdd_context_t *hdd_ctx = NULL;
+	CDF_STATUS ret_status;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	if (!hdd_ctx)
+		return -EFAULT;
+
+	if (mas_value) {
+		/* Miracast is ON. Disable MAS and configure P2P quota */
+		if (hdd_ctx->config->enableMCCAdaptiveScheduler) {
+			/* cfg failure is only logged; quota is still set */
+			if (cfg_set_int(hdd_ctx->hHal,
+					WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, 0)
+					!= eSIR_SUCCESS) {
+				cds_err("Could not pass on WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED to CCM");
+			}
+			ret_status = sme_set_mas(false);
+			if (CDF_STATUS_SUCCESS != ret_status) {
+				cds_err("Failed to disable MAS");
+				return -EBUSY;
+			}
+		}
+
+		/* Config p2p quota */
+		if (adapter->device_mode == WLAN_HDD_INFRA_STATION)
+			cds_set_mcc_p2p_quota(adapter,
+					100 - HDD_DEFAULT_MCC_P2P_QUOTA);
+		else if (adapter->device_mode == WLAN_HDD_P2P_GO)
+			cds_go_set_mcc_p2p_quota(adapter,
+					HDD_DEFAULT_MCC_P2P_QUOTA);
+		else
+			cds_set_mcc_p2p_quota(adapter,
+					HDD_DEFAULT_MCC_P2P_QUOTA);
+	} else {
+		/* Reset p2p quota */
+		if (adapter->device_mode == WLAN_HDD_P2P_GO)
+			cds_go_set_mcc_p2p_quota(adapter,
+					HDD_RESET_MCC_P2P_QUOTA);
+		else
+			cds_set_mcc_p2p_quota(adapter,
+					HDD_RESET_MCC_P2P_QUOTA);
+
+		/* Miracast is OFF. Enable MAS and reset P2P quota */
+		if (hdd_ctx->config->enableMCCAdaptiveScheduler) {
+			if (cfg_set_int(hdd_ctx->hHal,
+					WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, 1)
+					!= eSIR_SUCCESS) {
+				cds_err("Could not pass on WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED to CCM");
+			}
+
+			/* Enable MAS */
+			ret_status = sme_set_mas(true);
+			if (CDF_STATUS_SUCCESS != ret_status) {
+				cds_err("Unable to enable MAS");
+				return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * cds_set_mcc_p2p_quota() - Function to set quota for P2P
+ * @hostapd_adapater:   Pointer to HDD adapter (note: the parameter name is
+ *                      misspelled "adapater" in the code; kept here so the
+ *                      kernel-doc matches the signature)
+ * @set_value:          Quota value for the interface
+ *
+ * This function is used to set the quota for P2P cases.
+ * NOTE(review): largely duplicates cds_go_set_mcc_p2p_quota(); consider
+ * factoring the common channel/quota packing into one helper.
+ *
+ * Return: Configuration message posting status, SUCCESS or Fail
+ *
+ */
+int32_t cds_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapater,
+		uint32_t set_value)
+{
+	uint8_t first_adapter_operating_channel = 0;
+	uint8_t second_adapter_opertaing_channel = 0;
+	hdd_adapter_t *sta_adapter = NULL;
+	int32_t ret = 0; /* success */
+
+	uint32_t concurrent_state = cds_get_concurrency_mode();
+
+	/*
+	 * Check if concurrency mode is active.
+	 * Need to modify this code to support MCC modes other than STA/P2P
+	 */
+	if ((concurrent_state == (CDF_STA_MASK | CDF_P2P_CLIENT_MASK)) ||
+		(concurrent_state == (CDF_STA_MASK | CDF_P2P_GO_MASK))) {
+		cds_info("STA & P2P are both enabled");
+		/*
+		 * The channel numbers for both adapters and the time
+		 * quota for the 1st adapter, i.e., one specified in cmd
+		 * are formatted as a bit vector then passed on to WMA
+		 * +***********************************************************+
+		 * |bit 31-24  | bit 23-16  |   bits 15-8   |   bits 7-0       |
+		 * |  Unused   | Quota for  | chan. # for   |   chan. # for    |
+		 * |           | 1st chan.  | 1st chan.     |   2nd chan.      |
+		 * +***********************************************************+
+		 */
+		/* Get the operating channel of the specified vdev */
+		first_adapter_operating_channel =
+			hdd_get_operating_channel
+			(
+			 hostapd_adapater->pHddCtx,
+			 hostapd_adapater->device_mode
+			);
+		cds_info("1st channel No.:%d and quota:%dms",
+			first_adapter_operating_channel, set_value);
+		/* Move the time quota for first channel to bits 15-8 */
+		set_value = set_value << 8;
+		/*
+		 * Store the channel number of 1st channel at bits 7-0
+		 * of the bit vector
+		 */
+		set_value = set_value | first_adapter_operating_channel;
+		/* Find out the 2nd MCC adapter and its operating channel */
+		if (hostapd_adapater->device_mode == WLAN_HDD_INFRA_STATION) {
+			/*
+			 * iwpriv cmd was issued on wlan0;
+			 * get p2p0 vdev channel
+			 */
+			if ((concurrent_state & CDF_P2P_CLIENT_MASK) != 0) {
+				/* The 2nd MCC vdev is P2P client */
+				sta_adapter = hdd_get_adapter(
+						hostapd_adapater->pHddCtx,
+						WLAN_HDD_P2P_CLIENT);
+			} else {
+				/* The 2nd MCC vdev is P2P GO */
+				sta_adapter = hdd_get_adapter(
+						hostapd_adapater->pHddCtx,
+						WLAN_HDD_P2P_GO);
+			}
+		} else {
+			/*
+			 * iwpriv cmd was issued on p2p0;
+			 * get wlan0 vdev channel
+			 */
+			sta_adapter = hdd_get_adapter(hostapd_adapater->pHddCtx,
+					WLAN_HDD_INFRA_STATION);
+		}
+		if (sta_adapter != NULL) {
+			second_adapter_opertaing_channel =
+				hdd_get_operating_channel
+				(
+				 sta_adapter->pHddCtx,
+				 sta_adapter->device_mode
+				);
+			cds_info("2nd vdev channel No. is:%d",
+				second_adapter_opertaing_channel);
+
+			if (second_adapter_opertaing_channel == 0 ||
+					first_adapter_operating_channel == 0) {
+				cds_err("Invalid channel");
+				return -EINVAL;
+			}
+			/*
+			 * Now move the time quota and channel number of the
+			 * 1st adapter to bits 23-16 and bits 15-8 of the bit
+			 * vector, respectively.
+			 */
+			set_value = set_value << 8;
+			/*
+			 * Store the channel number for 2nd MCC vdev at bits
+			 * 7-0 of set_value
+			 */
+			set_value = set_value |
+				second_adapter_opertaing_channel;
+			ret = wma_cli_set_command(hostapd_adapater->sessionId,
+					WMA_VDEV_MCC_SET_TIME_QUOTA,
+					set_value, VDEV_CMD);
+		} else {
+			cds_err("NULL adapter handle. Exit");
+		}
+	} else {
+		cds_info("MCC is not active. Exit w/o setting latency");
+	}
+	return ret;
+}
+
+/**
+ * cds_change_mcc_go_beacon_interval() - Change MCC beacon interval
+ * @pHostapdAdapter: HDD adapter
+ *
+ * Asks SME to update the GO beacon parameters for the MCC scenario.
+ * Only acts when the driver is running in SAP mode; in every other
+ * mode the call is a successful no-op.
+ *
+ * Return: Success or Failure depending on the overall function behavior
+ */
+CDF_STATUS cds_change_mcc_go_beacon_interval(hdd_adapter_t *pHostapdAdapter)
+{
+	void *hal;
+	CDF_STATUS sme_status;
+
+	cds_info("UPDATE Beacon Params");
+
+	/* nothing to do unless the driver came up in SAP mode */
+	if (CDF_SAP_MODE != cds_get_conparam())
+		return CDF_STATUS_SUCCESS;
+
+	hal = WLAN_HDD_GET_HAL_CTX(pHostapdAdapter);
+	if (NULL == hal) {
+		cds_err("Hal ctx is null");
+		return CDF_STATUS_E_FAULT;
+	}
+
+	sme_status = sme_change_mcc_beacon_interval(hal,
+					pHostapdAdapter->sessionId);
+	if (sme_status == CDF_STATUS_E_FAILURE) {
+		cds_err("Failed to update Beacon Params");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_go_set_mcc_p2p_quota() - Function to set quota for P2P GO
+ * @hostapd_adapter:    Pointer to HDD adapter
+ * @set_value:          Quota value for the interface
+ *
+ * This function is used to set the quota for P2P GO cases.
+ * NOTE(review): largely duplicates cds_set_mcc_p2p_quota() except for
+ * the accepted concurrency masks; consider factoring the common
+ * channel/quota packing into one helper.
+ *
+ * Return: Configuration message posting status, SUCCESS or Fail
+ *
+ */
+int32_t cds_go_set_mcc_p2p_quota(hdd_adapter_t *hostapd_adapter,
+		uint32_t set_value)
+{
+	uint8_t first_adapter_operating_channel = 0;
+	uint8_t second_adapter_opertaing_channel = 0;
+	uint32_t concurrent_state = 0;
+	hdd_adapter_t *sta_adapter = NULL;
+	int32_t ret = 0; /* success */
+
+	/*
+	 * Check if concurrency mode is active.
+	 * Need to modify this code to support MCC modes other than
+	 * STA/P2P GO
+	 */
+
+	concurrent_state = cds_get_concurrency_mode();
+	if (concurrent_state == (CDF_STA_MASK | CDF_P2P_GO_MASK)) {
+		cds_info("STA & P2P are both enabled");
+
+		/*
+		 * The channel numbers for both adapters and the time
+		 * quota for the 1st adapter, i.e., one specified in cmd
+		 * are formatted as a bit vector then passed on to WMA
+		 * +************************************************+
+		 * |bit 31-24 |bit 23-16  |  bits 15-8  |bits 7-0   |
+		 * |  Unused  |  Quota for| chan. # for |chan. # for|
+		 * |          |  1st chan.| 1st chan.   |2nd chan.  |
+		 * +************************************************+
+		 */
+
+		/* Get the operating channel of the specified vdev */
+		first_adapter_operating_channel =
+			hdd_get_operating_channel(hostapd_adapter->pHddCtx,
+					hostapd_adapter->device_mode);
+
+		cds_info("1st channel No.:%d and quota:%dms",
+			first_adapter_operating_channel, set_value);
+
+		/* Move the time quota for first adapter to bits 15-8 */
+		set_value = set_value << 8;
+		/*
+		 * Store the operating channel number of 1st adapter at
+		 * the lower 8-bits of bit vector.
+		 */
+		set_value = set_value | first_adapter_operating_channel;
+		if (hostapd_adapter->device_mode ==
+				WLAN_HDD_INFRA_STATION) {
+			/* iwpriv cmd issued on wlan0; get p2p0 vdev chan */
+			if ((concurrent_state & CDF_P2P_CLIENT_MASK) != 0) {
+				/* The 2nd MCC vdev is P2P client */
+				sta_adapter = hdd_get_adapter
+					(
+					 hostapd_adapter->pHddCtx,
+					 WLAN_HDD_P2P_CLIENT
+					);
+			} else {
+				/* The 2nd MCC vdev is P2P GO */
+				sta_adapter = hdd_get_adapter
+					(
+					 hostapd_adapter->pHddCtx,
+					 WLAN_HDD_P2P_GO
+					);
+			}
+		} else {
+			/* iwpriv cmd issued on p2p0; get channel for wlan0 */
+			sta_adapter = hdd_get_adapter
+				(
+				 hostapd_adapter->pHddCtx,
+				 WLAN_HDD_INFRA_STATION
+				);
+		}
+		if (sta_adapter != NULL) {
+			second_adapter_opertaing_channel =
+				hdd_get_operating_channel
+				(
+				 sta_adapter->pHddCtx,
+				 sta_adapter->device_mode
+				);
+			cds_info("2nd vdev channel No. is:%d",
+				second_adapter_opertaing_channel);
+
+			if (second_adapter_opertaing_channel == 0 ||
+					first_adapter_operating_channel == 0) {
+				cds_err("Invalid channel");
+				return -EINVAL;
+			}
+
+			/*
+			 * Move the time quota and operating channel number
+			 * for the first adapter to bits 23-16 & bits 15-8
+			 * of set_value vector, respectively.
+			 */
+			set_value = set_value << 8;
+			/*
+			 * Store the channel number for 2nd MCC vdev at bits
+			 * 7-0 of set_value vector as per the bit format above.
+			 */
+			set_value = set_value |
+				second_adapter_opertaing_channel;
+			ret = wma_cli_set_command(hostapd_adapter->sessionId,
+					WMA_VDEV_MCC_SET_TIME_QUOTA,
+					set_value, VDEV_CMD);
+		} else {
+			cds_err("NULL adapter handle. Exit");
+		}
+	} else {
+		cds_info("MCC is not active. Exit w/o setting latency");
+	}
+	return ret;
+}
+
+/**
+ * cds_set_mcc_latency() - Set MCC latency
+ * @adapter: Pointer to HDD adapter
+ * @set_value: Latency value in ms for the adapter's channel
+ *
+ * Sets the MCC latency value during STA-P2P concurrency. The channel
+ * number and latency are packed into one word before being sent to WMA:
+ * +**********************************************+
+ * |bits 31-16 |      bits 15-8    |  bits 7-0    |
+ * |  Unused   | latency - Chan. 1 |  channel no. |
+ * +**********************************************+
+ *
+ * Return: None
+ */
+void cds_set_mcc_latency(hdd_adapter_t *adapter, int set_value)
+{
+	uint32_t concurrent_state = 0;
+	uint8_t first_adapter_operating_channel = 0;
+	int ret = 0;            /* success */
+
+	cds_info("iwpriv cmd to set MCC latency with val %dms",
+		set_value);
+	/*
+	 * Check if concurrency mode is active.
+	 * Need to modify this code to support MCC modes other than STA/P2P
+	 */
+	concurrent_state = cds_get_concurrency_mode();
+	if ((concurrent_state != (CDF_STA_MASK | CDF_P2P_CLIENT_MASK)) &&
+		(concurrent_state != (CDF_STA_MASK | CDF_P2P_GO_MASK))) {
+		/* log aligned with sibling quota functions (no __func__:
+		 * cds_info already identifies the caller)
+		 */
+		cds_info("MCC is not active. Exit w/o setting latency");
+		return;
+	}
+
+	cds_info("STA & P2P are both enabled");
+	/* Get the operating channel of the designated vdev */
+	first_adapter_operating_channel =
+		hdd_get_operating_channel
+		(adapter->pHddCtx, adapter->device_mode);
+	/* Move the time latency for the adapter to bits 15-8 */
+	set_value = set_value << 8;
+	/* Store the channel number at bits 7-0 of the bit vector */
+	set_value = set_value | first_adapter_operating_channel;
+	/* Send command to WMA; surface failures instead of ignoring them */
+	ret = wma_cli_set_command(adapter->sessionId,
+			WMA_VDEV_MCC_SET_TIME_LATENCY,
+			set_value, VDEV_CMD);
+	if (0 != ret)
+		cds_err("Failed to set MCC latency, ret: %d", ret);
+}
+
+#if defined(FEATURE_WLAN_MCC_TO_SCC_SWITCH) || \
+			defined(FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE)
+/**
+ * cds_restart_sap() - This function is used to restart SAP in
+ *                          driver internally
+ *
+ * @ap_adapter: Pointer to SAP hdd_adapter_t structure
+ *
+ * Stops the running SAP BSS (deauthing all connected stations first),
+ * then starts it again with the current sapConfig. The whole sequence,
+ * including the blocking waits on the stop/start events, runs under
+ * the HDD sap_lock.
+ *
+ * Return: None
+ */
+void cds_restart_sap(hdd_adapter_t *ap_adapter)
+{
+	hdd_ap_ctx_t *hdd_ap_ctx;
+	hdd_hostapd_state_t *hostapd_state;
+	CDF_STATUS cdf_status;
+	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(ap_adapter);
+#ifdef CFG80211_DEL_STA_V2
+	struct tagCsrDelStaParams delStaParams;
+#endif
+	tsap_Config_t *sap_config;
+
+	hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(ap_adapter);
+	sap_config = &ap_adapter->sessionCtx.ap.sapConfig;
+
+	mutex_lock(&hdd_ctx->sap_lock);
+	if (test_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags)) {
+#ifdef CFG80211_DEL_STA_V2
+		/* NULL mac means "deauth all connected stations" */
+		delStaParams.mac = NULL;
+		delStaParams.subtype = SIR_MAC_MGMT_DEAUTH >> 4;
+		delStaParams.reason_code = eCsrForcedDeauthSta;
+		wlan_hdd_cfg80211_del_station(ap_adapter->wdev.wiphy,
+				ap_adapter->dev,
+				&delStaParams);
+#else
+		wlan_hdd_cfg80211_del_station(ap_adapter->wdev.wiphy,
+				ap_adapter->dev, NULL);
+#endif
+		hdd_cleanup_actionframe(hdd_ctx, ap_adapter);
+		hostapd_state = WLAN_HDD_GET_HOSTAP_STATE_PTR(ap_adapter);
+		/* arm the stop event before issuing the stop request */
+		cdf_event_reset(&hostapd_state->cdf_stop_bss_event);
+		if (CDF_STATUS_SUCCESS == wlansap_stop_bss(
+#ifdef WLAN_FEATURE_MBSSID
+					hdd_ap_ctx->sapContext
+#else
+					hdd_ctx->pcds_context
+#endif
+					)) {
+			cdf_status =
+				cdf_wait_single_event(&hostapd_state->
+						cdf_stop_bss_event,
+						BSS_WAIT_TIMEOUT);
+
+			if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+				cds_err("SAP Stop Failed");
+				goto end;
+			}
+		}
+		clear_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags);
+		cds_decr_session_set_pcl(hdd_ctx,
+			ap_adapter->device_mode, ap_adapter->sessionId);
+		/* NOTE(review): success reported at error level — confirm */
+		cds_err("SAP Stop Success");
+
+		if (0 != wlan_hdd_cfg80211_update_apies(ap_adapter)) {
+			cds_err("SAP Not able to set AP IEs");
+			wlansap_reset_sap_config_add_ie(sap_config,
+					eUPDATE_IE_ALL);
+			goto end;
+		}
+
+		if (wlansap_start_bss(
+#ifdef WLAN_FEATURE_MBSSID
+					hdd_ap_ctx->sapContext,
+#else
+					hdd_ctx->pcds_context,
+#endif
+					hdd_hostapd_sap_event_cb,
+					&hdd_ap_ctx->sapConfig,
+					ap_adapter->dev) !=
+				CDF_STATUS_SUCCESS) {
+			cds_err("SAP Start Bss fail");
+			goto end;
+		}
+
+		cds_info("Waiting for SAP to start");
+		cdf_status =
+			cdf_wait_single_event(&hostapd_state->cdf_event,
+					BSS_WAIT_TIMEOUT);
+		if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
+			cds_err("SAP Start failed");
+			goto end;
+		}
+		/* NOTE(review): success reported at error level — confirm */
+		cds_err("SAP Start Success");
+		set_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags);
+		cds_incr_active_session(hdd_ctx, ap_adapter->device_mode,
+					 ap_adapter->sessionId);
+		hostapd_state->bCommit = true;
+	}
+end:
+	mutex_unlock(&hdd_ctx->sap_lock);
+	return;
+}
+#endif
+
+#ifdef FEATURE_WLAN_STA_AP_MODE_DFS_DISABLE
+/**
+ * cds_check_and_restart_sap_with_non_dfs_acs() - Restart SAP with non dfs acs
+ * @hdd_ctx: HDD context
+ *
+ * When STA+SAP DFS concurrency is not supported and a started SAP is
+ * operating on a DFS channel, switch the SAP to auto channel selection
+ * and restart it.
+ *
+ * Return: None
+ */
+void cds_check_and_restart_sap_with_non_dfs_acs(hdd_context_t *hdd_ctx)
+{
+	hdd_adapter_t *sap_adapter;
+
+	if (cds_get_concurrency_mode() != (CDF_STA_MASK | CDF_SAP_MASK)) {
+		cds_info("Concurrency mode is not SAP");
+		return;
+	}
+
+	sap_adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP);
+	if (sap_adapter == NULL)
+		return;
+	if (!test_bit(SOFTAP_BSS_STARTED, &sap_adapter->event_flags))
+		return;
+	if (!CDS_IS_DFS_CH(sap_adapter->sessionCtx.ap.operatingChannel))
+		return;
+
+	cds_warn("STA-AP Mode DFS not supported. Restart SAP with Non DFS ACS");
+	/* force ACS so the restarted SAP picks a non-DFS channel */
+	sap_adapter->sessionCtx.ap.sapConfig.channel = AUTO_CHANNEL_SELECT;
+	sap_adapter->sessionCtx.ap.sapConfig.acs_cfg.acs_mode = true;
+
+	cds_restart_sap(sap_adapter);
+}
+#endif
+#ifdef MPC_UT_FRAMEWORK
+/* UTF hook: overwrite the conc_connection_list entry whose vdev_id
+ * matches @vdev_id with the supplied parameters. Returns failure when
+ * no entry with that vdev_id exists.
+ */
+CDF_STATUS cds_update_connection_info_utfw(hdd_context_t *hdd_ctx,
+		uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t conn_index = 0, found = 0;
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
+		if (vdev_id == conc_connection_list[conn_index].vdev_id) {
+			/* debug msg */
+			found = 1;
+			break;
+		}
+		conn_index++;
+	}
+	if (!found) {
+		/* err msg */
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		cds_err("can't find vdev_id %d in conc_connection_list",
+			vdev_id);
+		return status;
+	}
+	cds_info("--> updating entry at index[%d]", conn_index);
+
+	cds_update_conc_list(conn_index,
+			cds_get_mode(type, sub_type),
+			channelid, mac_id, chain_mask, tx_streams,
+			rx_streams, 0, vdev_id, true);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/* UTF hook: append a new entry to conc_connection_list with the
+ * supplied parameters, failing when the table is already full.
+ * NOTE(review): shares most of its body with
+ * cds_update_connection_info_utfw() — candidate for a common helper.
+ */
+CDF_STATUS cds_incr_connection_count_utfw(hdd_context_t *hdd_ctx,
+		uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams,
+		uint32_t chain_mask, uint32_t type, uint32_t sub_type,
+		uint32_t channelid, uint32_t mac_id)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	uint32_t conn_index = 0;
+
+	cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+	conn_index = cds_get_connection_count(hdd_ctx);
+	if (MAX_NUMBER_OF_CONC_CONNECTIONS <= conn_index) {
+		/* err msg */
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		cds_err("exceeded max connection limit %d",
+			MAX_NUMBER_OF_CONC_CONNECTIONS);
+		return status;
+	}
+	cds_info("--> filling entry at index[%d]", conn_index);
+
+	cds_update_conc_list(conn_index,
+			     cds_get_mode(type, sub_type),
+			     channelid, mac_id, chain_mask, tx_streams,
+			     rx_streams, 0, vdev_id, true);
+	cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/* UTF hook: drop one tracked connection identified by @vdev_id, or reset
+ * the whole policy-manager connection table when @del_all is non-zero.
+ */
+CDF_STATUS cds_decr_connection_count_utfw(hdd_context_t *hdd_ctx,
+	uint32_t del_all, uint32_t vdev_id)
+{
+	if (!del_all) {
+		cdf_mutex_acquire(&hdd_ctx->hdd_conc_list_lock);
+		cds_decr_connection_count(hdd_ctx, vdev_id);
+		cdf_mutex_release(&hdd_ctx->hdd_conc_list_lock);
+		return CDF_STATUS_SUCCESS;
+	}
+
+	/* full reset: re-initialize the policy manager from scratch */
+	if (!CDF_IS_STATUS_SUCCESS(cds_init_policy_mgr(hdd_ctx))) {
+		cds_err("Policy manager initialization failed");
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/* UTF hook: expose the global concurrency connection table; @len is set
+ * to the table's fixed capacity.
+ */
+struct cds_conc_connection_info *cds_get_conn_info(hdd_context_t *hdd_ctx,
+		uint32_t *len)
+{
+	*len = MAX_NUMBER_OF_CONC_CONNECTIONS;
+	return &conc_connection_list[0];
+}
+
+/* Look up the preferred-channel-list type for the very first connection
+ * of @type under system preference @sys_pref. Out-of-range arguments
+ * yield CDS_MAX_PCL_TYPE.
+ */
+enum cds_pcl_type get_pcl_from_first_conn_table(
+		enum cds_con_mode type,
+		enum cds_conc_priority_mode sys_pref)
+{
+	if (type >= CDS_MAX_NUM_OF_MODE ||
+	    sys_pref >= CDS_MAX_CONC_PRIORITY_MODE)
+		return CDS_MAX_PCL_TYPE;
+
+	return first_connection_pcl_table[type][sys_pref];
+}
+
+/* Look up the PCL type for a second connection of @type given the
+ * existing connection combination @idx and preference @sys_pref,
+ * selecting the DBS or non-DBS table per @dbs_capable.
+ */
+enum cds_pcl_type get_pcl_from_second_conn_table(
+	enum cds_one_connection_mode idx, enum cds_con_mode type,
+	enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable)
+{
+	if (idx >= CDS_MAX_ONE_CONNECTION_MODE ||
+	    type >= CDS_MAX_NUM_OF_MODE ||
+	    sys_pref >= CDS_MAX_CONC_PRIORITY_MODE)
+		return CDS_MAX_PCL_TYPE;
+
+	return dbs_capable ?
+		second_connection_pcl_dbs_table[idx][type][sys_pref] :
+		second_connection_pcl_nodbs_table[idx][type][sys_pref];
+}
+
+/* Look up the PCL type for a third connection of @type given the
+ * existing two-connection combination @idx and preference @sys_pref,
+ * selecting the DBS or non-DBS table per @dbs_capable.
+ */
+enum cds_pcl_type get_pcl_from_third_conn_table(
+	enum cds_two_connection_mode idx, enum cds_con_mode type,
+	enum cds_conc_priority_mode sys_pref, uint8_t dbs_capable)
+{
+	if (idx >= CDS_MAX_TWO_CONNECTION_MODE ||
+	    type >= CDS_MAX_NUM_OF_MODE ||
+	    sys_pref >= CDS_MAX_CONC_PRIORITY_MODE)
+		return CDS_MAX_PCL_TYPE;
+
+	return dbs_capable ?
+		third_connection_pcl_dbs_table[idx][type][sys_pref] :
+		third_connection_pcl_nodbs_table[idx][type][sys_pref];
+}
+#endif
+
+/**
+ * cds_convert_device_mode_to_hdd_type() - provides the
+ * type translation from HDD to policy manager type
+ * @device_mode: Generic connection mode type
+ *
+ * Maps an HDD device mode onto the corresponding policy manager
+ * connection mode. Unsupported modes are logged and reported as
+ * CDS_MAX_NUM_OF_MODE.
+ *
+ * Return: cds_con_mode enum
+ */
+enum cds_con_mode cds_convert_device_mode_to_hdd_type(
+					device_mode_t device_mode)
+{
+	switch (device_mode) {
+	case WLAN_HDD_INFRA_STATION:
+		return CDS_STA_MODE;
+	case WLAN_HDD_P2P_CLIENT:
+		return CDS_P2P_CLIENT_MODE;
+	case WLAN_HDD_P2P_GO:
+		return CDS_P2P_GO_MODE;
+	case WLAN_HDD_SOFTAP:
+		return CDS_SAP_MODE;
+	case WLAN_HDD_IBSS:
+		return CDS_IBSS_MODE;
+	default:
+		cds_err("Unsupported mode (%d)",
+			device_mode);
+		return CDS_MAX_NUM_OF_MODE;
+	}
+}

+ 166 - 0
core/cds/src/cds_get_bin.c

@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cds_get_bin.h>
+#include <cds_api.h>
+#include <cds_sched.h>
+#include <wlan_hdd_misc.h>
+#include <wlan_hdd_main.h>
+
+/* Thin wrapper: the driver connection parameter is owned by HDD. */
+tCDF_CON_MODE cds_get_conparam(void)
+{
+	return hdd_get_conparam();
+}
+
+/* Return true when more than one session is open across all modes;
+ * false when the HDD context is unavailable.
+ */
+bool cds_concurrent_open_sessions_running(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t mode;
+	uint8_t total = 0;
+
+	if (NULL == hdd_ctx)
+		return false;
+
+	for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
+		total += hdd_ctx->no_of_open_sessions[mode];
+
+	return total > 1;
+}
+
+#ifdef WLAN_FEATURE_MBSSID
+/* Return true when more than one beaconing (SAP/P2P-GO/IBSS) session
+ * is open; false when the HDD context is unavailable.
+ */
+bool cds_concurrent_beaconing_sessions_running(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t beaconing = 0;
+
+	if (NULL != hdd_ctx)
+		beaconing = hdd_ctx->no_of_open_sessions[CDF_SAP_MODE] +
+			    hdd_ctx->no_of_open_sessions[CDF_P2P_GO_MODE] +
+			    hdd_ctx->no_of_open_sessions[CDF_IBSS_MODE];
+
+	return beaconing > 1;
+}
+#endif
+
+/**
+ * cds_max_concurrent_connections_reached() - check for max concurrency
+ *
+ * Checks whether the number of active sessions summed across all modes
+ * has reached the configured gMaxConcurrentActiveSessions limit.
+ *
+ * Example:
+ * STA + STA (wlan0 and wlan1 are connected) - returns true
+ * STA + STA (wlan0 connected and wlan1 disconnected) - returns false
+ * DUT with P2P-GO + P2P-CLIENT connection - returns true
+ *
+ * Return: true when the limit is reached, false otherwise (including
+ *         when the HDD context is unavailable)
+ */
+bool cds_max_concurrent_connections_reached(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t mode;
+	uint8_t active = 0;
+
+	if (NULL == hdd_ctx)
+		return false;
+
+	for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
+		active += hdd_ctx->no_of_active_sessions[mode];
+
+	return active > (hdd_ctx->config->gMaxConcurrentActiveSessions - 1);
+}
+
+/* Zero the per-mode active session counters held in the HDD context;
+ * a missing HDD context makes this a no-op.
+ */
+void cds_clear_concurrent_session_count(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t mode;
+
+	if (NULL == hdd_ctx)
+		return;
+
+	for (mode = 0; mode < CDF_MAX_NO_OF_MODE; mode++)
+		hdd_ctx->no_of_active_sessions[mode] = 0;
+}
+
+/**
+ * cds_is_multiple_active_sta_sessions() - check multiple STA connections
+ *
+ * Return: true when more than one active STA session exists, false
+ *         otherwise (including when the HDD context is unavailable)
+ */
+bool cds_is_multiple_active_sta_sessions(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t sta_count = 0;
+
+	if (NULL != hdd_ctx)
+		sta_count = hdd_ctx->no_of_active_sessions[CDF_STA_MODE];
+
+	return sta_count > 1;
+}
+
+/**
+ * cds_is_sta_active_connection_exists() - check for an active STA session
+ *
+ * Return: true when at least one active STA session exists, false
+ *         otherwise (including when the HDD context is unavailable)
+ */
+bool cds_is_sta_active_connection_exists(void)
+{
+	hdd_context_t *hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	uint8_t sta_count = 0;
+
+	if (NULL != hdd_ctx)
+		sta_count = hdd_ctx->no_of_active_sessions[CDF_STA_MODE];
+
+	return sta_count > 0;
+}

+ 545 - 0
core/cds/src/cds_ieee80211_common_i.h

@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef CDS_COMMON__IEEE80211_I_H_
+#define CDS_COMMON__IEEE80211_I_H_
+
+/* These defines should match the table from ah_internal.h */
+/* DFS regulatory domain identifiers; values must stay in sync with the
+ * table in ah_internal.h (see comment above). */
+typedef enum {
+	DFS_UNINIT_DOMAIN = 0,  /* Uninitialized dfs domain */
+	DFS_FCC_DOMAIN = 1,     /* FCC3 dfs domain */
+	DFS_ETSI_DOMAIN = 2,    /* ETSI dfs domain */
+	DFS_MKK4_DOMAIN = 3     /* Japan dfs domain */
+} HAL_DFS_DOMAIN;
+
+/* XXX not really a mode; there are really multiple PHY's */
+enum ieee80211_phymode {
+	IEEE80211_MODE_AUTO = 0,        /* autoselect */
+	IEEE80211_MODE_11A = 1, /* 5GHz, OFDM */
+	IEEE80211_MODE_11B = 2, /* 2GHz, CCK */
+	IEEE80211_MODE_11G = 3, /* 2GHz, OFDM */
+	IEEE80211_MODE_FH = 4,  /* 2GHz, GFSK */
+	IEEE80211_MODE_TURBO_A = 5,     /* 5GHz, OFDM, 2x clock dynamic turbo */
+	IEEE80211_MODE_TURBO_G = 6,     /* 2GHz, OFDM, 2x clock dynamic turbo */
+	IEEE80211_MODE_11NA_HT20 = 7,   /* 5Ghz, HT20 */
+	IEEE80211_MODE_11NG_HT20 = 8,   /* 2Ghz, HT20 */
+	IEEE80211_MODE_11NA_HT40PLUS = 9,       /* 5Ghz, HT40 (ext ch +1) */
+	IEEE80211_MODE_11NA_HT40MINUS = 10,     /* 5Ghz, HT40 (ext ch -1) */
+	IEEE80211_MODE_11NG_HT40PLUS = 11,      /* 2Ghz, HT40 (ext ch +1) */
+	IEEE80211_MODE_11NG_HT40MINUS = 12,     /* 2Ghz, HT40 (ext ch -1) */
+	IEEE80211_MODE_11NG_HT40 = 13,  /* 2Ghz, Auto HT40 */
+	IEEE80211_MODE_11NA_HT40 = 14,  /* 2Ghz, Auto HT40 */
+	IEEE80211_MODE_11AC_VHT20 = 15, /* 5Ghz, VHT20 */
+	IEEE80211_MODE_11AC_VHT40PLUS = 16,     /* 5Ghz, VHT40 (Ext ch +1) */
+	IEEE80211_MODE_11AC_VHT40MINUS = 17,    /* 5Ghz  VHT40 (Ext ch -1) */
+	IEEE80211_MODE_11AC_VHT40 = 18, /* 5Ghz, VHT40 */
+	IEEE80211_MODE_11AC_VHT80 = 19, /* 5Ghz, VHT80 */
+	IEEE80211_MODE_2G_AUTO = 20,    /* 2G 11 b/g/n  autoselect */
+	IEEE80211_MODE_5G_AUTO = 21,    /* 5G 11 a/n/ac autoselect */
+	IEEE80211_MODE_11AGN = 22,   /* Support 11N in both 2G and 5G */
+};
+/* NOTE(review): evaluates to 20, so IEEE80211_MODE_2G_AUTO (20),
+ * IEEE80211_MODE_5G_AUTO (21) and IEEE80211_MODE_11AGN (22) fall outside
+ * IEEE80211_MODE_MAX — confirm any array sized by this macro never needs
+ * to be indexed by those modes. */
+#define IEEE80211_MODE_MAX      (IEEE80211_MODE_11AC_VHT80 + 1)
+
+enum ieee80211_opmode {
+	IEEE80211_M_STA = 1,    /* infrastructure station */
+	IEEE80211_M_IBSS = 0,   /* IBSS (adhoc) station */
+	IEEE80211_M_AHDEMO = 3, /* Old lucent compatible adhoc demo */
+	IEEE80211_M_HOSTAP = 6, /* Software Access Point */
+	IEEE80211_M_MONITOR = 8,        /* Monitor mode */
+	IEEE80211_M_WDS = 2,    /* WDS link */
+	IEEE80211_M_BTAMP = 9,  /* VAP for BT AMP */
+
+	IEEE80211_M_P2P_GO = 33,        /* P2P GO */
+	IEEE80211_M_P2P_CLIENT = 34,    /* P2P Client */
+	IEEE80211_M_P2P_DEVICE = 35,    /* P2P Device */
+
+	IEEE80211_OPMODE_MAX = IEEE80211_M_BTAMP,       /* Highest numbered non-P2P opmode; NOTE(review): the P2P opmodes above use larger values (33-35), so confirm no caller uses this as an overall upper bound */
+
+	IEEE80211_M_ANY = 0xFF  /* Any of the above; used by NDIS 6.x */
+};
+
+/*
+ * 802.11n
+ */
+#define IEEE80211_CWM_EXTCH_BUSY_THRESHOLD 30
+
+/* Channel width management (CWM) operating mode for 802.11n */
+enum ieee80211_cwm_mode {
+	IEEE80211_CWM_MODE20,
+	IEEE80211_CWM_MODE2040,
+	IEEE80211_CWM_MODE2040,
+	IEEE80211_CWM_MODE40,
+	IEEE80211_CWM_MODEMAX
+};
+
+/* Extension-channel protection spacing (values presumably 20/25 MHz —
+ * TODO confirm units against the CWM implementation) */
+enum ieee80211_cwm_extprotspacing {
+	IEEE80211_CWM_EXTPROTSPACING20,
+	IEEE80211_CWM_EXTPROTSPACING25,
+	IEEE80211_CWM_EXTPROTSPACINGMAX
+};
+
+/* Current operating channel width */
+enum ieee80211_cwm_width {
+	IEEE80211_CWM_WIDTH20,
+	IEEE80211_CWM_WIDTH40,
+	IEEE80211_CWM_WIDTH80,
+	IEEE80211_CWM_WIDTHINVALID = 0xff       /* user invalid value */
+};
+
+/* Protection mode applied on the extension channel */
+enum ieee80211_cwm_extprotmode {
+	IEEE80211_CWM_EXTPROTNONE,      /* no protection */
+	IEEE80211_CWM_EXTPROTCTSONLY,   /* CTS to self */
+	IEEE80211_CWM_EXTPROTRTSCTS,    /* RTS-CTS */
+	IEEE80211_CWM_EXTPROTMAX
+};
+
+enum ieee80211_fixed_rate_mode {
+	IEEE80211_FIXED_RATE_NONE = 0,
+	IEEE80211_FIXED_RATE_MCS = 1,   /* HT rates */
+	IEEE80211_FIXED_RATE_LEGACY = 2,        /* legacy rates */
+	IEEE80211_FIXED_RATE_VHT = 3    /* VHT rates */
+};
+
+/* Holds the fixed rate information for each VAP */
+struct ieee80211_fixed_rate {
+	enum ieee80211_fixed_rate_mode mode;
+	uint32_t series;        /* rate series word — assumes packed per-series rate codes, TODO confirm encoding */
+	uint32_t retries;       /* retry counts for the series word — TODO confirm encoding */
+};
+
+/*
+ * 802.11g protection mode.
+ */
+/* ERP protection mechanism used when legacy (11b) stations are present */
+enum ieee80211_protmode {
+	IEEE80211_PROT_NONE = 0,        /* no protection */
+	IEEE80211_PROT_CTSONLY = 1,     /* CTS to self */
+	IEEE80211_PROT_RTSCTS = 2,      /* RTS-CTS */
+};
+
+/*
+ * Roaming mode is effectively who controls the operation
+ * of the 802.11 state machine when operating as a station.
+ * State transitions are controlled either by the driver
+ * (typically when management frames are processed by the
+ * hardware/firmware), the host (auto/normal operation of
+ * the 802.11 layer), or explicitly through ioctl requests
+ * when applications like wpa_supplicant want control.
+ */
+enum ieee80211_roamingmode {
+	IEEE80211_ROAMING_DEVICE = 0,   /* driver/hardware control */
+	IEEE80211_ROAMING_AUTO = 1,     /* 802.11 layer control */
+	IEEE80211_ROAMING_MANUAL = 2,   /* application control */
+};
+
+/*
+ * Scanning mode controls station scanning work; this is
+ * used only when roaming mode permits the host to select
+ * the bss to join/channel to use.
+ */
+enum ieee80211_scanmode {
+	IEEE80211_SCAN_DEVICE = 0,      /* driver/hardware control */
+	IEEE80211_SCAN_BEST = 1,        /* 802.11 layer selects best */
+	IEEE80211_SCAN_FIRST = 2,       /* take first suitable candidate */
+};
+
+#define IEEE80211_NWID_LEN      32
+#define IEEE80211_CHAN_MAX      255
+#define IEEE80211_CHAN_BYTES    32      /* howmany(IEEE80211_CHAN_MAX, NBBY) */
+#define IEEE80211_CHAN_ANY      (-1)    /* token for ``any channel'' */
+#define IEEE80211_CHAN_ANYC \
+	((struct ieee80211_channel *) IEEE80211_CHAN_ANY)
+
+#define IEEE80211_CHAN_DEFAULT          11
+#define IEEE80211_CHAN_DEFAULT_11A      52
+#define IEEE80211_CHAN_ADHOC_DEFAULT1   10
+#define IEEE80211_CHAN_ADHOC_DEFAULT2   11
+
+#define IEEE80211_RADAR_11HCOUNT        5
+#define IEEE80211_RADAR_TEST_MUTE_CHAN_11A      36      /* Move to channel 36 for mute test */
+#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT20  36
+#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT40U 36
+#define IEEE80211_RADAR_TEST_MUTE_CHAN_11NHT40D 40      /* Move to channel 40 for HT40D mute test */
+#define IEEE80211_RADAR_DETECT_DEFAULT_DELAY    60000   /* STA ignore AP beacons during this period in millisecond */
+
+#define IEEE80211_2GCSA_TBTTCOUNT        3
+
+/* bits 0-3 are for private use by drivers */
+/* channel attributes */
+#define IEEE80211_CHAN_TURBO            0x00000010      /* Turbo channel */
+#define IEEE80211_CHAN_CCK              0x00000020      /* CCK channel */
+#define IEEE80211_CHAN_OFDM             0x00000040      /* OFDM channel */
+#define IEEE80211_CHAN_2GHZ             0x00000080      /* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ             0x00000100      /* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE          0x00000200      /* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN              0x00000400      /* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK             0x00000800      /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_RADAR_DFS        0x00001000      /* Radar found on channel */
+#define IEEE80211_CHAN_STURBO           0x00002000      /* 11a static turbo channel only */
+#define IEEE80211_CHAN_HALF             0x00004000      /* Half rate channel */
+#define IEEE80211_CHAN_QUARTER          0x00008000      /* Quarter rate channel */
+#define IEEE80211_CHAN_HT20             0x00010000      /* HT 20 channel */
+#define IEEE80211_CHAN_HT40PLUS         0x00020000      /* HT 40 with extension channel above */
+#define IEEE80211_CHAN_HT40MINUS        0x00040000      /* HT 40 with extension channel below */
+#define IEEE80211_CHAN_HT40INTOL        0x00080000      /* HT 40 Intolerant */
+#define IEEE80211_CHAN_VHT20            0x00100000      /* VHT 20 channel */
+#define IEEE80211_CHAN_VHT40PLUS        0x00200000      /* VHT 40 with extension channel above */
+#define IEEE80211_CHAN_VHT40MINUS       0x00400000      /* VHT 40 with extension channel below */
+#define IEEE80211_CHAN_VHT80            0x00800000      /* VHT 80 channel */
+
+/* flagext — bits stored in ic_flagext (distinct from the ic_flags bits above) */
+/* NOTE(review): IEEE80211_CHAN_RADAR_FOUND (0x01) breaks the 4-digit
+ * literal pattern of the other bits and bit 0x0004 is unused; the values
+ * do not collide, but confirm this is intentional before adding flags. */
+#define IEEE80211_CHAN_RADAR_FOUND    0x01
+#define IEEE80211_CHAN_DFS              0x0002  /* DFS required on channel */
+#define IEEE80211_CHAN_DFS_CLEAR        0x0008  /* if channel has been checked for DFS */
+#define IEEE80211_CHAN_11D_EXCLUDED     0x0010  /* excluded in 11D */
+#define IEEE80211_CHAN_CSA_RECEIVED     0x0020  /* Channel Switch Announcement received on this channel */
+#define IEEE80211_CHAN_DISALLOW_ADHOC   0x0040  /* ad-hoc is not allowed */
+#define IEEE80211_CHAN_DISALLOW_HOSTAP  0x0080  /* Station only channel */
+
+/*
+ * Useful combinations of channel characteristics.
+ */
+#define IEEE80211_CHAN_FHSS \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK)
+#define IEEE80211_CHAN_A \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM)
+#define IEEE80211_CHAN_B \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK)
+#define IEEE80211_CHAN_PUREG \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM)
+#define IEEE80211_CHAN_G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN)
+#define IEEE80211_CHAN_108A \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
+#define IEEE80211_CHAN_108G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
+#define IEEE80211_CHAN_ST \
+	(IEEE80211_CHAN_108A | IEEE80211_CHAN_STURBO)
+
+#define IEEE80211_IS_CHAN_11AC_2G(_c) \
+	(IEEE80211_IS_CHAN_2GHZ((_c)) && IEEE80211_IS_CHAN_VHT((_c)))
+#define IEEE80211_CHAN_11AC_VHT20_2G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT20)
+#define IEEE80211_CHAN_11AC_VHT40_2G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS)
+#define IEEE80211_CHAN_11AC_VHT80_2G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_VHT80)
+
+#define IEEE80211_IS_CHAN_11AC_VHT20_2G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT20_2G) == IEEE80211_CHAN_11AC_VHT20_2G)
+#define IEEE80211_IS_CHAN_11AC_VHT40_2G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40_2G) != 0)
+#define IEEE80211_IS_CHAN_11AC_VHT80_2G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT80_2G) == IEEE80211_CHAN_11AC_VHT80_2G)
+
+#define IEEE80211_CHAN_11NG_HT20 \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT20)
+#define IEEE80211_CHAN_11NA_HT20 \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT20)
+#define IEEE80211_CHAN_11NG_HT40PLUS \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT40PLUS)
+#define IEEE80211_CHAN_11NG_HT40MINUS \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_HT40MINUS)
+#define IEEE80211_CHAN_11NA_HT40PLUS \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT40PLUS)
+#define IEEE80211_CHAN_11NA_HT40MINUS \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT40MINUS)
+
+#define IEEE80211_CHAN_ALL \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_GFSK | \
+	 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_DYN | \
+	 IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS | \
+	 IEEE80211_CHAN_VHT20 | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS | IEEE80211_CHAN_VHT80 | \
+	 IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER)
+#define IEEE80211_CHAN_ALLTURBO	\
+	(IEEE80211_CHAN_ALL | IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)
+
+#define IEEE80211_IS_CHAN_FHSS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS)
+#define IEEE80211_IS_CHAN_A(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A)
+#define IEEE80211_IS_CHAN_B(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B)
+#define IEEE80211_IS_CHAN_PUREG(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG)
+#define IEEE80211_IS_CHAN_G(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G)
+#define IEEE80211_IS_CHAN_ANYG(_c) \
+	(IEEE80211_IS_CHAN_PUREG(_c) || IEEE80211_IS_CHAN_G(_c))
+#define IEEE80211_IS_CHAN_ST(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_ST) == IEEE80211_CHAN_ST)
+#define IEEE80211_IS_CHAN_108A(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_108A) == IEEE80211_CHAN_108A)
+#define IEEE80211_IS_CHAN_108G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_108G) == IEEE80211_CHAN_108G)
+
+#define IEEE80211_IS_CHAN_2GHZ(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_2GHZ) != 0)
+#define IEEE80211_IS_CHAN_5GHZ(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_5GHZ) != 0)
+#define IEEE80211_IS_CHAN_OFDM(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_OFDM) != 0)
+#define IEEE80211_IS_CHAN_CCK(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_CCK) != 0)
+#define IEEE80211_IS_CHAN_GFSK(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_GFSK) != 0)
+#define IEEE80211_IS_CHAN_TURBO(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_TURBO) != 0)
+#define IEEE80211_IS_CHAN_WEATHER_RADAR(_c) \
+	((((_c)->ic_freq >= 5600) && ((_c)->ic_freq <= 5650)) \
+	 || (((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) && (5580 == (_c)->ic_freq)))
+#define IEEE80211_IS_CHAN_STURBO(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_STURBO) != 0)
+#define IEEE80211_IS_CHAN_DTURBO(_c) \
+	(((_c)->ic_flags & \
+	  (IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)) == IEEE80211_CHAN_TURBO)
+#define IEEE80211_IS_CHAN_HALF(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HALF) != 0)
+#define IEEE80211_IS_CHAN_QUARTER(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_QUARTER) != 0)
+#define IEEE80211_IS_CHAN_PASSIVE(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_PASSIVE) != 0)
+
+#define IEEE80211_IS_CHAN_DFS(_c) \
+	(((_c)->ic_flagext & (IEEE80211_CHAN_DFS|IEEE80211_CHAN_DFS_CLEAR)) == IEEE80211_CHAN_DFS)
+#define IEEE80211_IS_CHAN_DFSFLAG(_c) \
+	(((_c)->ic_flagext & IEEE80211_CHAN_DFS) == IEEE80211_CHAN_DFS)
+#define IEEE80211_IS_CHAN_DISALLOW_ADHOC(_c) \
+	(((_c)->ic_flagext & IEEE80211_CHAN_DISALLOW_ADHOC) != 0)
+#define IEEE80211_IS_CHAN_11D_EXCLUDED(_c) \
+	(((_c)->ic_flagext & IEEE80211_CHAN_11D_EXCLUDED) != 0)
+#define IEEE80211_IS_CHAN_CSA(_c) \
+	(((_c)->ic_flagext & IEEE80211_CHAN_CSA_RECEIVED) != 0)
+#define IEEE80211_IS_CHAN_ODD(_c) \
+	(((_c)->ic_freq == 5170) || ((_c)->ic_freq == 5190) || \
+	 ((_c)->ic_freq == 5210) || ((_c)->ic_freq == 5230))
+#define IEEE80211_IS_CHAN_DISALLOW_HOSTAP(_c) \
+	(((_c)->ic_flagext & IEEE80211_CHAN_DISALLOW_HOSTAP) != 0)
+
+#define IEEE80211_IS_CHAN_11NG_HT20(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT20) == IEEE80211_CHAN_11NG_HT20)
+#define IEEE80211_IS_CHAN_11NA_HT20(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT20) == IEEE80211_CHAN_11NA_HT20)
+#define IEEE80211_IS_CHAN_11NG_HT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT40PLUS) == IEEE80211_CHAN_11NG_HT40PLUS)
+#define IEEE80211_IS_CHAN_11NG_HT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NG_HT40MINUS) == IEEE80211_CHAN_11NG_HT40MINUS)
+#define IEEE80211_IS_CHAN_11NA_HT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT40PLUS) == IEEE80211_CHAN_11NA_HT40PLUS)
+#define IEEE80211_IS_CHAN_11NA_HT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NA_HT40MINUS) == IEEE80211_CHAN_11NA_HT40MINUS)
+
+#define IEEE80211_IS_CHAN_11N(_c) \
+	(((_c)->ic_flags & (IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS)) != 0)
+#define IEEE80211_IS_CHAN_11N_HT20(_c) \
+	(((_c)->ic_flags & (IEEE80211_CHAN_HT20)) != 0)
+#define IEEE80211_IS_CHAN_11N_HT40(_c) \
+	(((_c)->ic_flags & (IEEE80211_CHAN_HT40PLUS | IEEE80211_CHAN_HT40MINUS)) != 0)
+#define IEEE80211_IS_CHAN_11NG(_c) \
+	(IEEE80211_IS_CHAN_2GHZ((_c)) && IEEE80211_IS_CHAN_11N((_c)))
+#define IEEE80211_IS_CHAN_11NA(_c) \
+	(IEEE80211_IS_CHAN_5GHZ((_c)) && IEEE80211_IS_CHAN_11N((_c)))
+#define IEEE80211_IS_CHAN_11N_HT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) != 0)
+#define IEEE80211_IS_CHAN_11N_HT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) != 0)
+
+#define IEEE80211_IS_CHAN_HT20_CAPABLE(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT20) == IEEE80211_CHAN_HT20)
+#define IEEE80211_IS_CHAN_HT40PLUS_CAPABLE(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) == IEEE80211_CHAN_HT40PLUS)
+#define IEEE80211_IS_CHAN_HT40MINUS_CAPABLE(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) == IEEE80211_CHAN_HT40MINUS)
+#define IEEE80211_IS_CHAN_HT40_CAPABLE(_c) \
+	(IEEE80211_IS_CHAN_HT40PLUS_CAPABLE(_c) || IEEE80211_IS_CHAN_HT40MINUS_CAPABLE(_c))
+#define IEEE80211_IS_CHAN_HT_CAPABLE(_c) \
+	(IEEE80211_IS_CHAN_HT20_CAPABLE(_c) || IEEE80211_IS_CHAN_HT40_CAPABLE(_c))
+#define IEEE80211_IS_CHAN_11N_CTL_CAPABLE(_c)  IEEE80211_IS_CHAN_HT20_CAPABLE(_c)
+#define IEEE80211_IS_CHAN_11N_CTL_U_CAPABLE(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40PLUS) == IEEE80211_CHAN_HT40PLUS)
+#define IEEE80211_IS_CHAN_11N_CTL_L_CAPABLE(_c)	\
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40MINUS) == IEEE80211_CHAN_HT40MINUS)
+#define IEEE80211_IS_CHAN_11N_CTL_40_CAPABLE(_c) \
+	(IEEE80211_IS_CHAN_11N_CTL_U_CAPABLE((_c)) || IEEE80211_IS_CHAN_11N_CTL_L_CAPABLE((_c)))
+
+#define IEEE80211_IS_CHAN_VHT(_c) \
+	(((_c)->ic_flags & (IEEE80211_CHAN_VHT20 | \
+			    IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS | IEEE80211_CHAN_VHT80)) != 0)
+#define IEEE80211_IS_CHAN_11AC(_c) \
+	( IEEE80211_IS_CHAN_5GHZ((_c)) && IEEE80211_IS_CHAN_VHT((_c)) )
+#define IEEE80211_CHAN_11AC_VHT20 \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT20)
+#define IEEE80211_CHAN_11AC_VHT40 \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS )
+#define IEEE80211_CHAN_11AC_VHT40PLUS \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40PLUS)
+#define IEEE80211_CHAN_11AC_VHT40MINUS \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT40MINUS)
+#define IEEE80211_CHAN_11AC_VHT80 \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_VHT80)
+#define IEEE80211_IS_CHAN_11AC_VHT20(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT20) == IEEE80211_CHAN_11AC_VHT20)
+
+#define IEEE80211_IS_CHAN_11AC_VHT40(_c) \
+	(((_c)->ic_flags & (IEEE80211_CHAN_VHT40PLUS | IEEE80211_CHAN_VHT40MINUS)) !=0)
+#define IEEE80211_IS_CHAN_11AC_VHT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40PLUS) == IEEE80211_CHAN_11AC_VHT40PLUS)
+#define IEEE80211_IS_CHAN_11AC_VHT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT40MINUS) == IEEE80211_CHAN_11AC_VHT40MINUS)
+#define IEEE80211_IS_CHAN_11AC_VHT80(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC_VHT80) == IEEE80211_CHAN_11AC_VHT80)
+
+#define IEEE80211_IS_CHAN_RADAR(_c)    \
+	(((_c)->ic_flags & IEEE80211_CHAN_RADAR_DFS) == IEEE80211_CHAN_RADAR_DFS)
+#define IEEE80211_CHAN_SET_RADAR(_c)	\
+	((_c)->ic_flags |= IEEE80211_CHAN_RADAR_DFS)
+#define IEEE80211_CHAN_CLR_RADAR(_c)	\
+	((_c)->ic_flags &= ~IEEE80211_CHAN_RADAR_DFS)
+#define IEEE80211_CHAN_SET_DISALLOW_ADHOC(_c)	\
+	((_c)->ic_flagext |= IEEE80211_CHAN_DISALLOW_ADHOC)
+#define IEEE80211_CHAN_SET_DISALLOW_HOSTAP(_c)	 \
+	((_c)->ic_flagext |= IEEE80211_CHAN_DISALLOW_HOSTAP)
+#define IEEE80211_CHAN_SET_DFS(_c)  \
+	((_c)->ic_flagext |= IEEE80211_CHAN_DFS)
+#define IEEE80211_CHAN_SET_DFS_CLEAR(_c)  \
+	((_c)->ic_flagext |= IEEE80211_CHAN_DFS_CLEAR)
+#define IEEE80211_CHAN_EXCLUDE_11D(_c)	\
+	((_c)->ic_flagext |= IEEE80211_CHAN_11D_EXCLUDED)
+
+/* channel encoding for FH phy */
+#define IEEE80211_FH_CHANMOD            80
+#define IEEE80211_FH_CHAN(set,pat)      (((set)-1)*IEEE80211_FH_CHANMOD+(pat))
+#define IEEE80211_FH_CHANSET(chan)      ((chan)/IEEE80211_FH_CHANMOD+1)
+#define IEEE80211_FH_CHANPAT(chan)      ((chan)%IEEE80211_FH_CHANMOD)
+
+/*
+ * 802.11 rate set.
+ */
+#define IEEE80211_RATE_SIZE     8       /* 802.11 standard */
+#define IEEE80211_RATE_MAXSIZE  36      /* max rates we'll handle */
+#define IEEE80211_HT_RATE_SIZE  128
+#define IEEE80211_RATE_SINGLE_STREAM_MCS_MAX     7      /* MCS7 */
+
+#define IEEE80211_RATE_MCS      0x8000
+#define IEEE80211_RATE_MCS_VAL  0x7FFF
+
+#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
+
+/*
+ * RSSI range
+ */
+#define IEEE80211_RSSI_MAX           -10        /* in db */
+#define IEEE80211_RSSI_MIN           -200
+
+/*
+ * 11n A-MPDU & A-MSDU limits
+ */
+#define IEEE80211_AMPDU_LIMIT_MIN           (1 * 1024)
+#define IEEE80211_AMPDU_LIMIT_MAX           (64 * 1024 - 1)
+#define IEEE80211_AMPDU_LIMIT_DEFAULT       IEEE80211_AMPDU_LIMIT_MAX
+#define IEEE80211_AMPDU_SUBFRAME_MIN        2
+#define IEEE80211_AMPDU_SUBFRAME_MAX        64
+#define IEEE80211_AMPDU_SUBFRAME_DEFAULT    32
+#define IEEE80211_AMSDU_LIMIT_MAX           4096
+#define IEEE80211_RIFS_AGGR_DIV             10
+#define IEEE80211_MAX_AMPDU_MIN             0
+#define IEEE80211_MAX_AMPDU_MAX             3
+
+/*
+ * 11ac A-MPDU limits
+ */
+#define IEEE80211_VHT_MAX_AMPDU_MIN         0
+#define IEEE80211_VHT_MAX_AMPDU_MAX         7
+
+/* Legacy/HT rate set advertised or negotiated for a BSS */
+struct ieee80211_rateset {
+	uint8_t rs_nrates;      /* number of valid entries in rs_rates */
+	uint8_t rs_rates[IEEE80211_RATE_MAXSIZE];
+};
+
+/* Summary of a received beacon used for scan results */
+struct ieee80211_beacon_info {
+	uint8_t essid[IEEE80211_NWID_LEN + 1];  /* +1 leaves room for a NUL terminator */
+	uint8_t esslen;         /* length of essid in bytes */
+	uint8_t rssi_ctl_0;     /* per-chain control-channel RSSI — assumption, TODO confirm */
+	uint8_t rssi_ctl_1;
+	uint8_t rssi_ctl_2;
+	int numchains;          /* number of rx chains reporting RSSI */
+};
+
+#define IEEE80211_ADDR_LEN  6   /* size of 802.11 address */
+
+/* Single IBSS peer identified by its BSSID */
+struct ieee80211_ibss_peer_list {
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+};
+
+/* Roaming thresholds per PHY flavor; pad fields keep 32-bit alignment */
+struct ieee80211_roam {
+	int8_t rssi11a;         /* rssi thresh for 11a bss */
+	int8_t rssi11b;         /* for 11g sta in 11b bss */
+	int8_t rssi11bOnly;     /* for 11b sta */
+	uint8_t pad1;
+	uint8_t rate11a;        /* rate thresh for 11a bss */
+	uint8_t rate11b;        /* for 11g sta in 11b bss */
+	uint8_t rate11bOnly;    /* for 11b sta */
+	uint8_t pad2;
+};
+
+#define IEEE80211_TID_SIZE      17      /* total number of TIDs */
+#define IEEE80211_NON_QOS_SEQ   16      /* index for non-QoS (including management) sequence number space */
+#define IEEE80211_SEQ_MASK      0xfff   /* sequence generator mask */
+#define MIN_SW_SEQ              0x100   /* minimum sequence for SW generate packect */
+
+/* crypto related defines*/
+#define IEEE80211_KEYBUF_SIZE   16
+#define IEEE80211_MICBUF_SIZE   (8+8)   /* space for both tx+rx keys */
+
+/* Channel-list update commands — presumably delivered to the channel
+ * list maintenance logic; TODO confirm consumer */
+enum ieee80211_clist_cmd {
+	CLIST_UPDATE,
+	CLIST_DFS_UPDATE,
+	CLIST_NEW_COUNTRY,
+	CLIST_NOL_UPDATE
+};
+
+/* NAWDS (repeater) configuration parameter identifiers */
+enum ieee80211_nawds_param {
+	IEEE80211_NAWDS_PARAM_NUM = 0,
+	IEEE80211_NAWDS_PARAM_MODE,
+	IEEE80211_NAWDS_PARAM_DEFCAPS,
+	IEEE80211_NAWDS_PARAM_OVERRIDE,
+};
+
+/* Raw MIB cycle counters sampled from hardware — used to derive
+ * channel-busy statistics */
+struct ieee80211_mib_cycle_cnts {
+	uint32_t tx_frame_count;
+	uint32_t rx_frame_count;
+	uint32_t rx_clear_count;
+	uint32_t cycle_count;
+	uint8_t is_rx_active;
+	uint8_t is_tx_active;
+};
+
+/* Accumulated channel utilization state between MIB samples */
+struct ieee80211_chanutil_info {
+	uint32_t rx_clear_count;
+	uint32_t cycle_count;
+	uint8_t value;          /* last computed utilization value — units/scale not shown here, TODO confirm */
+	uint32_t beacon_count;
+	uint8_t beacon_intervals;
+};
+
+#endif /* CDS_COMMON__IEEE80211_I_H_ */

+ 215 - 0
core/cds/src/cds_mq.c

@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: cds_mq.c
+ *
+ * Connectivity driver services (CDS) message queue APIs
+ *
+ * Message Queue Definitions and API
+ */
+
+/* Include Files */
+#include <cds_mq.h>
+#include "cds_sched.h"
+#include <cds_api.h>
+#include "sir_types.h"
+
+/* Preprocessor definitions and constants */
+
+/* Type declarations */
+
+/* Function declarations and documenation */
+
+tSirRetStatus u_mac_post_ctrl_msg(void *pSirGlobal, void *pMb);
+
+/**
+ * cds_mq_init() - initialize cds message queue
+ * @pMq: Pointer to the message queue
+ *
+ * This function initializes the Message queue.
+ *
+ * Return: cdf status
+ */
+inline CDF_STATUS cds_mq_init(p_cds_mq_type pMq)
+{
+	if (NULL == pMq) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	/* Prepare the queue: an empty list guarded by a spinlock */
+	spin_lock_init(&pMq->mqLock);
+	INIT_LIST_HEAD(&pMq->mqList);
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_mq_init() */
+
+/**
+ * cds_mq_deinit() - de-initialize cds message queue
+ * @pMq: Pointer to the message queue
+ *
+ * This function de-initializes cds message queue
+ *
+ * Return: none
+ */
+inline void cds_mq_deinit(p_cds_mq_type pMq)
+{
+	if (NULL == pMq) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		return;
+	}
+
+	/* Nothing to release: the embedded list and spinlock own no
+	 * resources of their own */
+} /* cds_mq_deinit() */
+
+/**
+ * cds_mq_put() - add a message to the message queue
+ * @pMq: Pointer to the message queue
+ * @pMsgWrapper: Msg wrapper containing the message
+ *
+ * Return: none
+ */
+inline void cds_mq_put(p_cds_mq_type pMq, p_cds_msg_wrapper pMsgWrapper)
+{
+	unsigned long flags;
+
+	if ((NULL == pMq) || (NULL == pMsgWrapper)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		return;
+	}
+
+	/* Append under the queue lock; the IRQ-safe variant suggests
+	 * producers may run in interrupt context — TODO confirm */
+	spin_lock_irqsave(&pMq->mqLock, flags);
+	list_add_tail(&pMsgWrapper->msgNode, &pMq->mqList);
+	spin_unlock_irqrestore(&pMq->mqLock, flags);
+} /* cds_mq_put() */
+
+/**
+ * cds_mq_get() - get a message with its wrapper from a message queue
+ * @pMq: Pointer to the message queue
+ *
+ * Return: pointer to the message wrapper
+ */
+inline p_cds_msg_wrapper cds_mq_get(p_cds_mq_type pMq)
+{
+	unsigned long flags;
+	struct list_head *node;
+	p_cds_msg_wrapper wrapper = NULL;
+
+	if (NULL == pMq) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&pMq->mqLock, flags);
+	if (list_empty(&pMq->mqList)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_WARN,
+			  "%s: CDS Message Queue is empty", __func__);
+	} else {
+		/* Detach the oldest entry (FIFO order) */
+		node = pMq->mqList.next;
+		wrapper = list_entry(node, cds_msg_wrapper, msgNode);
+		list_del(node);
+	}
+	spin_unlock_irqrestore(&pMq->mqLock, flags);
+
+	return wrapper;
+} /* cds_mq_get() */
+
+/**
+ * cds_is_mq_empty() - check if the message queue is empty
+ * @pMq: Pointer to the message queue
+ *
+ * Return: true if message queue is emtpy
+ *	   false otherwise
+ */
+inline bool cds_is_mq_empty(p_cds_mq_type pMq)
+{
+	bool state = false;
+	unsigned long flags;
+
+	if (NULL == pMq) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed", __func__);
+		/* Bug fix: the original returned CDF_STATUS_E_FAILURE from
+		 * a bool-returning function.  A NULL queue yields no
+		 * messages, so report it as empty; this also matches the
+		 * truthiness of the original non-success status — TODO
+		 * confirm CDF_STATUS_E_FAILURE is non-zero. */
+		return true;
+	}
+
+	spin_lock_irqsave(&pMq->mqLock, flags);
+	state = list_empty(&pMq->mqList) ? true : false;
+	spin_unlock_irqrestore(&pMq->mqLock, flags);
+
+	return state;
+} /* cds_is_mq_empty() */
+
+/**
+ * cds_send_mb_message_to_mac() - post a message to a message queue
+ * @pBuf: Pointer to buffer allocated by caller
+ *
+ * Return: cdf status
+ */
+CDF_STATUS cds_send_mb_message_to_mac(void *pBuf)
+{
+	CDF_STATUS status = CDF_STATUS_E_FAILURE;
+	v_CONTEXT_t cds_context;
+	void *hHal;
+
+	cds_context = cds_get_global_context();
+	if (NULL == cds_context) {
+		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
+			  "%s: invalid cds_context", __func__);
+		goto free_buf;
+	}
+
+	hHal = cds_get_context(CDF_MODULE_ID_SME);
+	if (NULL == hHal) {
+		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
+			  "%s: invalid hHal", __func__);
+		goto free_buf;
+	}
+
+	if (eSIR_SUCCESS == u_mac_post_ctrl_msg(hHal, pBuf))
+		status = CDF_STATUS_SUCCESS;
+
+free_buf:
+	/* Ownership of pBuf transfers to this function: always release it */
+	cdf_mem_free(pBuf);
+
+	return status;
+}

+ 348 - 0
core/cds/src/cds_packet.c

@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**=========================================================================
+
+   \file        cds_packet.c
+
+   \brief       Connectivity driver services (CDS) network Packet APIs
+
+   Network Protocol packet/buffer support interfaces
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include <cds_packet.h>
+#include <i_cds_packet.h>
+#include <cdf_mc_timer.h>
+#include <cdf_trace.h>
+#include <wlan_hdd_main.h>
+#include "cdf_nbuf.h"
+#include "cdf_memory.h"
+
+#define TX_PKT_MIN_HEADROOM          (64)
+
+/* Protocol specific packet tracking feature */
+#define CDS_PKT_TRAC_ETH_TYPE_OFFSET (12)
+#define CDS_PKT_TRAC_IP_OFFSET       (14)
+#define CDS_PKT_TRAC_IP_HEADER_SIZE  (20)
+#define CDS_PKT_TRAC_DHCP_SRV_PORT   (67)
+#define CDS_PKT_TRAC_DHCP_CLI_PORT   (68)
+#define CDS_PKT_TRAC_EAPOL_ETH_TYPE  (0x888E)
+#ifdef QCA_PKT_PROTO_TRACE
+#define CDS_PKT_TRAC_MAX_STRING_LEN  (12)
+#define CDS_PKT_TRAC_MAX_TRACE_BUF   (50)
+#define CDS_PKT_TRAC_MAX_STRING_BUF  (64)
+
+/* protocol Storage Structure */
+typedef struct {
+	uint32_t order;
+	v_TIME_t event_time;
+	char event_string[CDS_PKT_TRAC_MAX_STRING_LEN];
+} cds_pkt_proto_trace_t;
+
+cds_pkt_proto_trace_t *trace_buffer = NULL;
+unsigned int trace_buffer_order = 0;
+cdf_spinlock_t trace_buffer_lock;
+#endif /* QCA_PKT_PROTO_TRACE */
+
+/**
+ * cds_pkt_return_packet() - free a cds packet
+ * @packet: the cds packet to release
+ *
+ * Frees the attached nbuf and then the packet wrapper itself.
+ *
+ * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_INVAL when @packet is NULL
+ */
+CDF_STATUS cds_pkt_return_packet(cds_pkt_t *packet)
+{
+	if (unlikely(NULL == packet))
+		return CDF_STATUS_E_INVAL;
+
+	/* release the network buffer first, then the wrapper */
+	cdf_nbuf_free(packet->pkt_buf);
+	packet->pkt_buf = NULL;
+	cdf_mem_free(packet);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_pkt_get_packet_length() - get the data length of a cds packet
+ * @pPacket: the cds packet to query
+ * @pPacketSize: out-parameter receiving the total data length
+ *
+ * Return: CDF_STATUS_SUCCESS on success,
+ *	   CDF_STATUS_E_INVAL when any pointer (or the attached
+ *	   buffer) is NULL
+ */
+CDF_STATUS
+cds_pkt_get_packet_length(cds_pkt_t *pPacket, uint16_t *pPacketSize)
+{
+	/* all three pointers must be valid before dereferencing;
+	 * short-circuit ordering guards pPacket->pkt_buf */
+	if (unlikely((NULL == pPacket) || (NULL == pPacketSize)) ||
+	    (NULL == pPacket->pkt_buf)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "VPKT [%d]: NULL pointer", __LINE__);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	*pPacketSize = cdf_nbuf_len(pPacket->pkt_buf);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/*---------------------------------------------------------------------------
+* @brief cds_pkt_get_proto_type() -
+      Classify a frame for protocol tracking
+
+* skb Packet Pointer
+* tracking_map bitmap of CDS_PKT_TRAC_TYPE_* types to look for
+* dot11_type, type of dot11 frame (non-zero for management frames)
+   ---------------------------------------------------------------------------*/
+uint8_t cds_pkt_get_proto_type(struct sk_buff *skb, uint8_t tracking_map,
+			       uint8_t dot11_type)
+{
+	uint8_t proto_bitmap = 0;
+	uint16_t eth_type;
+	uint16_t src_port;
+	uint16_t dst_port;
+
+	/* management frames are matched on dot11_type alone */
+	if (dot11_type) {
+		if (dot11_type ==
+		    (CDS_PKT_TRAC_TYPE_MGMT_ACTION & tracking_map))
+			proto_bitmap |= CDS_PKT_TRAC_TYPE_MGMT_ACTION;
+		return proto_bitmap;
+	}
+
+	/* EAPOL tracking: ethertype sits after the two MAC addresses */
+	if (CDS_PKT_TRAC_TYPE_EAPOL & tracking_map) {
+		eth_type = *(uint16_t *)
+			(skb->data + CDS_PKT_TRAC_ETH_TYPE_OFFSET);
+		if (CDS_PKT_TRAC_EAPOL_ETH_TYPE == CDF_SWAP_U16(eth_type))
+			proto_bitmap |= CDS_PKT_TRAC_TYPE_EAPOL;
+	}
+
+	/* DHCP tracking: UDP ports read at a fixed offset — assumes a
+	 * 20-byte IPv4 header with no options; TODO confirm callers
+	 * only pass such frames */
+	if (CDS_PKT_TRAC_TYPE_DHCP & tracking_map) {
+		src_port = *(uint16_t *)
+			(skb->data + CDS_PKT_TRAC_IP_OFFSET +
+			 CDS_PKT_TRAC_IP_HEADER_SIZE);
+		dst_port = *(uint16_t *)
+			(skb->data + CDS_PKT_TRAC_IP_OFFSET +
+			 CDS_PKT_TRAC_IP_HEADER_SIZE + sizeof(uint16_t));
+		if (((CDS_PKT_TRAC_DHCP_SRV_PORT == CDF_SWAP_U16(src_port))
+		     && (CDS_PKT_TRAC_DHCP_CLI_PORT == CDF_SWAP_U16(dst_port)))
+		    || ((CDS_PKT_TRAC_DHCP_CLI_PORT == CDF_SWAP_U16(src_port))
+			&& (CDS_PKT_TRAC_DHCP_SRV_PORT ==
+			    CDF_SWAP_U16(dst_port))))
+			proto_bitmap |= CDS_PKT_TRAC_TYPE_DHCP;
+	}
+
+	return proto_bitmap;
+}
+
+#ifdef QCA_PKT_PROTO_TRACE
+/*---------------------------------------------------------------------------
+* @brief cds_pkt_trace_buf_update() -
+      Record an event string into the circular protocol trace buffer
+
+* event_string Event String may packet type or outstanding event
+   ---------------------------------------------------------------------------*/
+void cds_pkt_trace_buf_update(char *event_string)
+{
+	uint32_t slot;
+	size_t copy_len;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s %d, %s", __func__, __LINE__, event_string);
+
+	cdf_spinlock_acquire(&trace_buffer_lock);
+	/* the buffer wraps, keeping the most recent
+	 * CDS_PKT_TRAC_MAX_TRACE_BUF events */
+	slot = trace_buffer_order % CDS_PKT_TRAC_MAX_TRACE_BUF;
+	trace_buffer[slot].order = trace_buffer_order;
+	trace_buffer[slot].event_time = cdf_mc_timer_get_system_time();
+	cdf_mem_zero(trace_buffer[slot].event_string,
+		     sizeof(trace_buffer[slot].event_string));
+	/* truncate to the fixed-size string field if needed */
+	copy_len = strlen(event_string);
+	if (copy_len > CDS_PKT_TRAC_MAX_STRING_LEN)
+		copy_len = CDS_PKT_TRAC_MAX_STRING_LEN;
+	cdf_mem_copy(trace_buffer[slot].event_string, event_string, copy_len);
+	trace_buffer_order++;
+	cdf_spinlock_release(&trace_buffer_lock);
+}
+
+/*---------------------------------------------------------------------------
+* @brief cds_pkt_trace_buf_dump() -
+      Dump stored trace entries into the kernel log, oldest first
+   ---------------------------------------------------------------------------*/
+void cds_pkt_trace_buf_dump(void)
+{
+	uint32_t slot, idx, entries;
+	bool wrapped;
+
+	cdf_spinlock_acquire(&trace_buffer_lock);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "PACKET TRACE DUMP START Current Timestamp %u",
+		  (unsigned int)cdf_mc_timer_get_system_time());
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "ORDER :        TIME : EVT");
+
+	/* before wrapping, valid entries occupy [0, order); after
+	 * wrapping the oldest entry sits at order % MAX */
+	wrapped = (trace_buffer_order >= CDS_PKT_TRAC_MAX_TRACE_BUF);
+	entries = wrapped ? CDS_PKT_TRAC_MAX_TRACE_BUF : trace_buffer_order;
+
+	for (idx = 0; idx < entries; idx++) {
+		slot = wrapped ?
+			((trace_buffer_order + idx) %
+			 CDS_PKT_TRAC_MAX_TRACE_BUF) : idx;
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%5d :%12u : %s", trace_buffer[slot].order,
+			  (unsigned int)trace_buffer[slot].event_time,
+			  trace_buffer[slot].event_string);
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "PACKET TRACE DUMP END");
+	cdf_spinlock_release(&trace_buffer_lock);
+}
+
+/*---------------------------------------------------------------------------
+* @brief cds_pkt_proto_trace_init() -
+      Initialize protocol trace functionality, allocate required resource
+   ---------------------------------------------------------------------------*/
+void cds_pkt_proto_trace_init(void)
+{
+	/* Init spin lock to protect global memory */
+	cdf_spinlock_init(&trace_buffer_lock);
+	trace_buffer_order = 0;
+	trace_buffer =
+		cdf_mem_malloc(CDS_PKT_TRAC_MAX_TRACE_BUF *
+			       sizeof(cds_pkt_proto_trace_t));
+	/* fix: the allocation result was previously used unchecked;
+	 * on failure, zeroing and later callback writes would
+	 * dereference NULL — leave tracing disabled instead */
+	if (NULL == trace_buffer) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: trace buffer allocation failed", __func__);
+		return;
+	}
+	cdf_mem_zero((void *)trace_buffer,
+		     CDS_PKT_TRAC_MAX_TRACE_BUF *
+		     sizeof(cds_pkt_proto_trace_t));
+
+	/* Register callback function to NBUF
+	 * Lower layer event also will be reported to here */
+	cdf_nbuf_reg_trace_cb(cds_pkt_trace_buf_update);
+	return;
+}
+
+/*---------------------------------------------------------------------------
+* @brief cds_pkt_proto_trace_close() -
+      Release the protocol trace buffer and its protecting lock
+   ---------------------------------------------------------------------------*/
+void cds_pkt_proto_trace_close(void)
+{
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "%s %d", __func__, __LINE__);
+	/* free the storage first, then tear down the lock */
+	cdf_mem_free(trace_buffer);
+	cdf_spinlock_destroy(&trace_buffer_lock);
+}
+#endif /* QCA_PKT_PROTO_TRACE */
+
+#ifdef MEMORY_DEBUG
+/*---------------------------------------------------------------------------
+* @brief cds_packet_alloc_debug() -
+      Allocate a network buffer for TX (memory-debug build variant)
+   ---------------------------------------------------------------------------*/
+CDF_STATUS cds_packet_alloc_debug(uint16_t size, void **data, void **ppPacket,
+				  uint8_t *file_name, uint32_t line_num)
+{
+	cdf_nbuf_t buf;
+
+	/* round the buffer up to a word multiple, leaving headroom for
+	 * lower layers; attribute the allocation to the caller's site */
+	buf = cdf_nbuf_alloc_debug(NULL,
+				   roundup(size + TX_PKT_MIN_HEADROOM, 4),
+				   TX_PKT_MIN_HEADROOM, sizeof(uint32_t),
+				   false, file_name, line_num);
+	if (NULL == buf)
+		return CDF_STATUS_E_FAILURE;
+
+	cdf_nbuf_put_tail(buf, size);
+	cdf_nbuf_set_protocol(buf, ETH_P_CONTROL);
+	*ppPacket = buf;
+	*data = cdf_nbuf_data(buf);
+
+	return CDF_STATUS_SUCCESS;
+}
+#else
+/*---------------------------------------------------------------------------
+* @brief cds_packet_alloc() -
+      Allocate a network buffer for TX
+   ---------------------------------------------------------------------------*/
+CDF_STATUS cds_packet_alloc(uint16_t size, void **data, void **ppPacket)
+{
+	cdf_nbuf_t buf;
+
+	/* round the buffer up to a word multiple, leaving headroom
+	 * for lower layers */
+	buf = cdf_nbuf_alloc(NULL, roundup(size + TX_PKT_MIN_HEADROOM, 4),
+			     TX_PKT_MIN_HEADROOM, sizeof(uint32_t), false);
+	if (NULL == buf)
+		return CDF_STATUS_E_FAILURE;
+
+	cdf_nbuf_put_tail(buf, size);
+	cdf_nbuf_set_protocol(buf, ETH_P_CONTROL);
+	*ppPacket = buf;
+	*data = cdf_nbuf_data(buf);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+
+#endif
+/*---------------------------------------------------------------------------
+* @brief cds_packet_free() -
+      Free input network buffer
+   ---------------------------------------------------------------------------*/
+void cds_packet_free(void *pPacket)
+{
+	/* pPacket is the nbuf handle handed out via cds_packet_alloc() */
+	cdf_nbuf_free((cdf_nbuf_t) pPacket);
+}

+ 1439 - 0
core/cds/src/cds_reg_service.c

@@ -0,0 +1,1439 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*============================================================================
+   FILE:         cds_reg_service.c
+   OVERVIEW:     This source file contains definitions for CDS regulatory APIs
+   DEPENDENCIES: None
+   ============================================================================*/
+
+#include <net/cfg80211.h>
+#include "cdf_types.h"
+#include "cds_reg_service.h"
+#include "cdf_trace.h"
+#include "sme_api.h"
+#include "wlan_hdd_main.h"
+#include "cds_regdomain.h"
+#include "cds_regdomain_common.h"
+
+#define WORLD_SKU_MASK          0x00F0
+#define WORLD_SKU_PREFIX        0x0060
+#define MAX_COUNTRY_COUNT       300
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
+#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
+#endif
+
+static v_REGDOMAIN_t temp_reg_domain = REGDOMAIN_COUNT;
+
+/* true if init happens thru init time driver hint */
+static bool init_by_driver = false;
+/* true if init happens thru init time  callback from regulatory core.
+   this should be set to true during driver reload */
+static bool init_by_reg_core = false;
+
+#define REG_WAIT_TIME            50
+
+#define REG_RULE_2412_2462    REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+#define REG_RULE_2467_2472    REG_RULE(2467-10, 2472+10, 40, 0, 20, \
+			      NL80211_RRF_PASSIVE_SCAN)
+
+#define REG_RULE_2484         REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+		       NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
+
+#define REG_RULE_5180_5320    REG_RULE(5180-10, 5320+10, 80, 0, 20, \
+		NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+
+#define REG_RULE_5500_5720    REG_RULE(5500-10, 5720+10, 80, 0, 20, \
+		NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+
+#define REG_RULE_5745_5925    REG_RULE(5745-10, 5925+10, 80, 0, 20, \
+		NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+
+static const struct ieee80211_regdomain cds_world_regdom_60_61_62 = {
+	.n_reg_rules = 6,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE_2412_2462,
+		REG_RULE_2467_2472,
+		REG_RULE_2484,
+		REG_RULE_5180_5320,
+		REG_RULE_5500_5720,
+		REG_RULE_5745_5925,
+	}
+};
+
+static const struct ieee80211_regdomain cds_world_regdom_63_65 = {
+	.n_reg_rules = 4,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE_2412_2462,
+		REG_RULE_2467_2472,
+		REG_RULE_5180_5320,
+		REG_RULE_5745_5925,
+	}
+};
+
+static const struct ieee80211_regdomain cds_world_regdom_64 = {
+	.n_reg_rules = 3,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE_2412_2462,
+		REG_RULE_5180_5320,
+		REG_RULE_5745_5925,
+	}
+};
+
+static const struct ieee80211_regdomain cds_world_regdom_66_69 = {
+	.n_reg_rules = 4,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE_2412_2462,
+		REG_RULE_5180_5320,
+		REG_RULE_5500_5720,
+		REG_RULE_5745_5925,
+	}
+};
+
+static const struct ieee80211_regdomain cds_world_regdom_67_68_6A_6C = {
+	.n_reg_rules = 5,
+	.alpha2 =  "00",
+	.reg_rules = {
+		REG_RULE_2412_2462,
+		REG_RULE_2467_2472,
+		REG_RULE_5180_5320,
+		REG_RULE_5500_5720,
+		REG_RULE_5745_5925,
+	}
+};
+
+typedef struct {
+	uint8_t regDomain;
+	country_code_t countryCode;
+} CountryInfo_t;
+
+typedef struct {
+	uint16_t countryCount;
+	CountryInfo_t countryInfo[MAX_COUNTRY_COUNT];
+} CountryInfoTable_t;
+
+static CountryInfoTable_t country_info_table = {
+	/* the first entry in the table is always the world domain */
+	138,
+	{
+		{REGDOMAIN_WORLD, {'0', '0'}}, /* WORLD DOMAIN */
+		{REGDOMAIN_FCC, {'A', 'D'}}, /* ANDORRA */
+		{REGDOMAIN_ETSI, {'A', 'E'}}, /* UAE */
+		{REGDOMAIN_ETSI, {'A', 'L'}}, /* ALBANIA */
+		{REGDOMAIN_ETSI, {'A', 'M'}}, /* ARMENIA */
+		{REGDOMAIN_ETSI, {'A', 'N'}}, /* NETHERLANDS ANTILLES */
+		{REGDOMAIN_FCC, {'A', 'R'}}, /* ARGENTINA */
+		{REGDOMAIN_FCC, {'A', 'S'}}, /* AMERICAN SOMOA */
+		{REGDOMAIN_ETSI, {'A', 'T'}}, /* AUSTRIA */
+		{REGDOMAIN_FCC, {'A', 'U'}}, /* AUSTRALIA */
+		{REGDOMAIN_ETSI, {'A', 'W'}}, /* ARUBA */
+		{REGDOMAIN_ETSI, {'A', 'Z'}}, /* AZERBAIJAN */
+		{REGDOMAIN_ETSI, {'B', 'A'}}, /* BOSNIA AND HERZEGOVINA */
+		{REGDOMAIN_FCC, {'B', 'B'}}, /* BARBADOS */
+		{REGDOMAIN_ETSI, {'B', 'D'}}, /* BANGLADESH */
+		{REGDOMAIN_ETSI, {'B', 'E'}}, /* BELGIUM */
+		{REGDOMAIN_ETSI, {'B', 'G'}}, /* BULGARIA */
+		{REGDOMAIN_ETSI, {'B', 'H'}}, /* BAHRAIN */
+		{REGDOMAIN_ETSI, {'B', 'L'}}, /* */
+		{REGDOMAIN_FCC, {'B', 'M'}}, /* BERMUDA */
+		{REGDOMAIN_ETSI, {'B', 'N'}}, /* BRUNEI DARUSSALAM */
+		{REGDOMAIN_ETSI, {'B', 'O'}}, /* BOLIVIA */
+		{REGDOMAIN_ETSI, {'B', 'R'}}, /* BRAZIL */
+		{REGDOMAIN_FCC, {'B', 'S'}}, /* BAHAMAS */
+		{REGDOMAIN_ETSI, {'B', 'Y'}}, /* BELARUS */
+		{REGDOMAIN_ETSI, {'B', 'Z'}}, /* BELIZE */
+		{REGDOMAIN_FCC, {'C', 'A'}}, /* CANADA */
+		{REGDOMAIN_ETSI, {'C', 'H'}}, /* SWITZERLAND */
+		{REGDOMAIN_ETSI, {'C', 'L'}}, /* CHILE */
+		{REGDOMAIN_FCC, {'C', 'N'}}, /* CHINA */
+		{REGDOMAIN_FCC, {'C', 'O'}}, /* COLOMBIA */
+		{REGDOMAIN_ETSI, {'C', 'R'}}, /* COSTA RICA */
+		{REGDOMAIN_ETSI, {'C', 'S'}},
+		{REGDOMAIN_ETSI, {'C', 'Y'}}, /* CYPRUS */
+		{REGDOMAIN_ETSI, {'C', 'Z'}}, /* CZECH REPUBLIC */
+		{REGDOMAIN_ETSI, {'D', 'E'}}, /* GERMANY */
+		{REGDOMAIN_ETSI, {'D', 'K'}}, /* DENMARK */
+		{REGDOMAIN_FCC, {'D', 'O'}}, /* DOMINICAN REPUBLIC */
+		{REGDOMAIN_ETSI, {'D', 'Z'}}, /* ALGERIA */
+		{REGDOMAIN_ETSI, {'E', 'C'}}, /* ECUADOR */
+		{REGDOMAIN_ETSI, {'E', 'E'}}, /* ESTONIA */
+		{REGDOMAIN_ETSI, {'E', 'G'}}, /* EGYPT */
+		{REGDOMAIN_ETSI, {'E', 'S'}}, /* SPAIN */
+		{REGDOMAIN_ETSI, {'F', 'I'}}, /* FINLAND */
+		{REGDOMAIN_ETSI, {'F', 'R'}}, /* FRANCE */
+		{REGDOMAIN_ETSI, {'G', 'B'}}, /* UNITED KINGDOM */
+		{REGDOMAIN_FCC, {'G', 'D'}}, /* GRENADA */
+		{REGDOMAIN_ETSI, {'G', 'E'}}, /* GEORGIA */
+		{REGDOMAIN_ETSI, {'G', 'F'}}, /* FRENCH GUIANA */
+		{REGDOMAIN_ETSI, {'G', 'L'}}, /* GREENLAND */
+		{REGDOMAIN_ETSI, {'G', 'P'}}, /* GUADELOUPE */
+		{REGDOMAIN_ETSI, {'G', 'R'}}, /* GREECE */
+		{REGDOMAIN_FCC, {'G', 'T'}}, /* GUATEMALA */
+		{REGDOMAIN_FCC, {'G', 'U'}}, /* GUAM */
+		{REGDOMAIN_ETSI, {'H', 'U'}}, /* HUNGARY */
+		{REGDOMAIN_FCC, {'I', 'D'}}, /* INDONESIA */
+		{REGDOMAIN_ETSI, {'I', 'E'}}, /* IRELAND */
+		{REGDOMAIN_ETSI, {'I', 'L'}}, /* ISRAEL */
+		{REGDOMAIN_ETSI, {'I', 'N'}}, /* INDIA */
+		{REGDOMAIN_ETSI, {'I', 'R'}}, /* IRAN, ISLAMIC REPUBLIC OF */
+		{REGDOMAIN_ETSI, {'I', 'S'}}, /* ICELNAD */
+		{REGDOMAIN_ETSI, {'I', 'T'}}, /* ITALY */
+		{REGDOMAIN_FCC, {'J', 'M'}}, /* JAMAICA */
+		{REGDOMAIN_JAPAN, {'J', 'P'}}, /* JAPAN */
+		{REGDOMAIN_ETSI, {'J', 'O'}}, /* JORDAN */
+		{REGDOMAIN_ETSI, {'K', 'E'}}, /* KENYA */
+		{REGDOMAIN_ETSI, {'K', 'H'}}, /* CAMBODIA */
+		{REGDOMAIN_ETSI, {'K', 'P'}}, /* KOREA, DEMOCRATIC PEOPLE's REPUBLIC OF */
+		{REGDOMAIN_ETSI, {'K', 'R'}}, /* KOREA, REPUBLIC OF */
+		{REGDOMAIN_ETSI, {'K', 'W'}}, /* KUWAIT */
+		{REGDOMAIN_ETSI, {'K', 'Z'}}, /* KAZAKHSTAN */
+		{REGDOMAIN_ETSI, {'L', 'B'}}, /* LEBANON */
+		{REGDOMAIN_ETSI, {'L', 'I'}}, /* LIECHTENSTEIN */
+		{REGDOMAIN_ETSI, {'L', 'K'}}, /* SRI-LANKA */
+		{REGDOMAIN_ETSI, {'L', 'T'}}, /* LITHUANIA */
+		{REGDOMAIN_ETSI, {'L', 'U'}}, /* LUXEMBOURG */
+		{REGDOMAIN_ETSI, {'L', 'V'}}, /* LATVIA */
+		{REGDOMAIN_ETSI, {'M', 'A'}}, /* MOROCCO */
+		{REGDOMAIN_ETSI, {'M', 'C'}}, /* MONACO */
+		{REGDOMAIN_ETSI, {'M', 'K'}}, /* MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF */
+		{REGDOMAIN_FCC, {'M', 'N'}}, /* MONGOLIA */
+		{REGDOMAIN_FCC, {'M', 'O'}}, /* MACAO */
+		{REGDOMAIN_FCC, {'M', 'P'}}, /* NORTHERN MARIANA ISLANDS */
+		{REGDOMAIN_ETSI, {'M', 'Q'}}, /* MARTINIQUE */
+		{REGDOMAIN_FCC, {'M', 'T'}}, /* MALTA */
+		{REGDOMAIN_ETSI, {'M', 'U'}}, /* MAURITIUS */
+		{REGDOMAIN_ETSI, {'M', 'W'}}, /* MALAWI */
+		{REGDOMAIN_FCC, {'M', 'X'}}, /* MEXICO */
+		{REGDOMAIN_ETSI, {'M', 'Y'}}, /* MALAYSIA */
+		{REGDOMAIN_ETSI, {'N', 'G'}}, /* NIGERIA */
+		{REGDOMAIN_FCC, {'N', 'I'}}, /* NICARAGUA */
+		{REGDOMAIN_ETSI, {'N', 'L'}}, /* NETHERLANDS */
+		{REGDOMAIN_ETSI, {'N', 'O'}}, /* NORWAY */
+		{REGDOMAIN_ETSI, {'N', 'P'}}, /* NEPAL */
+		{REGDOMAIN_FCC, {'N', 'Z'}}, /* NEW-ZEALAND */
+		{REGDOMAIN_FCC, {'O', 'M'}}, /* OMAN */
+		{REGDOMAIN_FCC, {'P', 'A'}}, /* PANAMA */
+		{REGDOMAIN_ETSI, {'P', 'E'}}, /* PERU */
+		{REGDOMAIN_ETSI, {'P', 'F'}}, /* FRENCH POLYNESIA */
+		{REGDOMAIN_ETSI, {'P', 'G'}}, /* PAPUA NEW GUINEA */
+		{REGDOMAIN_FCC, {'P', 'H'}}, /* PHILIPPINES */
+		{REGDOMAIN_ETSI, {'P', 'K'}}, /* PAKISTAN */
+		{REGDOMAIN_ETSI, {'P', 'L'}}, /* POLAND */
+		{REGDOMAIN_FCC, {'P', 'R'}}, /* PUERTO RICO */
+		{REGDOMAIN_FCC, {'P', 'S'}}, /* PALESTINIAN TERRITORY, OCCUPIED */
+		{REGDOMAIN_ETSI, {'P', 'T'}}, /* PORTUGAL */
+		{REGDOMAIN_FCC, {'P', 'Y'}}, /* PARAGUAY */
+		{REGDOMAIN_ETSI, {'Q', 'A'}}, /* QATAR */
+		{REGDOMAIN_ETSI, {'R', 'E'}}, /* REUNION */
+		{REGDOMAIN_ETSI, {'R', 'O'}}, /* ROMAINIA */
+		{REGDOMAIN_ETSI, {'R', 'S'}}, /* SERBIA */
+		{REGDOMAIN_ETSI, {'R', 'U'}}, /* RUSSIA */
+		{REGDOMAIN_FCC, {'R', 'W'}}, /* RWANDA */
+		{REGDOMAIN_ETSI, {'S', 'A'}}, /* SAUDI ARABIA */
+		{REGDOMAIN_ETSI, {'S', 'E'}}, /* SWEDEN */
+		{REGDOMAIN_ETSI, {'S', 'G'}}, /* SINGAPORE */
+		{REGDOMAIN_ETSI, {'S', 'I'}}, /* SLOVENNIA */
+		{REGDOMAIN_ETSI, {'S', 'K'}}, /* SLOVAKIA */
+		{REGDOMAIN_ETSI, {'S', 'V'}}, /* EL SALVADOR */
+		{REGDOMAIN_ETSI, {'S', 'Y'}}, /* SYRIAN ARAB REPUBLIC */
+		{REGDOMAIN_ETSI, {'T', 'H'}}, /* THAILAND */
+		{REGDOMAIN_ETSI, {'T', 'N'}}, /* TUNISIA */
+		{REGDOMAIN_ETSI, {'T', 'R'}}, /* TURKEY */
+		{REGDOMAIN_ETSI, {'T', 'T'}}, /* TRINIDAD AND TOBAGO */
+		{REGDOMAIN_FCC, {'T', 'W'}}, /* TAIWAN, PRIVINCE OF CHINA */
+		{REGDOMAIN_FCC, {'T', 'Z'}}, /* TANZANIA, UNITED REPUBLIC OF */
+		{REGDOMAIN_ETSI, {'U', 'A'}}, /* UKRAINE */
+		{REGDOMAIN_ETSI, {'U', 'G'}}, /* UGANDA */
+		{REGDOMAIN_FCC, {'U', 'S'}}, /* USA */
+		{REGDOMAIN_ETSI, {'U', 'Y'}}, /* URUGUAY */
+		{REGDOMAIN_FCC, {'U', 'Z'}}, /* UZBEKISTAN */
+		{REGDOMAIN_ETSI, {'V', 'E'}}, /* VENEZUELA */
+		{REGDOMAIN_FCC, {'V', 'I'}}, /* VIRGIN ISLANDS, US */
+		{REGDOMAIN_ETSI, {'V', 'N'}}, /* VIETNAM */
+		{REGDOMAIN_ETSI, {'Y', 'E'}}, /* YEMEN */
+		{REGDOMAIN_ETSI, {'Y', 'T'}}, /* MAYOTTE */
+		{REGDOMAIN_ETSI, {'Z', 'A'}}, /* SOUTH AFRICA */
+		{REGDOMAIN_ETSI, {'Z', 'W'}}, /* ZIMBABWE */
+	}
+};
+
+const tRfChannelProps rf_channels[NUM_RF_CHANNELS] = {
+	{2412, 1},
+	{2417, 2},
+	{2422, 3},
+	{2427, 4},
+	{2432, 5},
+	{2437, 6},
+	{2442, 7},
+	{2447, 8},
+	{2452, 9},
+	{2457, 10},
+	{2462, 11},
+	{2467, 12},
+	{2472, 13},
+	{2484, 14},
+	{4920, 240},
+	{4940, 244},
+	{4960, 248},
+	{4980, 252},
+	{5040, 208},
+	{5060, 212},
+	{5080, 216},
+	{5180, 36},
+	{5200, 40},
+	{5220, 44},
+	{5240, 48},
+	{5260, 52},
+	{5280, 56},
+	{5300, 60},
+	{5320, 64},
+	{5500, 100},
+	{5520, 104},
+	{5540, 108},
+	{5560, 112},
+	{5580, 116},
+	{5600, 120},
+	{5620, 124},
+	{5640, 128},
+	{5660, 132},
+	{5680, 136},
+	{5700, 140},
+	{5720, 144},
+	{5745, 149},
+	{5765, 153},
+	{5785, 157},
+	{5805, 161},
+	{5825, 165},
+	{5852, 170},
+	{5855, 171},
+	{5860, 172},
+	{5865, 173},
+	{5870, 174},
+	{5875, 175},
+	{5880, 176},
+	{5885, 177},
+	{5890, 178},
+	{5895, 179},
+	{5900, 180},
+	{5905, 181},
+	{5910, 182},
+	{5915, 183},
+	{5920, 184},
+	{2422, 3},
+	{2427, 4},
+	{2432, 5},
+	{2437, 6},
+	{2442, 7},
+	{2447, 8},
+	{2452, 9},
+	{2457, 10},
+	{2462, 11},
+	{4930, 242},
+	{4950, 246},
+	{4970, 250},
+	{5050, 210},
+	{5070, 214},
+	{5190, 38},
+	{5210, 42},
+	{5230, 46},
+	{5250, 50},
+	{5270, 54},
+	{5290, 58},
+	{5310, 62},
+	{5510, 102},
+	{5530, 106},
+	{5550, 110},
+	{5570, 114},
+	{5590, 118},
+	{5610, 122},
+	{5630, 126},
+	{5650, 130},
+	{5670, 134},
+	{5690, 138},
+	{5710, 142},
+	{5755, 151},
+	{5775, 155},
+	{5795, 159},
+	{5815, 163,                },
+};
+
+static t_reg_table reg_table;
+
+const sRegulatoryChannel *reg_channels =
+	reg_table.regDomains[0].channels;
+
+
+/**
+ * cds_is_wwr_sku() - is regdomain world sku
+ * @regd: integer regulatory domain
+ *
+ * A value carrying COUNTRY_ERD_FLAG is a country code, never a
+ * world SKU; otherwise match on the world-SKU prefix or the
+ * explicit WORLD value.
+ *
+ * Return: bool
+ */
+static inline bool cds_is_wwr_sku(u16 regd)
+{
+	bool is_country_code = ((regd & COUNTRY_ERD_FLAG) == COUNTRY_ERD_FLAG);
+	bool has_world_prefix = ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX);
+
+	return !is_country_code && (has_world_prefix || (regd == WORLD));
+}
+
+/**
+ * cds_is_world_regdomain() - whether world regdomain
+ * @regd: integer regulatory domain
+ *
+ * Return: bool
+ */
+bool cds_is_world_regdomain(uint32_t regd)
+{
+	/* the roaming bit does not affect world-SKU classification */
+	uint32_t sku = regd & ~WORLDWIDE_ROAMING_FLAG;
+
+	return cds_is_wwr_sku(sku);
+}
+
+
+/**
+ * cds_world_regdomain() - which constant world regdomain
+ * @reg: regulatory data
+ *
+ * Maps the firmware regdomain-pair enum to one of the static
+ * world regdomain tables defined above.
+ *
+ * Return: regdomain ptr (never NULL; unknown values warn and fall
+ *	   back to the 0x60/0x61/0x62 table)
+ */
+static const struct ieee80211_regdomain
+*cds_world_regdomain(struct regulatory *reg)
+{
+	REG_DMN_PAIR_MAPPING *regpair =
+		(REG_DMN_PAIR_MAPPING *)reg->regpair;
+
+	if ((regpair->regDmnEnum == 0x60) || (regpair->regDmnEnum == 0x61) ||
+	    (regpair->regDmnEnum == 0x62))
+		return &cds_world_regdom_60_61_62;
+
+	if ((regpair->regDmnEnum == 0x63) || (regpair->regDmnEnum == 0x65))
+		return &cds_world_regdom_63_65;
+
+	if (regpair->regDmnEnum == 0x64)
+		return &cds_world_regdom_64;
+
+	if ((regpair->regDmnEnum == 0x66) || (regpair->regDmnEnum == 0x69))
+		return &cds_world_regdom_66_69;
+
+	if ((regpair->regDmnEnum == 0x67) || (regpair->regDmnEnum == 0x68) ||
+	    (regpair->regDmnEnum == 0x6A) || (regpair->regDmnEnum == 0x6C))
+		return &cds_world_regdom_67_68_6A_6C;
+
+	WARN_ON(1);
+	return &cds_world_regdom_60_61_62;
+}
+
+/**
+ * cds_regulatory_wiphy_init() - regulatory wiphy init
+ * @hdd_ctx: hdd context
+ * @reg: regulatory data
+ * @wiphy: wiphy structure
+ *
+ * Selects custom vs. strict regulatory handling for @wiphy based on
+ * whether @reg describes a world SKU, applies the chosen regdomain,
+ * and preserves the driver's original regulatory flags across
+ * wiphy_apply_custom_regulatory().
+ *
+ * Return: int (always 0)
+ */
+static int cds_regulatory_wiphy_init(hdd_context_t *hdd_ctx,
+				     struct regulatory *reg,
+				     struct wiphy *wiphy)
+{
+	const struct ieee80211_regdomain *reg_domain;
+
+	/* world SKU: use the matching constant world regdomain and
+	 * let the driver manage regulatory itself */
+	if (cds_is_world_regdomain(reg->reg_domain)) {
+		reg_domain = cds_world_regdomain(reg);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+		wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+	} else if (hdd_ctx->config->fRegChangeDefCountry) {
+		/* non-world SKU but default-country changes allowed:
+		 * still custom, with the widest world table as a start */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+		wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+		reg_domain = &cds_world_regdom_60_61_62;
+	} else {
+		/* otherwise follow the regulatory core strictly */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+		wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+#else
+		wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
+#endif
+		reg_domain = &cds_world_regdom_60_61_62;
+	}
+
+	/*
+	 * save the original driver regulatory flags
+	 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+	hdd_ctx->reg.reg_flags = wiphy->regulatory_flags;
+#else
+	hdd_ctx->reg.reg_flags = wiphy->flags;
+#endif
+	wiphy_apply_custom_regulatory(wiphy, reg_domain);
+
+	/*
+	 * restore the driver regulatory flags since
+	 * wiphy_apply_custom_regulatory may have
+	 * changed them
+	 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+	wiphy->regulatory_flags = hdd_ctx->reg.reg_flags;
+#else
+	wiphy->flags = hdd_ctx->reg.reg_flags;
+#endif
+
+	return 0;
+}
+
+/**
+ * cds_update_regulatory_info() - update regulatory info
+ * @hdd_ctx: hdd context
+ *
+ * Rebuilds hdd_ctx->reg.reg_domain from the current alpha2 country
+ * code and then calls cds_fill_some_regulatory_info() — presumably
+ * to refresh the remaining derived fields; confirm against its
+ * definition.
+ *
+ * Return: none
+ */
+static void cds_update_regulatory_info(hdd_context_t *hdd_ctx)
+{
+	uint32_t country_code;
+
+	country_code = cds_get_country_from_alpha2(hdd_ctx->reg.alpha2);
+
+	/* COUNTRY_ERD_FLAG marks reg_domain as carrying a country code */
+	hdd_ctx->reg.reg_domain = COUNTRY_ERD_FLAG;
+	hdd_ctx->reg.reg_domain |= country_code;
+
+	cds_fill_some_regulatory_info(&hdd_ctx->reg);
+
+	return;
+}
+
+
+/**
+ * cds_append_channel_power() - append enabled channels to a power list
+ * @start: first rf_channels/reg_channels index to scan (inclusive)
+ * @end: last index to scan (inclusive)
+ * @list: output list receiving {channel, power} entries
+ * @count: number of entries already present in @list
+ *
+ * Helper shared by the base-channel and 40 MHz list builders; the
+ * original code repeated this loop four times verbatim.
+ *
+ * Return: updated entry count
+ */
+static int cds_append_channel_power(int start, int end,
+				    tChannelListWithPower *list, int count)
+{
+	int i;
+
+	for (i = start; i <= end; i++) {
+		if (reg_channels[i].enabled) {
+			list[count].chanId = rf_channels[i].channelNum;
+			list[count++].pwr = reg_channels[i].pwrLimit;
+		}
+	}
+
+	return count;
+}
+
+/**
+ * cds_get_channel_list_with_power() - retrieve channel list with power
+ * @base_channels: base channels
+ * @num_base_channels: number of base channels
+ * @channels_40mhz: 40 MHz channels
+ * @num_40mhz_channels: number of 40 Mhz channels
+ *
+ * Either output pair may be NULL, in which case that list is skipped.
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+CDF_STATUS cds_get_channel_list_with_power(tChannelListWithPower *
+					   base_channels,
+					   uint8_t *num_base_channels,
+					   tChannelListWithPower *
+					   channels_40mhz,
+					   uint8_t *num_40mhz_channels)
+{
+	CDF_STATUS status = CDF_STATUS_SUCCESS;
+	int count;
+
+	if (base_channels && num_base_channels) {
+		/* 2.4 GHz channels first, then 5 GHz */
+		count = cds_append_channel_power(0, RF_CHAN_14,
+						 base_channels, 0);
+		count = cds_append_channel_power(RF_CHAN_36, RF_CHAN_184,
+						 base_channels, count);
+		*num_base_channels = count;
+	}
+
+	if (channels_40mhz && num_40mhz_channels) {
+		/* 2.4 GHz bonded channels first, then 5 GHz bonded */
+		count = cds_append_channel_power(RF_CHAN_BOND_3,
+						 RF_CHAN_BOND_11,
+						 channels_40mhz, 0);
+		count = cds_append_channel_power(RF_CHAN_BOND_38,
+						 RF_CHAN_BOND_163,
+						 channels_40mhz, count);
+		*num_40mhz_channels = count;
+	}
+
+	return status;
+}
+
+/**
+ * cds_read_default_country() - read the stored default country
+ * @default_country: buffer receiving the two-character country code
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+CDF_STATUS cds_read_default_country(country_code_t default_country)
+{
+	memcpy(default_country, reg_table.default_country,
+	       sizeof(country_code_t));
+
+	pr_info("DefaultCountry is %c%c\n",
+		default_country[0],
+		default_country[1]);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_channel_enum() - get the channel enumeration
+ * @chan_num: channel number
+ *
+ * Linear scan of rf_channels up to RF_CHAN_184 (the last 20 MHz
+ * entry; bonded entries beyond it are not matched).
+ *
+ * Return: enum for the channel, or INVALID_RF_CHANNEL if not found
+ */
+static eRfChannels cds_get_channel_enum(uint32_t chan_num)
+{
+	uint32_t idx = 0;
+
+	while (idx <= RF_CHAN_184) {
+		if (rf_channels[idx].channelNum == chan_num)
+			return idx;
+		idx++;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "invalid channel number %d", chan_num);
+
+	return INVALID_RF_CHANNEL;
+}
+
+
+/**
+ * cds_get_channel_state() - get the channel state
+ * @chan_num: channel number
+ *
+ * Return: CHANNEL_STATE (CHANNEL_STATE_INVALID for unknown channels)
+ */
+CHANNEL_STATE cds_get_channel_state(uint32_t chan_num)
+{
+	eRfChannels chan_enum = cds_get_channel_enum(chan_num);
+
+	if (INVALID_RF_CHANNEL == chan_enum)
+		return CHANNEL_STATE_INVALID;
+
+	return reg_channels[chan_enum].enabled;
+}
+
+
+/**
+ * cds_get_bonded_channel_state() - get the bonded channel state
+ * @chan_num: channel number
+ * @ch_width: requested channel width
+ *
+ * Return: CHANNEL_STATE; CHANNEL_STATE_DISABLE when the requested
+ *	   width is restricted by the channel's flags
+ */
+CHANNEL_STATE cds_get_bonded_channel_state(uint32_t chan_num,
+					   enum channel_width ch_width)
+{
+	eRfChannels chan_enum;
+	bool bw_enabled = false;
+
+	chan_enum = cds_get_channel_enum(chan_num);
+	if (INVALID_RF_CHANNEL == chan_enum)
+		return CHANNEL_STATE_INVALID;
+
+	if (reg_channels[chan_enum].enabled) {
+		switch (ch_width) {
+		case CHAN_WIDTH_5MHZ:
+			/* 5 MHz has no corresponding restriction flag */
+			bw_enabled = true;
+			break;
+		case CHAN_WIDTH_10MHZ:
+			bw_enabled = !(reg_channels[chan_enum].flags &
+				       IEEE80211_CHAN_NO_10MHZ);
+			break;
+		case CHAN_WIDTH_20MHZ:
+			bw_enabled = !(reg_channels[chan_enum].flags &
+				       IEEE80211_CHAN_NO_20MHZ);
+			break;
+		case CHAN_WIDTH_40MHZ:
+			bw_enabled = !(reg_channels[chan_enum].flags &
+				       IEEE80211_CHAN_NO_HT40);
+			break;
+		case CHAN_WIDTH_80MHZ:
+			bw_enabled = !(reg_channels[chan_enum].flags &
+				       IEEE80211_CHAN_NO_80MHZ);
+			break;
+		case CHAN_WIDTH_160MHZ:
+			bw_enabled = !(reg_channels[chan_enum].flags &
+				       IEEE80211_CHAN_NO_160MHZ);
+			break;
+		default:
+			/* other widths were never matched by the
+			 * original if-chain either */
+			break;
+		}
+	}
+
+	if (bw_enabled)
+		return reg_channels[chan_enum].enabled;
+
+	return CHANNEL_STATE_DISABLE;
+}
+
+/**
+ * cds_get_max_channel_bw() - get the max channel bandwidth
+ * @channel_num: channel number
+ *
+ * Return: channel_width
+ */
+enum channel_width cds_get_max_channel_bw(uint32_t chan_num)
+{
+	eRfChannels chan_enum;
+	enum channel_width chan_bw = CHAN_WIDTH_0MHZ;
+
+	chan_enum = cds_get_channel_enum(chan_num);
+
+	if ((INVALID_RF_CHANNEL != chan_enum) &&
+	    (CHANNEL_STATE_DISABLE != reg_channels[chan_enum].enabled)) {
+
+		if (!(reg_channels[chan_enum].flags &
+		      IEEE80211_CHAN_NO_160MHZ))
+			chan_bw = CHAN_WIDTH_160MHZ;
+		else if (!(reg_channels[chan_enum].flags &
+			   IEEE80211_CHAN_NO_80MHZ))
+			chan_bw = CHAN_WIDTH_80MHZ;
+		else if (!(reg_channels[chan_enum].flags &
+			   IEEE80211_CHAN_NO_HT40))
+			chan_bw = CHAN_WIDTH_40MHZ;
+		else if (!(reg_channels[chan_enum].flags &
+			   IEEE80211_CHAN_NO_20MHZ))
+			chan_bw = CHAN_WIDTH_20MHZ;
+		else if (!(reg_channels[chan_enum].flags &
+			   IEEE80211_CHAN_NO_10MHZ))
+			chan_bw = CHAN_WIDTH_10MHZ;
+		else
+			chan_bw = CHAN_WIDTH_5MHZ;
+	}
+
+	return chan_bw;
+
+}
+
+static int cds_bw20_ch_index_to_bw40_ch_index(int k)
+{
+	int m = -1;
+	if (k >= RF_CHAN_1 && k <= RF_CHAN_14) {
+		m = k - RF_CHAN_1 + RF_CHAN_BOND_3;
+		if (m > RF_CHAN_BOND_11)
+			m = RF_CHAN_BOND_11;
+	} else if (k >= RF_CHAN_240 && k <= RF_CHAN_216) {
+		m = k - RF_CHAN_240 + RF_CHAN_BOND_242;
+		if (m > RF_CHAN_BOND_214)
+			m = RF_CHAN_BOND_214;
+	} else if (k >= RF_CHAN_36 && k <= RF_CHAN_64) {
+		m = k - RF_CHAN_36 + RF_CHAN_BOND_38;
+		if (m > RF_CHAN_BOND_62)
+			m = RF_CHAN_BOND_62;
+	}
+	else if (k >= RF_CHAN_100 && k <= RF_CHAN_144)
+	{
+		m = k - RF_CHAN_100 + RF_CHAN_BOND_102;
+		if (m > RF_CHAN_BOND_142)
+			m = RF_CHAN_BOND_142;
+	} else if (k >= RF_CHAN_149 && k <= RF_CHAN_165) {
+		m = k - RF_CHAN_149 + RF_CHAN_BOND_151;
+		if (m > RF_CHAN_BOND_163)
+			m = RF_CHAN_BOND_163;
+	}
+	return m;
+}
+
+/**
+ * cds_set_dfs_region() - set the dfs_region
+ * @dfs_region: the dfs_region to set; only used on kernels >= 3.14 (or
+ *	with backports) — older kernels ignore it and derive the region
+ *	from the stored 5 GHz CTL code instead
+ *
+ * Return: CDF_STATUS_SUCCESS if dfs_region set correctly
+ *         CDF_STATUS_E_EXISTS if hdd context not found
+ */
+CDF_STATUS cds_set_dfs_region(uint8_t dfs_region)
+{
+	hdd_context_t *hdd_ctx_ptr = NULL;
+
+	hdd_ctx_ptr = cds_get_context(CDF_MODULE_ID_HDD);
+
+	if (NULL == hdd_ctx_ptr)
+		return CDF_STATUS_E_EXISTS;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+	/* newer kernels supply the DFS region directly via cfg80211 */
+	hdd_ctx_ptr->reg.dfs_region = dfs_region;
+#else
+
+	/* remap the ctl code to dfs region code */
+	switch (hdd_ctx_ptr->reg.ctl_5g) {
+	case FCC:
+		hdd_ctx_ptr->reg.dfs_region = DFS_FCC_DOMAIN;
+		break;
+	case ETSI:
+		hdd_ctx_ptr->reg.dfs_region = DFS_ETSI_DOMAIN;
+		break;
+	case MKK:
+		hdd_ctx_ptr->reg.dfs_region = DFS_MKK4_DOMAIN;
+		break;
+	default:
+		/* set default dfs_region to FCC */
+		hdd_ctx_ptr->reg.dfs_region = DFS_FCC_DOMAIN;
+		break;
+	}
+#endif
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_dfs_region() - get the dfs_region
+ * @dfs_region: the dfs_region to return
+ *
+ * Return: CDF_STATUS_SUCCESS if dfs_region set correctly
+ *         CDF_STATUS_E_EXISTS if hdd context not found
+ */
+CDF_STATUS cds_get_dfs_region(uint8_t *dfs_region)
+{
+	hdd_context_t *hdd_ctx_ptr = NULL;
+
+	hdd_ctx_ptr = cds_get_context(CDF_MODULE_ID_HDD);
+
+	if (NULL == hdd_ctx_ptr)
+		return CDF_STATUS_E_EXISTS;
+
+	*dfs_region = hdd_ctx_ptr->reg.dfs_region;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_get_reg_domain_from_country_code() - get the regulatory domain
+ * @reg_domain_ptr: ptr to store regulatory domain
+ * @country_code: two-character country code to resolve
+ * @source: origin of the code (COUNTRY_INIT / COUNTRY_QUERY /
+ *	COUNTRY_IE / COUNTRY_USER)
+ *
+ * Resolves @country_code against country_info_table.  Depending on
+ * @source, may additionally send a regulatory hint to cfg80211 and (for
+ * COUNTRY_INIT) block up to REG_WAIT_TIME ms for the notifier to run.
+ *
+ * Return: CDF_STATUS_SUCCESS on success
+ *         CDF_STATUS_E_FAULT on error
+ *         CDF_STATUS_E_EXISTS when the global CDS context is missing
+ *         CDF_STATUS_E_EMPTY country table empty
+ */
+CDF_STATUS cds_get_reg_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr,
+						const country_code_t
+						country_code,
+						v_CountryInfoSource_t source)
+{
+	v_CONTEXT_t cds_context = NULL;
+	hdd_context_t *hdd_ctx = NULL;
+	struct wiphy *wiphy = NULL;
+	int i;
+
+	if (NULL == reg_domain_ptr) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Invalid reg domain pointer"));
+		return CDF_STATUS_E_FAULT;
+	}
+
+	*reg_domain_ptr = REGDOMAIN_COUNT;
+
+	if (NULL == country_code) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Country code array is NULL"));
+		return CDF_STATUS_E_FAULT;
+	}
+
+	if (0 == country_info_table.countryCount) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Reg domain table is empty"));
+		return CDF_STATUS_E_EMPTY;
+	}
+
+	cds_context = cds_get_global_context();
+
+	if (NULL != cds_context)
+		hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	else
+		return CDF_STATUS_E_EXISTS;
+
+	if (NULL == hdd_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Invalid pHddCtx pointer"));
+		return CDF_STATUS_E_FAULT;
+	}
+
+	wiphy = hdd_ctx->wiphy;
+
+	/* during SSR, return the previously resolved domain
+	 * (temp_reg_domain is a file-scope cache) instead of resolving */
+	if (cds_is_logp_in_progress()) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "SSR in progress, return");
+		*reg_domain_ptr = temp_reg_domain;
+		return CDF_STATUS_SUCCESS;
+	}
+
+	/* linear search of the country table for the alpha2 code */
+	temp_reg_domain = REGDOMAIN_COUNT;
+	for (i = 0; i < country_info_table.countryCount &&
+	     REGDOMAIN_COUNT == temp_reg_domain; i++) {
+		if (memcmp(country_code,
+			   country_info_table.countryInfo[i].countryCode,
+			    CDS_COUNTRY_CODE_LEN) == 0) {
+
+			temp_reg_domain =
+				country_info_table.countryInfo[i].regDomain;
+			break;
+		}
+	}
+
+	if (REGDOMAIN_COUNT == temp_reg_domain) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Country does not map to any Regulatory domain"));
+		temp_reg_domain = REGDOMAIN_WORLD;
+	}
+
+	if (COUNTRY_QUERY == source) {
+		*reg_domain_ptr = temp_reg_domain;
+		return CDF_STATUS_SUCCESS;
+	}
+
+	/* first driver-initiated set before the reg core has initialized:
+	 * hint cfg80211 and wait (bounded) for hdd_reg_notifier; "00" is
+	 * not a real country, so no hint is sent for it */
+	if ((COUNTRY_INIT == source) && (false == init_by_reg_core)) {
+		init_by_driver = true;
+		if (('0' != country_code[0]) || ('0' != country_code[1])) {
+			INIT_COMPLETION(hdd_ctx->reg_init);
+			regulatory_hint(wiphy, country_code);
+			wait_for_completion_timeout(&hdd_ctx->reg_init,
+					       msecs_to_jiffies(REG_WAIT_TIME));
+		}
+	} else if (COUNTRY_IE == source || COUNTRY_USER == source) {
+		regulatory_hint_user(country_code,
+				     NL80211_USER_REG_HINT_USER);
+	}
+
+	*reg_domain_ptr = temp_reg_domain;
+	return CDF_STATUS_SUCCESS;
+}
+
/*
 * cds_is_dsrc_channel() - check whether a center frequency belongs to a
 * DSRC (802.11p, 5.9 GHz) channel
 * @center_freq: channel center frequency in MHz
 *
 * Return: true if @center_freq is one of the known DSRC channels,
 *         false otherwise
 */
bool cds_is_dsrc_channel(uint16_t center_freq)
{
	/* known DSRC channel center frequencies, MHz */
	static const uint16_t dsrc_freqs[] = {
		5852, 5860, 5870, 5875, 5880,
		5890, 5900, 5905, 5910, 5920,
	};
	size_t i;

	for (i = 0; i < sizeof(dsrc_freqs) / sizeof(dsrc_freqs[0]); i++) {
		if (center_freq == dsrc_freqs[i])
			return true;
	}

	return false;
}
+
+#ifdef FEATURE_STATICALLY_ADD_11P_CHANNELS
+#define DEFAULT_11P_POWER (30)
+#endif
+
+/**
+ * cds_process_regulatory_data() - process regulatory data
+ * @wiphy: wiphy
+ * @band_capability: band_capability
+ *
+ * Return: int
+ */
+static int cds_process_regulatory_data(struct wiphy *wiphy,
+				       uint8_t band_capability, bool reset)
+{
+	int i, j, m;
+	int k = 0, n = 0;
+	hdd_context_t *hdd_ctx;
+	const struct ieee80211_reg_rule *reg_rule;
+	struct ieee80211_channel *chan;
+	sRegulatoryChannel *temp_chan_k;
+	sRegulatoryChannel *temp_chan_n;
+	sRegulatoryChannel *temp_chan;
+
+	hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (NULL == hdd_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "invalid hdd_ctx pointer");
+		return CDF_STATUS_E_FAULT;
+	}
+
+	hdd_ctx->isVHT80Allowed = 0;
+
+	if (band_capability == eCSR_BAND_24)
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+			  "band capability is set to 2G only");
+
+	for (i = 0, m = 0; i < IEEE80211_NUM_BANDS; i++) {
+
+		if (i == IEEE80211_BAND_2GHZ && band_capability == eCSR_BAND_5G)
+			continue;
+
+		else if (i == IEEE80211_BAND_5GHZ
+			 && band_capability == eCSR_BAND_24)
+			continue;
+
+		if (wiphy->bands[i] == NULL) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "error: wiphy->bands is NULL, i = %d", i);
+			continue;
+		}
+
+		if (i == 0)
+			m = 0;
+		else
+			m = wiphy->bands[i-1]->n_channels + m;
+
+		for (j = 0; j < wiphy->bands[i]->n_channels; j++) {
+
+			k = m + j;
+			n = cds_bw20_ch_index_to_bw40_ch_index(k);
+
+			chan = &(wiphy->bands[i]->channels[j]);
+			temp_chan_k =
+				&(reg_table.regDomains[temp_reg_domain].
+				  channels[k]);
+
+			temp_chan_n =
+				&(reg_table.regDomains[temp_reg_domain].
+				  channels[n]);
+
+			if ((!reset) &&
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+			    (wiphy->regulatory_flags &
+			     REGULATORY_CUSTOM_REG)) {
+#else
+				(wiphy->flags &
+				 WIPHY_FLAG_CUSTOM_REGULATORY)) {
+#endif
+				reg_rule = freq_reg_info(wiphy,
+							 MHZ_TO_KHZ(chan->
+								 center_freq));
+
+				if (!IS_ERR(reg_rule)) {
+					chan->flags &=
+						~IEEE80211_CHAN_DISABLED;
+
+					if (!(reg_rule->flags &
+					      NL80211_RRF_DFS)) {
+						CDF_TRACE(CDF_MODULE_ID_CDF,
+							  CDF_TRACE_LEVEL_INFO,
+							  "%s: Remove passive scan restriction for %u",
+							  __func__,
+							  chan->center_freq);
+						chan->flags &=
+							~IEEE80211_CHAN_RADAR;
+					}
+
+					if (!(reg_rule->flags &
+					      NL80211_RRF_PASSIVE_SCAN)) {
+						CDF_TRACE(CDF_MODULE_ID_CDF,
+							  CDF_TRACE_LEVEL_INFO,
+							  "%s: Remove passive scan restriction for %u",
+							  __func__,
+							  chan->center_freq);
+						chan->flags &=
+						   ~IEEE80211_CHAN_PASSIVE_SCAN;
+					}
+
+					if (!(reg_rule->flags &
+					      NL80211_RRF_NO_IBSS)) {
+						CDF_TRACE(CDF_MODULE_ID_CDF,
+							  CDF_TRACE_LEVEL_INFO,
+							  "%s: Remove no ibss restriction for %u",
+							  __func__,
+							  chan->center_freq);
+						chan->flags &=
+							~IEEE80211_CHAN_NO_IBSS;
+					}
+
+					chan->max_power = MBM_TO_DBM(reg_rule->
+								     power_rule.
+								     max_eirp);
+				}
+			}
+
+#ifdef FEATURE_STATICALLY_ADD_11P_CHANNELS
+			if (is_dsrc_channel(chan->center_freq)) {
+				temp_chan_k->enabled =
+					CHANNEL_STATE_ENABLE;
+				temp_chan_k->pwrLimit =
+					DEFAULT_11P_POWER;
+				temp_chan_k->flags = chan->flags;
+			} else
+#endif
+			if (chan->flags & IEEE80211_CHAN_DISABLED) {
+				temp_chan_k->enabled =
+					CHANNEL_STATE_DISABLE;
+				temp_chan_k->flags = chan->flags;
+				if (n != -1) {
+					temp_chan_n->enabled =
+						CHANNEL_STATE_DISABLE;
+					temp_chan_n->flags = chan->flags;
+				}
+			} else if (chan->flags &
+				   (IEEE80211_CHAN_RADAR |
+				    IEEE80211_CHAN_PASSIVE_SCAN
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+					|
+					IEEE80211_CHAN_INDOOR_ONLY
+#endif
+				)) {
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+				if (chan->flags &
+				    IEEE80211_CHAN_INDOOR_ONLY)
+					chan->flags |=
+						IEEE80211_CHAN_PASSIVE_SCAN;
+#endif
+				temp_chan_k->enabled = CHANNEL_STATE_DFS;
+				temp_chan_k->pwrLimit =
+					chan->max_power;
+				temp_chan_k->flags = chan->flags;
+
+				if (n != -1) {
+					if ((chan->flags &
+					     IEEE80211_CHAN_NO_HT40) ==
+					    IEEE80211_CHAN_NO_HT40) {
+						temp_chan_n->enabled =
+							CHANNEL_STATE_DISABLE;
+					} else {
+						temp_chan_n->enabled =
+							CHANNEL_STATE_DFS;
+						temp_chan_n->pwrLimit =
+							 chan->max_power-3;
+					}
+					temp_chan_n->flags = chan->flags;
+				}
+				if ((chan->flags &
+				     IEEE80211_CHAN_NO_80MHZ) == 0)
+					hdd_ctx->isVHT80Allowed = 1;
+			} else {
+				temp_chan_k->enabled = CHANNEL_STATE_ENABLE;
+				temp_chan_k->pwrLimit = chan->max_power;
+				temp_chan_k->flags = chan->flags;
+				if (n != -1) {
+					if ((chan->flags &
+					     IEEE80211_CHAN_NO_HT40) ==
+					    IEEE80211_CHAN_NO_HT40) {
+						temp_chan_n->enabled =
+							CHANNEL_STATE_DISABLE;
+					} else {
+						temp_chan_n->enabled =
+							CHANNEL_STATE_ENABLE;
+						temp_chan_n->pwrLimit =
+							chan->max_power - 3;
+					}
+					temp_chan_n->flags = chan->flags;
+				}
+				if ((chan->flags &
+				     IEEE80211_CHAN_NO_80MHZ) == 0)
+					hdd_ctx->isVHT80Allowed = 1;
+			}
+		}
+	}
+
+	if (0 == (hdd_ctx->reg.eeprom_rd_ext &
+		  (1 << WHAL_REG_EXT_FCC_CH_144))) {
+		temp_chan = &(reg_table.regDomains[temp_reg_domain].
+			      channels[RF_CHAN_144]);
+		temp_chan->enabled =
+			CHANNEL_STATE_DISABLE;
+	}
+
+	if (k == 0)
+		return -1;
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) && !defined(WITH_BACKPORTS)
+/**
+ * restore_custom_reg_settings() - restore custom reg settings
+ * @wiphy: wiphy structure
+ *
+ * Return: void
+ */
+static void restore_custom_reg_settings(struct wiphy *wiphy)
+{
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_band band;
+	struct ieee80211_channel *chan;
+	int i;
+
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		sband = wiphy->bands[band];
+		if (!sband)
+			continue;
+		for (i = 0; i < sband->n_channels; i++) {
+			chan = &sband->channels[i];
+			chan->flags = chan->orig_flags;
+			chan->max_antenna_gain = chan->orig_mag;
+			chan->max_power = chan->orig_mpwr;
+		}
+	}
+}
+#endif
+
+/**
+ * hdd_reg_notifier() - regulatory notifier
+ * @wiphy: wiphy
+ * @request: regulatory request
+ *
+ * cfg80211 callback invoked when the regulatory domain changes (set by
+ * the core, the driver, or the user).  Rebuilds the driver channel
+ * table for the new country, informs SME, pushes CTL/DFS settings to
+ * firmware and, for driver-initiated hints, completes
+ * hdd_ctx->reg_init.
+ *
+ * Return: void or int
+ */
+void hdd_reg_notifier(struct wiphy *wiphy,
+		      struct regulatory_request *request)
+{
+	hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
+	eCsrBand band_capability = eCSR_BAND_ALL;
+	country_code_t country_code;
+	int i;
+	bool vht80_allowed;
+	bool reset = false;
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  FL("country: %c%c, initiator %d, dfs_region: %d"),
+		  request->alpha2[0],
+		  request->alpha2[1],
+		  request->initiator,
+		  request->dfs_region);
+
+	if (NULL == hdd_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Invalid pHddCtx pointer"));
+		return;
+	}
+
+	if (hdd_ctx->isUnloadInProgress || hdd_ctx->isLogpInProgress) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Unloading or SSR in Progress, Ignore!!!",
+			  __func__);
+		return;
+	}
+
+	sme_get_freq_band(hdd_ctx->hHal, &band_capability);
+
+	/* first check if this callback is in response to the driver callback */
+
+	switch (request->initiator) {
+	case NL80211_REGDOM_SET_BY_DRIVER:
+	case NL80211_REGDOM_SET_BY_CORE:
+	case NL80211_REGDOM_SET_BY_USER:
+
+		/* ignore the reg core's initial hint until either the
+		 * driver or the reg core has performed initialization */
+		if ((false == init_by_driver) &&
+		    (false == init_by_reg_core)) {
+
+			if (NL80211_REGDOM_SET_BY_CORE == request->initiator) {
+				return;
+			}
+			init_by_reg_core = true;
+		}
+
+		if ((NL80211_REGDOM_SET_BY_DRIVER == request->initiator) &&
+		    (true == init_by_driver)) {
+
+			/*
+			 * restore the driver regulatory flags since
+			 * regulatory_hint may have
+			 * changed them
+			 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+			wiphy->regulatory_flags = hdd_ctx->reg.reg_flags;
+#else
+			wiphy->flags = hdd_ctx->reg.reg_flags;
+#endif
+		}
+
+		/* record who set the country; for core-initiated hints on a
+		 * custom-regulatory wiphy, or a user reset to "00" on old
+		 * kernels, process the channels in "reset" mode */
+		if (NL80211_REGDOM_SET_BY_CORE == request->initiator) {
+			hdd_ctx->reg.cc_src = COUNTRY_CODE_SET_BY_CORE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(WITH_BACKPORTS)
+			if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+#else
+				if (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+#endif
+					reset = true;
+		} else if (NL80211_REGDOM_SET_BY_DRIVER == request->initiator)
+			hdd_ctx->reg.cc_src = COUNTRY_CODE_SET_BY_DRIVER;
+		else {
+			hdd_ctx->reg.cc_src = COUNTRY_CODE_SET_BY_USER;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) && !defined(WITH_BACKPORTS)
+			if ((request->alpha2[0] == '0') &&
+			    (request->alpha2[1] == '0') &&
+			    (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)) {
+				restore_custom_reg_settings(wiphy);
+				reset = true;
+			}
+#endif
+		}
+
+		/* first lookup the country in the local database */
+		country_code[0] = request->alpha2[0];
+		country_code[1] = request->alpha2[1];
+
+		hdd_ctx->reg.alpha2[0] = request->alpha2[0];
+		hdd_ctx->reg.alpha2[1] = request->alpha2[1];
+
+		cds_update_regulatory_info(hdd_ctx);
+
+		temp_reg_domain = REGDOMAIN_COUNT;
+		for (i = 0; i < country_info_table.countryCount &&
+		     REGDOMAIN_COUNT == temp_reg_domain; i++) {
+			if (memcmp(country_code,
+				  country_info_table.countryInfo[i].countryCode,
+				  CDS_COUNTRY_CODE_LEN) == 0) {
+
+				temp_reg_domain =
+				country_info_table.countryInfo[i].regDomain;
+				break;
+			}
+		}
+
+		/* unknown country: fall back to the world domain */
+		if (REGDOMAIN_COUNT == temp_reg_domain)
+			temp_reg_domain = REGDOMAIN_WORLD;
+
+		vht80_allowed = hdd_ctx->isVHT80Allowed;
+		if (cds_process_regulatory_data(wiphy, band_capability,
+						reset) == 0) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+				  (" regulatory entry created"));
+		}
+		/* re-evaluate phymode if VHT80 availability changed */
+		if (hdd_ctx->isVHT80Allowed != vht80_allowed)
+			hdd_checkandupdate_phymode(hdd_ctx);
+
+		/* unblock cds_get_reg_domain_from_country_code(), which
+		 * waits on reg_init after a driver-initiated hint */
+		if (NL80211_REGDOM_SET_BY_DRIVER == request->initiator)
+			complete(&hdd_ctx->reg_init);
+
+		if (request->alpha2[0] == '0'
+		    && request->alpha2[1] == '0') {
+			sme_generic_change_country_code(hdd_ctx->hHal,
+							country_code,
+							REGDOMAIN_COUNT);
+		} else {
+			sme_generic_change_country_code(hdd_ctx->hHal,
+							country_code,
+							temp_reg_domain);
+		}
+
+		cds_fill_and_send_ctl_to_fw(&hdd_ctx->reg);
+
+		cds_set_dfs_region(request->dfs_region);
+
+		cds_set_wma_dfs_region(&hdd_ctx->reg);
+		/* falls through into default, which only breaks */
+	default:
+		break;
+	}
+
+	return;
+}
+
+/**
+ * cds_regulatory_init() - regulatory_init
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_regulatory_init(void)
+{
+	v_CONTEXT_t cds_context = NULL;
+	hdd_context_t *hdd_ctx = NULL;
+	struct wiphy *wiphy = NULL;
+	int ret_val = 0;
+	struct regulatory *reg_info;
+
+	cds_context = cds_get_global_context();
+
+	if (!cds_context)
+		return CDF_STATUS_E_FAULT;
+
+	hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (!hdd_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Invalid pHddCtx pointer"));
+		return CDF_STATUS_E_FAULT;
+	}
+
+	wiphy = hdd_ctx->wiphy;
+
+	reg_info = &hdd_ctx->reg;
+
+	cds_regulatory_wiphy_init(hdd_ctx, reg_info, wiphy);
+
+	temp_reg_domain = REGDOMAIN_WORLD;
+
+	if (cds_process_regulatory_data(wiphy,
+					hdd_ctx->config->
+					nBandCapability, true) != 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  ("Error while creating regulatory entry"));
+		return CDF_STATUS_E_FAULT;
+	}
+
+	reg_info->cc_src = COUNTRY_CODE_SET_BY_DRIVER;
+
+	ret_val = cds_fill_some_regulatory_info(reg_info);
+	if (ret_val) {
+		cdf_print(KERN_ERR "Error in getting country code\n");
+		return ret_val;
+	}
+
+	reg_table.default_country[0] = reg_info->alpha2[0];
+	reg_table.default_country[1] = reg_info->alpha2[1];
+
+	init_completion(&hdd_ctx->reg_init);
+
+	cds_fill_and_send_ctl_to_fw(reg_info);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_set_reg_domain() - set regulatory domain
+ * @client_ctxt: client context
+ * @reg_domain: regulatory domain
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS cds_set_reg_domain(void *client_ctxt, v_REGDOMAIN_t reg_domain)
+{
+	if (reg_domain >= REGDOMAIN_COUNT) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "CDS set reg domain, invalid REG domain ID %d",
+			  reg_domain);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	reg_channels = reg_table.regDomains[reg_domain].channels;
+
+	return CDF_STATUS_SUCCESS;
+}

+ 699 - 0
core/cds/src/cds_regdomain.c

@@ -0,0 +1,699 @@
+/*
+ * Copyright (c) 2011,2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Notifications and licenses are retained for attribution purposes only.
+ */
+/*
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2005-2006 Atheros Communications, Inc.
+ * Copyright (c) 2010, Atheros Communications Inc.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the following conditions are met:
+ * 1. The materials contained herein are unmodified and are used
+ *    unmodified.
+ * 2. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following NO
+ *    ''WARRANTY'' disclaimer below (''Disclaimer''), without
+ *    modification.
+ * 3. Redistributions in binary form must reproduce at minimum a
+ *    disclaimer similar to the Disclaimer below and any redistribution
+ *    must be conditioned upon including a substantially similar
+ *    Disclaimer requirement for further binary redistribution.
+ * 4. Neither the names of the above-listed copyright holders nor the
+ *    names of any contributors may be used to endorse or promote
+ *    product derived from this software without specific prior written
+ *    permission.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT,
+ * MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
+ * FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+#include <cdf_types.h>
+#include "wma.h"
+#include "cds_regdomain.h"
+#include "cds_regdomain_common.h"
+
+/* operating classes supported in the current regulatory domain; not
+ * referenced in this part of the file — presumably filled in elsewhere
+ * (TODO(review): confirm) */
+static regdm_supp_op_classes regdm_curr_supp_opp_classes = { 0 };
+
+/*
+ * Each row appears to be {operating class id, channel spacing (MHz),
+ * bandwidth/offset designator, channel set}, terminated by an all-zero
+ * row.  TODO(review): confirm field order against regdm_op_class_map_t.
+ */
+/* Global Operating Classes */
+regdm_op_class_map_t global_op_class[] = {
+	{81, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
+	{82, 25, BW20, {14}},
+	{83, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9}},
+	{84, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13}},
+	{115, 20, BW20, {36, 40, 44, 48}},
+	{116, 40, BW40_LOW_PRIMARY, {36, 44}},
+	{117, 40, BW40_HIGH_PRIMARY, {40, 48}},
+	{118, 20, BW20, {52, 56, 60, 64}},
+	{119, 40, BW40_LOW_PRIMARY, {52, 60}},
+	{120, 40, BW40_HIGH_PRIMARY, {56, 64}},
+	{121, 20, BW20,
+	 {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
+	{122, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
+	{123, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
+	{125, 20, BW20, {149, 153, 157, 161, 165, 169}},
+	{126, 40, BW40_LOW_PRIMARY, {149, 157}},
+	{127, 40, BW40_HIGH_PRIMARY, {153, 161}},
+	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
+			   112, 116, 120, 124, 128, 132, 136, 140, 144,
+			   149, 153, 157, 161} },
+	{0, 0, 0, {0}},
+};
+
+/* Operating Classes in US */
+regdm_op_class_map_t us_op_class[] = {
+	{1, 20, BW20, {36, 40, 44, 48}},
+	{2, 20, BW20, {52, 56, 60, 64}},
+	{4, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140,
+			144} },
+	{5, 20, BW20, {149, 153, 157, 161, 165}},
+	{12, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}},
+	{22, 40, BW40_LOW_PRIMARY, {36, 44}},
+	{23, 40, BW40_LOW_PRIMARY, {52, 60}},
+	{24, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
+	{26, 40, BW40_LOW_PRIMARY, {149, 157}},
+	{27, 40, BW40_HIGH_PRIMARY, {40, 48}},
+	{28, 40, BW40_HIGH_PRIMARY, {56, 64}},
+	{29, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
+	{31, 40, BW40_HIGH_PRIMARY, {153, 161}},
+	{32, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7}},
+	{33, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11}},
+	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
+			   112, 116, 120, 124, 128, 132, 136, 140, 144,
+			   149, 153, 157, 161} },
+	{0, 0, 0, {0}},
+};
+
+/* Operating Classes in Europe */
+regdm_op_class_map_t euro_op_class[] = {
+	{1, 20, BW20, {36, 40, 44, 48}},
+	{2, 20, BW20, {52, 56, 60, 64}},
+	{3, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
+	{4, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
+	{5, 40, BW40_LOW_PRIMARY, {36, 44}},
+	{6, 40, BW40_LOW_PRIMARY, {52, 60}},
+	{7, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
+	{8, 40, BW40_HIGH_PRIMARY, {40, 48}},
+	{9, 40, BW40_HIGH_PRIMARY, {56, 64}},
+	{10, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
+	{11, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9}},
+	{12, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13}},
+	{17, 20, BW20, {149, 153, 157, 161, 165, 169}},
+	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
+			   116, 120, 124, 128} },
+	{0, 0, 0, {0}},
+};
+
+/* Operating Classes in Japan */
+regdm_op_class_map_t japan_op_class[] = {
+	{1, 20, BW20, {36, 40, 44, 48}},
+	{30, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
+	{31, 25, BW20, {14}},
+	{32, 20, BW20, {52, 56, 60, 64}},
+	{34, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}},
+	{36, 40, BW40_LOW_PRIMARY, {36, 44}},
+	{37, 40, BW40_LOW_PRIMARY, {52, 60}},
+	{39, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132}},
+	{41, 40, BW40_HIGH_PRIMARY, {40, 48}},
+	{42, 40, BW40_HIGH_PRIMARY, {56, 64}},
+	{44, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136}},
+	{128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112,
+			   116, 120, 124, 128} },
+	{0, 0, 0, {0}},
+};
+
+/*
+ * By default, the regdomain tables reference the common tables
+ * from regdomain_common.h.  These default tables can be replaced
+ * by calls to populate_regdomain_tables functions.
+ */
+HAL_REG_DMN_TABLES ol_regdmn_rdt = {
+	ah_cmn_reg_domain_pairs,    /* regDomainPairs */
+	ah_cmn_all_countries,      /* allCountries */
+	ah_cmn_reg_domains,        /* allRegDomains */
+	CDF_ARRAY_SIZE(ah_cmn_reg_domain_pairs), /* regDomainPairsCt */
+	CDF_ARRAY_SIZE(ah_cmn_all_countries),   /* allCountriesCt */
+	CDF_ARRAY_SIZE(ah_cmn_reg_domains),     /* allRegDomainCt */
+};
+
+static uint16_t get_eeprom_rd(uint16_t rd)
+{
+	return rd & ~WORLDWIDE_ROAMING_FLAG;
+}
+
+/*
+ * Return whether or not the regulatory domain/country in EEPROM
+ * is acceptable.
+ */
+static bool regdmn_is_eeprom_valid(uint16_t rd)
+{
+	int32_t i;
+
+	if (rd & COUNTRY_ERD_FLAG) {
+		uint16_t cc = rd & ~COUNTRY_ERD_FLAG;
+		for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++)
+			if (ol_regdmn_rdt.allCountries[i].countryCode == cc)
+				return true;
+	} else {
+		for (i = 0; i < ol_regdmn_rdt.regDomainPairsCt; i++)
+			if (ol_regdmn_rdt.regDomainPairs[i].regDmnEnum == rd)
+				return true;
+	}
+	/* TODO: Bring it under debug level */
+	cdf_print("%s: invalid regulatory domain/country code 0x%x\n",
+		  __func__, rd);
+	return false;
+}
+
+/*
+ * Find the pointer to the country element in the country table
+ * corresponding to the country code
+ */
+static const COUNTRY_CODE_TO_ENUM_RD *find_country(uint16_t country_code)
+{
+	int32_t i;
+
+	for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
+		if (ol_regdmn_rdt.allCountries[i].countryCode == country_code)
+			return &ol_regdmn_rdt.allCountries[i];
+	}
+	return NULL;            /* Not found */
+}
+
+int32_t cds_get_country_from_alpha2(uint8_t *alpha2)
+{
+	int32_t i;
+
+	for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
+		if (ol_regdmn_rdt.allCountries[i].isoName[0] == alpha2[0] &&
+		    ol_regdmn_rdt.allCountries[i].isoName[1] == alpha2[1])
+			return ol_regdmn_rdt.allCountries[i].countryCode;
+	}
+	return CTRY_DEFAULT;
+}
+
+static uint16_t regdmn_get_default_country(uint16_t rd)
+{
+	int32_t i;
+
+	if (rd & COUNTRY_ERD_FLAG) {
+		const COUNTRY_CODE_TO_ENUM_RD *country = NULL;
+		uint16_t cc = rd & ~COUNTRY_ERD_FLAG;
+
+		country = find_country(cc);
+		if (country)
+			return cc;
+	}
+
+	/*
+	 * Check reg domains that have only one country
+	 */
+	for (i = 0; i < ol_regdmn_rdt.regDomainPairsCt; i++) {
+		if (ol_regdmn_rdt.regDomainPairs[i].regDmnEnum == rd) {
+			if (ol_regdmn_rdt.regDomainPairs[i].singleCC != 0)
+				return ol_regdmn_rdt.regDomainPairs[i].singleCC;
+			else
+				i = ol_regdmn_rdt.regDomainPairsCt;
+		}
+	}
+	return CTRY_DEFAULT;
+}
+
+static const REG_DMN_PAIR_MAPPING *get_regdmn_pair(uint16_t reg_dmn)
+{
+	int32_t i;
+
+	for (i = 0; i < ol_regdmn_rdt.regDomainPairsCt; i++) {
+		if (ol_regdmn_rdt.regDomainPairs[i].regDmnEnum == reg_dmn)
+			return &ol_regdmn_rdt.regDomainPairs[i];
+	}
+	return NULL;
+}
+
+static const REG_DOMAIN *get_regdmn(uint16_t reg_dmn)
+{
+	int32_t i;
+
+	for (i = 0; i < ol_regdmn_rdt.regDomainsCt; i++) {
+		if (ol_regdmn_rdt.regDomains[i].regDmnEnum == reg_dmn)
+			return &ol_regdmn_rdt.regDomains[i];
+	}
+	return NULL;
+}
+
+static const COUNTRY_CODE_TO_ENUM_RD *get_country_from_rd(uint16_t regdmn)
+{
+	int32_t i;
+
+	for (i = 0; i < ol_regdmn_rdt.allCountriesCt; i++) {
+		if (ol_regdmn_rdt.allCountries[i].regDmnEnum == regdmn)
+			return &ol_regdmn_rdt.allCountries[i];
+	}
+	return NULL;            /* Not found */
+}
+
+/*
+ * Some users have reported their EEPROM programmed with
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to WOR0_WORLD
+ */
+static void regd_sanitize(struct regulatory *reg)
+{
+	if (reg->reg_domain != COUNTRY_ERD_FLAG)
+		return;
+	reg->reg_domain = WOR0_WORLD;
+}
+
+/*
+ * cds_fill_some_regulatory_info() - derive the country code, alpha2
+ * string and regdomain pair from the (sanitized) EEPROM regulatory
+ * domain value and store them in @reg.
+ *
+ * Returns 0 on success, -EINVAL when the EEPROM value is invalid or the
+ * country / regdomain pair cannot be resolved.
+ */
+int32_t cds_fill_some_regulatory_info(struct regulatory *reg)
+{
+	uint16_t country_code;
+	uint16_t regdmn, rd;
+	const COUNTRY_CODE_TO_ENUM_RD *country = NULL;
+
+	regd_sanitize(reg);
+	rd = reg->reg_domain;
+
+	if (!regdmn_is_eeprom_valid(rd))
+		return -EINVAL;
+
+	/* strip the worldwide-roaming flag */
+	regdmn = get_eeprom_rd(rd);
+
+	country_code = regdmn_get_default_country(regdmn);
+	if (country_code == CTRY_DEFAULT && regdmn == CTRY_DEFAULT) {
+		/* Set to CTRY_UNITED_STATES for testing */
+		country_code = CTRY_UNITED_STATES;
+	}
+
+	/* a known country overrides the EEPROM regdomain */
+	if (country_code != CTRY_DEFAULT) {
+		country = find_country(country_code);
+		if (!country) {
+			/* TODO: Bring it under debug level */
+			cdf_print(KERN_ERR "Not a valid country code\n");
+			return -EINVAL;
+		}
+		regdmn = country->regDmnEnum;
+	}
+
+	reg->regpair = get_regdmn_pair(regdmn);
+	if (!reg->regpair) {
+		/* TODO: Bring it under debug level */
+		cdf_print(KERN_ERR "No regpair is found, can not proceeed\n");
+		return -EINVAL;
+	}
+	reg->country_code = country_code;
+
+	if (!country)
+		country = get_country_from_rd(regdmn);
+
+	if (country) {
+		reg->alpha2[0] = country->isoName[0];
+		reg->alpha2[1] = country->isoName[1];
+	} else {
+		/* "00" appears to act as a no-country marker elsewhere in
+		 * the driver — TODO(review): confirm consumers treat it so */
+		reg->alpha2[0] = '0';
+		reg->alpha2[1] = '0';
+	}
+
+	return 0;
+}
+
+/*
+ * regdmn_get_regdmn_for_country() - map a two-letter country string to its
+ * regulatory domain enum.
+ *
+ * Return: the regDmnEnum on a table hit, -1 when the country is unknown.
+ */
+int32_t regdmn_get_regdmn_for_country(uint8_t *alpha2)
+{
+	uint8_t idx;
+
+	for (idx = 0; idx < ol_regdmn_rdt.allCountriesCt; idx++) {
+		const COUNTRY_CODE_TO_ENUM_RD *entry =
+			&ol_regdmn_rdt.allCountries[idx];
+
+		if (entry->isoName[0] == alpha2[0] &&
+		    entry->isoName[1] == alpha2[1])
+			return entry->regDmnEnum;
+	}
+
+	return -1;
+}
+
+/*
+ * is_chan_bit_mask_zero() - check whether a BMLEN-word channel bitmask
+ * contains no set bits at all.
+ */
+static bool is_chan_bit_mask_zero(const uint64_t *bitmask)
+{
+	int idx = 0;
+
+	while (idx < BMLEN) {
+		if (bitmask[idx])
+			return false;
+		idx++;
+	}
+
+	return true;
+}
+
+/*
+ * regdmn_getwmodesnreg() - filter the hardware mode mask by country rules
+ * @modesAvail: bitmask of wireless modes the hardware supports
+ * @country: per-country regulatory permission flags
+ * @rd5GHz: 5 GHz regulatory domain, consulted for 11a channel coverage
+ *
+ * Clears every mode bit that the country's regulations disallow. Note that
+ * the 11ac VHT bits are gated by the corresponding 11na/11ng HT permission
+ * flags (there are no separate VHT flags in the country table).
+ *
+ * Return: the filtered mode mask.
+ */
+static uint32_t regdmn_getwmodesnreg(uint32_t modesAvail,
+				     const COUNTRY_CODE_TO_ENUM_RD *country,
+				     const REG_DOMAIN *rd5GHz)
+{
+
+	/* Check country regulations for allowed modes */
+	if ((modesAvail & (REGDMN_MODE_11A_TURBO | REGDMN_MODE_TURBO)) &&
+	    (!country->allow11aTurbo))
+		modesAvail &= ~(REGDMN_MODE_11A_TURBO | REGDMN_MODE_TURBO);
+
+	if ((modesAvail & REGDMN_MODE_11G_TURBO) && (!country->allow11gTurbo))
+		modesAvail &= ~REGDMN_MODE_11G_TURBO;
+
+	if ((modesAvail & REGDMN_MODE_11G) && (!country->allow11g))
+		modesAvail &= ~REGDMN_MODE_11G;
+
+	/* 11a is also dropped when the 5 GHz domain has no 11a channels. */
+	if ((modesAvail & REGDMN_MODE_11A) &&
+	    (is_chan_bit_mask_zero(rd5GHz->chan11a)))
+		modesAvail &= ~REGDMN_MODE_11A;
+
+	if ((modesAvail & REGDMN_MODE_11NG_HT20) && (!country->allow11ng20))
+		modesAvail &= ~REGDMN_MODE_11NG_HT20;
+
+	if ((modesAvail & REGDMN_MODE_11NA_HT20) && (!country->allow11na20))
+		modesAvail &= ~REGDMN_MODE_11NA_HT20;
+
+	if ((modesAvail & REGDMN_MODE_11NG_HT40PLUS) && (!country->allow11ng40))
+		modesAvail &= ~REGDMN_MODE_11NG_HT40PLUS;
+
+	if ((modesAvail & REGDMN_MODE_11NG_HT40MINUS) &&
+	    (!country->allow11ng40))
+		modesAvail &= ~REGDMN_MODE_11NG_HT40MINUS;
+
+	if ((modesAvail & REGDMN_MODE_11NA_HT40PLUS) && (!country->allow11na40))
+		modesAvail &= ~REGDMN_MODE_11NA_HT40PLUS;
+
+	if ((modesAvail & REGDMN_MODE_11NA_HT40MINUS) &&
+	    (!country->allow11na40))
+		modesAvail &= ~REGDMN_MODE_11NA_HT40MINUS;
+
+	/* VHT modes reuse the HT permission flags (see function comment). */
+	if ((modesAvail & REGDMN_MODE_11AC_VHT20) && (!country->allow11na20))
+		modesAvail &= ~REGDMN_MODE_11AC_VHT20;
+
+	if ((modesAvail & REGDMN_MODE_11AC_VHT40PLUS) &&
+	    (!country->allow11na40))
+		modesAvail &= ~REGDMN_MODE_11AC_VHT40PLUS;
+
+	if ((modesAvail & REGDMN_MODE_11AC_VHT40MINUS) &&
+	    (!country->allow11na40))
+		modesAvail &= ~REGDMN_MODE_11AC_VHT40MINUS;
+
+	if ((modesAvail & REGDMN_MODE_11AC_VHT80) && (!country->allow11na80))
+		modesAvail &= ~REGDMN_MODE_11AC_VHT80;
+
+	if ((modesAvail & REGDMN_MODE_11AC_VHT20_2G) && (!country->allow11ng20))
+		modesAvail &= ~REGDMN_MODE_11AC_VHT20_2G;
+
+	return modesAvail;
+}
+
+/*
+ * cds_fill_send_ctl_info_to_fw() - compute per-band conformance test limits
+ * and push the regdomain info to firmware
+ * @reg: regulatory handle; regpair and country_code must already be filled
+ *	(see cds_fill_some_regulatory_info())
+ * @modesAvail: bitmask of wireless modes supported by the hardware
+ * @modeSelect: bitmask of modes the caller wants considered
+ *
+ * For every selected-and-available mode, the base CTL of the owning band
+ * is OR'ed with a mode-specific CTL flag; the last matching mode per band
+ * wins. The results are cached in @reg and sent to firmware.
+ */
+void cds_fill_send_ctl_info_to_fw(struct regulatory *reg, uint32_t modesAvail,
+				  uint32_t modeSelect)
+{
+	const REG_DOMAIN *regdomain2G = NULL;
+	const REG_DOMAIN *regdomain5G = NULL;
+	int8_t ctl_2g, ctl_5g, ctl;
+	const REG_DOMAIN *rd = NULL;
+	const struct cmode *cm;
+	const COUNTRY_CODE_TO_ENUM_RD *country;
+	const REG_DMN_PAIR_MAPPING *regpair;
+
+	regpair = reg->regpair;
+	regdomain2G = get_regdmn(regpair->regDmn2GHz);
+	if (!regdomain2G) {
+		cdf_print(KERN_ERR "Failed to get regdmn 2G");
+		return;
+	}
+
+	regdomain5G = get_regdmn(regpair->regDmn5GHz);
+	if (!regdomain5G) {
+		cdf_print(KERN_ERR "Failed to get regdmn 5G");
+		return;
+	}
+
+	/* Fallback CTLs: the plain per-band conformance test limits. */
+	ctl_2g = regdomain2G->conformance_test_limit;
+	ctl_5g = regdomain5G->conformance_test_limit;
+
+	/* Restrict the mode mask by the country's regulations, if known. */
+	country = find_country(reg->country_code);
+	if (country != NULL)
+		modesAvail =
+			regdmn_getwmodesnreg(modesAvail, country, regdomain5G);
+
+	for (cm = modes; cm < &modes[CDF_ARRAY_SIZE(modes)]; cm++) {
+
+		if ((cm->mode & modeSelect) == 0)
+			continue;
+
+		if ((cm->mode & modesAvail) == 0)
+			continue;
+
+		/* Pick the band's regdomain and OR in the mode's CTL flag. */
+		switch (cm->mode) {
+		case REGDMN_MODE_TURBO:
+			rd = regdomain5G;
+			ctl = rd->conformance_test_limit | CTL_TURBO;
+			break;
+		case REGDMN_MODE_11A:
+		case REGDMN_MODE_11NA_HT20:
+		case REGDMN_MODE_11NA_HT40PLUS:
+		case REGDMN_MODE_11NA_HT40MINUS:
+		case REGDMN_MODE_11AC_VHT20:
+		case REGDMN_MODE_11AC_VHT40PLUS:
+		case REGDMN_MODE_11AC_VHT40MINUS:
+		case REGDMN_MODE_11AC_VHT80:
+			rd = regdomain5G;
+			ctl = rd->conformance_test_limit;
+			break;
+		case REGDMN_MODE_11B:
+			rd = regdomain2G;
+			ctl = rd->conformance_test_limit | CTL_11B;
+			break;
+		case REGDMN_MODE_11G:
+		case REGDMN_MODE_11NG_HT20:
+		case REGDMN_MODE_11NG_HT40PLUS:
+		case REGDMN_MODE_11NG_HT40MINUS:
+		case REGDMN_MODE_11AC_VHT20_2G:
+		case REGDMN_MODE_11AC_VHT40_2G:
+		case REGDMN_MODE_11AC_VHT80_2G:
+			rd = regdomain2G;
+			ctl = rd->conformance_test_limit | CTL_11G;
+			break;
+		case REGDMN_MODE_11G_TURBO:
+			rd = regdomain2G;
+			ctl = rd->conformance_test_limit | CTL_108G;
+			break;
+		case REGDMN_MODE_11A_TURBO:
+			rd = regdomain5G;
+			ctl = rd->conformance_test_limit | CTL_108G;
+			break;
+		default:
+			cdf_print(KERN_ERR "%s: Unkonwn HAL mode 0x%x\n",
+				  __func__, cm->mode);
+			continue;
+		}
+
+		/* Last matching mode in table order wins for each band. */
+		if (rd == regdomain2G)
+			ctl_2g = ctl;
+
+		if (rd == regdomain5G)
+			ctl_5g = ctl;
+	}
+
+	/* save the ctl information for future reference */
+	reg->ctl_5g = ctl_5g;
+	reg->ctl_2g = ctl_2g;
+
+	wma_send_regdomain_info_to_fw(reg->reg_domain, regpair->regDmn2GHz,
+				      regpair->regDmn5GHz, ctl_2g, ctl_5g);
+}
+
+/**
+ * cds_set_wma_dfs_region() - push the current DFS region down to WMA
+ * @reg: regulatory handle holding the dfs_region to apply
+ *
+ * Return: none
+ */
+void cds_set_wma_dfs_region(struct regulatory *reg)
+{
+	tp_wma_handle wma_handle = cds_get_context(CDF_MODULE_ID_WMA);
+
+	if (NULL == wma_handle) {
+		cdf_print(KERN_ERR "%s: Unable to get WMA handle", __func__);
+		return;
+	}
+
+	cdf_print("%s: dfs_region: %d", __func__, reg->dfs_region);
+	wma_set_dfs_region(wma_handle, reg->dfs_region);
+}
+
+/* Fetch WMA's mode-select mask and forward the CTL info to firmware. */
+void cds_fill_and_send_ctl_to_fw(struct regulatory *reg)
+{
+	uint32_t mode_select = 0xFFFFFFFF;
+	tp_wma_handle wma = cds_get_context(CDF_MODULE_ID_WMA);
+
+	if (NULL == wma) {
+		WMA_LOGE("%s: Unable to get WMA handle", __func__);
+		return;
+	}
+
+	wma_get_modeselect(wma, &mode_select);
+
+	cds_fill_send_ctl_info_to_fw(reg, wma->reg_cap.wireless_modes,
+				     mode_select);
+}
+
+/*
+ * cds_get_ctl_for_regdmn() - conformance test limit for a regdomain
+ *
+ * Returns FCC for the default country, the table's CTL on a match, and
+ * -1 — which wraps to 0xFF in the uint8_t return — when the domain is
+ * not found.
+ */
+uint8_t cds_get_ctl_for_regdmn(uint32_t reg_dmn)
+{
+	uint8_t idx;
+
+	if (reg_dmn == CTRY_DEFAULT)
+		return FCC;
+
+	for (idx = 0; idx < ol_regdmn_rdt.regDomainsCt; idx++) {
+		if (ol_regdmn_rdt.regDomains[idx].regDmnEnum == reg_dmn)
+			return ol_regdmn_rdt.regDomains[idx].
+			       conformance_test_limit;
+	}
+
+	return -1;
+}
+
+/*
+ * cds_get_regdmn_5g() - 5 GHz half of the regdomain pair for @reg_dmn.
+ *
+ * Return: the 5 GHz regdomain value, or 0 when @reg_dmn is not a known pair.
+ */
+uint16_t cds_get_regdmn_5g(uint32_t reg_dmn)
+{
+	uint16_t idx;
+
+	for (idx = 0; idx < ol_regdmn_rdt.regDomainPairsCt; idx++) {
+		if (ol_regdmn_rdt.regDomainPairs[idx].regDmnEnum == reg_dmn)
+			return ol_regdmn_rdt.regDomainPairs[idx].regDmn5GHz;
+	}
+
+	cdf_print("%s: invalid regulatory domain/country code 0x%x\n",
+		  __func__, reg_dmn);
+	return 0;
+}
+
+/*
+ * cds_regdm_get_opclass_from_channel() - operating class for a channel
+ *
+ * Walks the country-specific table ("US", "EU", "JP", else global) and
+ * returns the first operating class whose bandwidth offset matches
+ * (BWALL matches any offset) and whose channel list contains @channel.
+ *
+ * Return: the operating class, or 0 when none matches.
+ */
+uint16_t cds_regdm_get_opclass_from_channel(uint8_t *country, uint8_t channel,
+					    uint8_t offset)
+{
+	regdm_op_class_map_t *map;
+	uint16_t idx;
+
+	if (true == cdf_mem_compare(country, "US", 2))
+		map = us_op_class;
+	else if (true == cdf_mem_compare(country, "EU", 2))
+		map = euro_op_class;
+	else if (true == cdf_mem_compare(country, "JP", 2))
+		map = japan_op_class;
+	else
+		map = global_op_class;
+
+	/* Table is terminated by an entry with op_class == 0. */
+	for (; map->op_class; map++) {
+		if ((offset != map->offset) && (offset != BWALL))
+			continue;
+		for (idx = 0;
+		     idx < MAX_CHANNELS_PER_OPERATING_CLASS &&
+		     map->channels[idx]; idx++) {
+			if (map->channels[idx] == channel)
+				return map->op_class;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cds_regdm_set_curr_opclasses() - cache the current operating classes.
+ *
+ * Return: 0 on success, -1 (wrapped into uint16_t) when @num_classes
+ * exceeds SIR_MAC_MAX_SUPP_OPER_CLASSES.
+ */
+uint16_t cds_regdm_set_curr_opclasses(uint8_t num_classes, uint8_t *class)
+{
+	uint8_t idx;
+
+	if (num_classes > SIR_MAC_MAX_SUPP_OPER_CLASSES) {
+		cdf_print(KERN_ERR "%s: Invalid numClasses (%d)\n",
+			  __func__, num_classes);
+		return -1;
+	}
+
+	for (idx = 0; idx < num_classes; idx++)
+		regdm_curr_supp_opp_classes.classes[idx] = class[idx];
+
+	regdm_curr_supp_opp_classes.num_classes = num_classes;
+
+	return 0;
+}
+
+/*
+ * cds_regdm_get_curr_opclasses() - copy out the cached operating classes.
+ * @num_classes: out; receives the number of classes copied
+ * @class: out; receives the class values
+ *
+ * Return: 0 on success, -1 (wrapped into uint16_t) on a NULL argument.
+ */
+uint16_t cds_regdm_get_curr_opclasses(uint8_t *num_classes, uint8_t *class)
+{
+	uint8_t idx, count;
+
+	if ((NULL == num_classes) || (NULL == class)) {
+		cdf_print(KERN_ERR "%s: Either num_classes or class is null\n",
+			  __func__);
+		return -1;
+	}
+
+	count = regdm_curr_supp_opp_classes.num_classes;
+	for (idx = 0; idx < count; idx++)
+		class[idx] = regdm_curr_supp_opp_classes.classes[idx];
+
+	*num_classes = count;
+
+	return 0;
+}

+ 1270 - 0
core/cds/src/cds_sched.c

@@ -0,0 +1,1270 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ *  File: cds_sched.c
+ *
+ *  DOC: CDS Scheduler Implementation
+ */
+
+ /* Include Files */
+#include <cds_mq.h>
+#include <cds_api.h>
+#include <ani_global.h>
+#include <sir_types.h>
+#include <cdf_types.h>
+#include <lim_api.h>
+#include <sme_api.h>
+#include <wlan_qct_sys.h>
+#include "cds_sched.h"
+#include <wlan_hdd_power.h>
+#include "wma_types.h"
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+#if defined(QCA_CONFIG_SMP) && defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+/* Preprocessor Definitions and Constants */
+#define CDS_SCHED_THREAD_HEART_BEAT    INFINITE
+/* Milli seconds to delay SSR thread when an Entry point is Active */
+#define SSR_WAIT_SLEEP_TIME 200
+/* MAX iteration count to wait for Entry point to exit before
+ * we proceed with SSR in WD Thread
+ */
+#define MAX_SSR_WAIT_ITERATIONS 200
+#define MAX_SSR_PROTECT_LOG (16)
+
+/* Count of entry points currently executing inside the driver (for SSR). */
+static atomic_t ssr_protect_entry_count;
+
+/**
+ * struct ssr_protect - sub system restart(ssr) protection tracking table
+ * @func: Function which needs ssr protection
+ * @free: Flag to tell whether entry is free in table or not
+ * @pid: Process id which needs ssr protection
+ */
+struct ssr_protect {
+	const char *func;
+	bool  free;
+	uint32_t pid;
+};
+
+/* ssr_protect_lock guards ssr_protect_log. */
+static spinlock_t ssr_protect_lock;
+static struct ssr_protect ssr_protect_log[MAX_SSR_PROTECT_LOG];
+
+/* Global scheduler context; published by cds_sched_open(). */
+static p_cds_sched_context gp_cds_sched_context;
+
+static int cds_mc_thread(void *Arg);
+#ifdef QCA_CONFIG_SMP
+static int cds_ol_rx_thread(void *arg);
+/* CPU the ol_rx thread is currently affined to (0 means none chosen). */
+static unsigned long affine_cpu;
+static CDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
+#endif
+
+#ifdef QCA_CONFIG_SMP
+/* Assumed cores per cluster on the target SoC — TODO confirm per platform. */
+#define CDS_CORE_PER_CLUSTER (4)
+/*
+ * cds_set_cpus_allowed_ptr() - affine @task to the single CPU @cpu.
+ *
+ * Uses the kernel API on open-source builds, the CNSS wrapper otherwise;
+ * compiles to a no-op (returns 0) when neither is available.
+ */
+static int cds_set_cpus_allowed_ptr(struct task_struct *task, unsigned long cpu)
+{
+#ifdef WLAN_OPEN_SOURCE
+	return set_cpus_allowed_ptr(task, cpumask_of(cpu));
+#elif defined(CONFIG_CNSS)
+	return cnss_set_cpus_allowed_ptr(task, cpu);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * cds_cpu_hotplug_notify() - CPU hotplug notifier for rx-thread affinity
+ * @block: Pointer to the notifier block
+ * @state: hotplug event (CPU_ONLINE / CPU_DEAD handled)
+ * @hcpu: CPU number of the event, cast to a pointer
+ *
+ * Re-affines the ol_rx thread to a non-zero CPU when CPUs come and go,
+ * so rx processing stays off core 0. Does nothing while load/unload is
+ * in progress or before the rx thread exists.
+ *
+ * Return: NOTIFY_OK
+ */
+static int
+cds_cpu_hotplug_notify(struct notifier_block *block,
+		       unsigned long state, void *hcpu)
+{
+	unsigned long cpu = (unsigned long)hcpu;
+	unsigned long pref_cpu = 0;
+	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
+	int i;
+	unsigned int multi_cluster;
+	unsigned int num_cpus;
+
+	if ((NULL == pSchedContext) || (NULL == pSchedContext->ol_rx_thread))
+		return NOTIFY_OK;
+
+	if (cds_is_load_unload_in_progress())
+		return NOTIFY_OK;
+
+	num_cpus = num_possible_cpus();
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%s: RX CORE %d, STATE %d, NUM CPUS %d",
+		  __func__, (int)affine_cpu, (int)state, num_cpus);
+	multi_cluster = (num_cpus > CDS_CORE_PER_CLUSTER) ? 1 : 0;
+
+	switch (state) {
+	case CPU_ONLINE:
+		/* Single cluster: only rebalance if we are stuck on core 0. */
+		if ((!multi_cluster) && (affine_cpu != 0))
+			return NOTIFY_OK;
+
+		/* Pick a non-zero online CPU; on multi-cluster the scan
+		 * continues so the last (highest-numbered) CPU wins.
+		 */
+		for_each_online_cpu(i) {
+			if (i == 0)
+				continue;
+			pref_cpu = i;
+			if (!multi_cluster)
+				break;
+		}
+		break;
+	case CPU_DEAD:
+		if (cpu != affine_cpu)
+			return NOTIFY_OK;
+
+		affine_cpu = 0;
+		for_each_online_cpu(i) {
+			if (i == 0)
+				continue;
+			pref_cpu = i;
+			if (!multi_cluster)
+				break;
+		}
+		/* no break needed: CPU_DEAD is the last case */
+	}
+
+	/* pref_cpu == 0 means no eligible CPU found; leave affinity alone. */
+	if (pref_cpu == 0)
+		return NOTIFY_OK;
+
+	if (!cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
+		affine_cpu = pref_cpu;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cds_cpu_hotplug_notifier = {
+	.notifier_call = cds_cpu_hotplug_notify,
+};
+#endif
+
+/**
+ * cds_sched_open() - initialize the CDS Scheduler
+ * @p_cds_context: Pointer to the global CDS Context
+ * @pSchedContext: Pointer to a previously allocated buffer big
+ *	enough to hold a scheduler context.
+ * @SchedCtxSize: CDS scheduler context size
+ *
+ * This function initializes the CDS Scheduler
+ * Upon successful initialization:
+ *	- All the message queues are initialized
+ *	- The Main Controller thread is created and ready to receive and
+ *	dispatch messages.
+ *
+ * On failure, partially created resources (MQs, rx freeq, hotplug
+ * notifier, MC thread) are torn down before returning.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_sched_open(void *p_cds_context,
+		p_cds_sched_context pSchedContext,
+		uint32_t SchedCtxSize)
+{
+	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Opening the CDS Scheduler", __func__);
+	/* Sanity checks */
+	if ((p_cds_context == NULL) || (pSchedContext == NULL)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+	/* Guard against caller/header mismatch on the context layout. */
+	if (sizeof(cds_sched_context) != SchedCtxSize) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Incorrect CDS Sched Context size passed",
+			  __func__);
+		return CDF_STATUS_E_INVAL;
+	}
+	cdf_mem_zero(pSchedContext, sizeof(cds_sched_context));
+	pSchedContext->pVContext = p_cds_context;
+	vStatus = cds_sched_init_mqs(pSchedContext);
+	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to initialize CDS Scheduler MQs",
+			  __func__);
+		return vStatus;
+	}
+	/* Initialize the helper events and event queues */
+	init_completion(&pSchedContext->McStartEvent);
+	init_completion(&pSchedContext->McShutdown);
+	init_completion(&pSchedContext->ResumeMcEvent);
+
+	spin_lock_init(&pSchedContext->McThreadLock);
+#ifdef QCA_CONFIG_SMP
+	spin_lock_init(&pSchedContext->ol_rx_thread_lock);
+#endif
+
+	init_waitqueue_head(&pSchedContext->mcWaitQueue);
+	pSchedContext->mcEventFlag = 0;
+
+#ifdef QCA_CONFIG_SMP
+	init_waitqueue_head(&pSchedContext->ol_rx_wait_queue);
+	init_completion(&pSchedContext->ol_rx_start_event);
+	init_completion(&pSchedContext->ol_suspend_rx_event);
+	init_completion(&pSchedContext->ol_resume_rx_event);
+	init_completion(&pSchedContext->ol_rx_shutdown);
+	pSchedContext->ol_rx_event_flag = 0;
+	spin_lock_init(&pSchedContext->ol_rx_queue_lock);
+	spin_lock_init(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	INIT_LIST_HEAD(&pSchedContext->ol_rx_thread_queue);
+	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	INIT_LIST_HEAD(&pSchedContext->cds_ol_rx_pkt_freeq);
+	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	if (cds_alloc_ol_rx_pkt_freeq(pSchedContext) != CDF_STATUS_SUCCESS) {
+		return CDF_STATUS_E_FAILURE;
+	}
+	register_hotcpu_notifier(&cds_cpu_hotplug_notifier);
+	pSchedContext->cpu_hot_plug_notifier = &cds_cpu_hotplug_notifier;
+#endif
+	/* Published before thread creation: the failure path below frees
+	 * the rx freeq through gp_cds_sched_context.
+	 */
+	gp_cds_sched_context = pSchedContext;
+
+	/* Create the CDS Main Controller thread */
+	pSchedContext->McThread = kthread_create(cds_mc_thread, pSchedContext,
+						 "cds_mc_thread");
+	if (IS_ERR(pSchedContext->McThread)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Could not Create CDS Main Thread Controller",
+			  __func__);
+		goto MC_THREAD_START_FAILURE;
+	}
+	wake_up_process(pSchedContext->McThread);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: CDS Main Controller thread Created", __func__);
+
+#ifdef QCA_CONFIG_SMP
+	pSchedContext->ol_rx_thread = kthread_create(cds_ol_rx_thread,
+						       pSchedContext,
+						       "cds_ol_rx_thread");
+	if (IS_ERR(pSchedContext->ol_rx_thread)) {
+
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+			  "%s: Could not Create CDS OL RX Thread",
+			  __func__);
+		goto OL_RX_THREAD_START_FAILURE;
+
+	}
+	wake_up_process(pSchedContext->ol_rx_thread);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  ("CDS OL RX thread Created"));
+#endif
+	/*
+	 * Now make sure all threads have started before we exit.
+	 * Each thread should normally ACK back when it starts.
+	 */
+	wait_for_completion_interruptible(&pSchedContext->McStartEvent);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: CDS MC Thread has started", __func__);
+#ifdef QCA_CONFIG_SMP
+	wait_for_completion_interruptible(&pSchedContext->ol_rx_start_event);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: CDS OL Rx Thread has started", __func__);
+#endif
+	/* We're good now: Let's get the ball rolling!!! */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: CDS Scheduler successfully Opened", __func__);
+	return CDF_STATUS_SUCCESS;
+
+#ifdef QCA_CONFIG_SMP
+OL_RX_THREAD_START_FAILURE:
+	/* Try and force the Main thread controller to exit */
+	set_bit(MC_SHUTDOWN_EVENT_MASK, &pSchedContext->mcEventFlag);
+	set_bit(MC_POST_EVENT_MASK, &pSchedContext->mcEventFlag);
+	wake_up_interruptible(&pSchedContext->mcWaitQueue);
+	/* Wait for MC to exit */
+	wait_for_completion_interruptible(&pSchedContext->McShutdown);
+#endif
+
+MC_THREAD_START_FAILURE:
+	/* De-initialize all the message queues */
+	cds_sched_deinit_mqs(pSchedContext);
+
+#ifdef QCA_CONFIG_SMP
+	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
+	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
+#endif
+
+	/* NOTE(review): gp_cds_sched_context still points at the torn-down
+	 * context on this failure path — confirm callers never reuse it.
+	 */
+	return CDF_STATUS_E_RESOURCES;
+
+} /* cds_sched_open() */
+
+/**
+ * cds_mc_thread() - cds main controller thread execution handler
+ * @Arg: Pointer to the global CDS Sched Context
+ *
+ * Event loop of the MC thread: sleeps on mcWaitQueue until a post or
+ * suspend event, then drains the message queues in fixed priority
+ * order — SYS, WMA, PE, SME — handling shutdown and suspend requests
+ * between passes. Exits when MC_SHUTDOWN_EVENT_MASK is set.
+ *
+ * Return: thread exit code
+ */
+static int cds_mc_thread(void *Arg)
+{
+	p_cds_sched_context pSchedContext = (p_cds_sched_context) Arg;
+	p_cds_msg_wrapper pMsgWrapper = NULL;
+	tpAniSirGlobal pMacContext = NULL;
+	tSirRetStatus macStatus = eSIR_SUCCESS;
+	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
+	int retWaitStatus = 0;
+	bool shutdown = false;
+	hdd_context_t *pHddCtx = NULL;
+	v_CONTEXT_t p_cds_context = NULL;
+
+	if (Arg == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Bad Args passed", __func__);
+		return 0;
+	}
+	set_user_nice(current, -2);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	daemonize("MC_Thread");
+#endif
+
+	/* Ack back to the context from which the main controller thread
+	 * has been created
+	 */
+	complete(&pSchedContext->McStartEvent);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: MC Thread %d (%s) starting up", __func__, current->pid,
+		  current->comm);
+
+	/* Get the Global CDS Context */
+	p_cds_context = cds_get_global_context();
+	if (!p_cds_context) {
+		hddLog(CDF_TRACE_LEVEL_FATAL, "%s: Global CDS context is Null",
+		       __func__);
+		return 0;
+	}
+
+	pHddCtx = cds_get_context(CDF_MODULE_ID_HDD);
+	if (!pHddCtx) {
+		hddLog(CDF_TRACE_LEVEL_FATAL, "%s: HDD context is Null",
+		       __func__);
+		return 0;
+	}
+
+	while (!shutdown) {
+		/* This implements the execution model algorithm */
+		retWaitStatus =
+			wait_event_interruptible(pSchedContext->mcWaitQueue,
+						 test_bit(MC_POST_EVENT_MASK,
+							  &pSchedContext->mcEventFlag)
+						 || test_bit(MC_SUSPEND_EVENT_MASK,
+							     &pSchedContext->mcEventFlag));
+
+		if (retWaitStatus == -ERESTARTSYS) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: wait_event_interruptible returned -ERESTARTSYS",
+				  __func__);
+			CDF_BUG(0);
+		}
+		clear_bit(MC_POST_EVENT_MASK, &pSchedContext->mcEventFlag);
+
+		/* Drain the queues in priority order; each successful
+		 * dispatch restarts the scan from the highest-priority
+		 * queue via `continue`.
+		 */
+		while (1) {
+			/* Check if MC needs to shutdown */
+			if (test_bit
+				    (MC_SHUTDOWN_EVENT_MASK,
+				    &pSchedContext->mcEventFlag)) {
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: MC thread signaled to shutdown",
+					  __func__);
+				shutdown = true;
+				/* Check for any Suspend Indication */
+				if (test_bit
+					    (MC_SUSPEND_EVENT_MASK,
+					    &pSchedContext->mcEventFlag)) {
+					clear_bit(MC_SUSPEND_EVENT_MASK,
+						  &pSchedContext->mcEventFlag);
+
+					/* Unblock anyone waiting on suspend */
+					complete(&pHddCtx->mc_sus_event_var);
+				}
+				break;
+			}
+			/* Check the SYS queue first */
+			if (!cds_is_mq_empty(&pSchedContext->sysMcMq)) {
+				/* Service the SYS message queue */
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: Servicing the CDS SYS MC Message queue",
+					  __func__);
+				pMsgWrapper =
+					cds_mq_get(&pSchedContext->sysMcMq);
+				if (pMsgWrapper == NULL) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: pMsgWrapper is NULL",
+						  __func__);
+					CDF_ASSERT(0);
+					break;
+				}
+				vStatus =
+					sys_mc_process_msg(pSchedContext->pVContext,
+							   pMsgWrapper->pVosMsg);
+				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: Issue Processing SYS message",
+						  __func__);
+				}
+				/* return message to the Core */
+				cds_core_return_msg(pSchedContext->pVContext,
+						    pMsgWrapper);
+				continue;
+			}
+			/* Check the WMA queue */
+			if (!cds_is_mq_empty(&pSchedContext->wmaMcMq)) {
+				/* Service the WMA message queue */
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: Servicing the CDS WMA MC Message queue",
+					  __func__);
+				pMsgWrapper =
+					cds_mq_get(&pSchedContext->wmaMcMq);
+				if (pMsgWrapper == NULL) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: pMsgWrapper is NULL",
+						  __func__);
+					CDF_ASSERT(0);
+					break;
+				}
+				vStatus =
+					wma_mc_process_msg(pSchedContext->pVContext,
+							 pMsgWrapper->pVosMsg);
+				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: Issue Processing WMA message",
+						  __func__);
+				}
+				/* return message to the Core */
+				cds_core_return_msg(pSchedContext->pVContext,
+						    pMsgWrapper);
+				continue;
+			}
+			/* Check the PE queue */
+			if (!cds_is_mq_empty(&pSchedContext->peMcMq)) {
+				/* Service the PE message queue */
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: Servicing the CDS PE MC Message queue",
+					  __func__);
+				pMsgWrapper =
+					cds_mq_get(&pSchedContext->peMcMq);
+				if (NULL == pMsgWrapper) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: pMsgWrapper is NULL",
+						  __func__);
+					CDF_ASSERT(0);
+					break;
+				}
+
+				/* Need some optimization */
+				pMacContext =
+					cds_get_context(CDF_MODULE_ID_PE);
+				if (NULL == pMacContext) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_INFO,
+						  "MAC Context not ready yet");
+					cds_core_return_msg
+						(pSchedContext->pVContext,
+						pMsgWrapper);
+					continue;
+				}
+
+				macStatus =
+					pe_process_messages(pMacContext,
+							    (tSirMsgQ *)
+							    pMsgWrapper->pVosMsg);
+				if (eSIR_SUCCESS != macStatus) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: Issue Processing PE message",
+						  __func__);
+				}
+				/* return message to the Core */
+				cds_core_return_msg(pSchedContext->pVContext,
+						    pMsgWrapper);
+				continue;
+			}
+			/** Check the SME queue **/
+			if (!cds_is_mq_empty(&pSchedContext->smeMcMq)) {
+				/* Service the SME message queue */
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: Servicing the CDS SME MC Message queue",
+					  __func__);
+				pMsgWrapper =
+					cds_mq_get(&pSchedContext->smeMcMq);
+				if (NULL == pMsgWrapper) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: pMsgWrapper is NULL",
+						  __func__);
+					CDF_ASSERT(0);
+					break;
+				}
+
+				/* Need some optimization */
+				pMacContext =
+					cds_get_context(CDF_MODULE_ID_SME);
+				if (NULL == pMacContext) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_INFO,
+						  "MAC Context not ready yet");
+					cds_core_return_msg
+						(pSchedContext->pVContext,
+						pMsgWrapper);
+					continue;
+				}
+
+				vStatus =
+					sme_process_msg((tHalHandle) pMacContext,
+							pMsgWrapper->pVosMsg);
+				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
+					CDF_TRACE(CDF_MODULE_ID_CDF,
+						  CDF_TRACE_LEVEL_ERROR,
+						  "%s: Issue Processing SME message",
+						  __func__);
+				}
+				/* return message to the Core */
+				cds_core_return_msg(pSchedContext->pVContext,
+						    pMsgWrapper);
+				continue;
+			}
+			/* Check for any Suspend Indication */
+			if (test_bit
+				    (MC_SUSPEND_EVENT_MASK,
+				    &pSchedContext->mcEventFlag)) {
+				clear_bit(MC_SUSPEND_EVENT_MASK,
+					  &pSchedContext->mcEventFlag);
+				spin_lock(&pSchedContext->McThreadLock);
+
+				/* Mc Thread Suspended */
+				complete(&pHddCtx->mc_sus_event_var);
+
+				INIT_COMPLETION(pSchedContext->ResumeMcEvent);
+				spin_unlock(&pSchedContext->McThreadLock);
+
+				/* Wait foe Resume Indication */
+				wait_for_completion_interruptible
+					(&pSchedContext->ResumeMcEvent);
+			}
+			break;  /* All queues are empty now */
+		} /* while message loop processing */
+	} /* while true */
+	/* If we get here the MC thread must exit */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: MC Thread exiting!!!!", __func__);
+	complete_and_exit(&pSchedContext->McShutdown, 0);
+} /* cds_mc_thread() */
+
+#ifdef QCA_CONFIG_SMP
+/**
+ * cds_free_ol_rx_pkt_freeq() - free cds buffer free queue
+ * @pSchedContext - pointer to the global CDS Sched Context
+ *
+ * This API does mem free of the buffers available in free cds buffer
+ * queue which is used for Data rx processing.
+ *
+ * Return: none
+ */
+void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
+{
+	struct cds_ol_rx_pkt *pkt, *tmp;
+
+	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
+				 list) {
+		list_del(&pkt->list);
+		/* Drop the lock around the free; tmp keeps the walk valid. */
+		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+		cdf_mem_free(pkt);
+		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	}
+	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+}
+
+/**
+ * cds_alloc_ol_rx_pkt_freeq() - Function to allocate free buffer queue
+ * @pSchedContext - pointer to the global CDS Sched Context
+ *
+ * This API allocates CDS_MAX_OL_RX_PKT number of cds message buffers
+ * which are used for Rx data processing. On allocation failure, every
+ * buffer queued so far is released before returning.
+ *
+ * Return: status of memory allocation
+ */
+static CDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
+{
+	struct cds_ol_rx_pkt *pkt, *tmp;
+	int i;
+
+	for (i = 0; i < CDS_MAX_OL_RX_PKT; i++) {
+		pkt = cdf_mem_malloc(sizeof(*pkt));
+		if (!pkt) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s Vos packet allocation for ol rx thread failed",
+				  __func__);
+			goto free;
+		}
+		memset(pkt, 0, sizeof(*pkt));
+		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+		list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
+		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	}
+
+	return CDF_STATUS_SUCCESS;
+
+free:
+	/* Unwind: release everything added to the freeq so far. */
+	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
+				 list) {
+		list_del(&pkt->list);
+		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+		cdf_mem_free(pkt);
+		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	}
+	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	return CDF_STATUS_E_NOMEM;
+}
+
+/**
+ * cds_free_ol_rx_pkt() - api to release cds message to the freeq
+ * @pSchedContext: Pointer to the global CDS Sched Context
+ * @pkt: CDS message buffer to be returned to free queue.
+ *
+ * This api returns the cds message used for Rx data to the free queue
+ *
+ * Return: none
+ */
+void
+cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
+		    struct cds_ol_rx_pkt *pkt)
+{
+	/* Scrub stale callback/staId/Rxpkt fields before recycling. */
+	memset(pkt, 0, sizeof(*pkt));
+	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
+	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+}
+
+/**
+ * cds_alloc_ol_rx_pkt() - take the next rx message buffer off the freeq
+ * @pSchedContext: Pointer to the global CDS Sched Context
+ *
+ * Return: a detached buffer from the free queue, or NULL when the
+ * queue is exhausted.
+ */
+struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext)
+{
+	struct cds_ol_rx_pkt *pkt = NULL;
+
+	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+	if (!list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
+		pkt = list_first_entry(&pSchedContext->cds_ol_rx_pkt_freeq,
+				       struct cds_ol_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
+
+	return pkt;
+}
+
+/**
+ * cds_indicate_rxpkt() - indicate rx data packet
+ * @pSchedContext: Pointer to the global CDS Sched Context
+ * @pkt: CDS data message buffer
+ *
+ * This api enqueues the rx packet into ol_rx_thread_queue and notifies
+ * cds_ol_rx_thread()
+ *
+ * Return: none
+ */
+void
+cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
+		   struct cds_ol_rx_pkt *pkt)
+{
+	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
+	list_add_tail(&pkt->list, &pSchedContext->ol_rx_thread_queue);
+	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
+	/* Flag the event and wake the rx thread to drain the queue. */
+	set_bit(RX_POST_EVENT_MASK, &pSchedContext->ol_rx_event_flag);
+	wake_up_interruptible(&pSchedContext->ol_rx_wait_queue);
+}
+
+/**
+ * cds_drop_rxpkt_by_staid() - api to drop pending rx packets for a sta
+ * @pSchedContext: Pointer to the global CDS Sched Context
+ * @staId: Station Id
+ *
+ * This api drops queued packets for a station, to drop all the pending
+ * packets the caller has to send WLAN_MAX_STA_COUNT as staId. Matching
+ * entries are moved onto a private list under the queue lock; their
+ * nbuf chains are freed and the wrappers recycled outside the lock.
+ *
+ * Return: none
+ */
+void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId)
+{
+	struct list_head local_list;
+	struct cds_ol_rx_pkt *pkt, *tmp;
+	cdf_nbuf_t buf, next_buf;
+
+	INIT_LIST_HEAD(&local_list);
+	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
+	if (list_empty(&pSchedContext->ol_rx_thread_queue)) {
+		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
+		return;
+	}
+	list_for_each_entry_safe(pkt, tmp, &pSchedContext->ol_rx_thread_queue,
+								list) {
+		if (pkt->staId == staId || staId == WLAN_MAX_STA_COUNT)
+			list_move_tail(&pkt->list, &local_list);
+	}
+	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
+
+	/* Must use the _safe iterator here: each entry is unlinked and
+	 * recycled to the freeq (where it may be reused and relinked at
+	 * once) while the walk is still in progress — the non-safe
+	 * list_for_each_entry would dereference a recycled node.
+	 */
+	list_for_each_entry_safe(pkt, tmp, &local_list, list) {
+		list_del(&pkt->list);
+		buf = pkt->Rxpkt;
+		while (buf) {
+			next_buf = cdf_nbuf_queue_next(buf);
+			cdf_nbuf_free(buf);
+			buf = next_buf;
+		}
+		cds_free_ol_rx_pkt(pSchedContext, pkt);
+	}
+}
+
+/**
+ * cds_rx_from_queue() - function to process pending Rx packets
+ * @pSchedContext: Pointer to the global CDS Sched Context
+ *
+ * This api drains the pending rx packet list, invoking each packet's
+ * registered callback. The callback would essentially send the packet
+ * to HDD.
+ *
+ * Return: none
+ */
+static void cds_rx_from_queue(p_cds_sched_context pSchedContext)
+{
+	struct cds_ol_rx_pkt *pkt;
+	uint16_t sta_id;
+
+	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
+	while (!list_empty(&pSchedContext->ol_rx_thread_queue)) {
+		pkt = list_first_entry(&pSchedContext->ol_rx_thread_queue,
+				       struct cds_ol_rx_pkt, list);
+		list_del(&pkt->list);
+		/* Drop the queue lock while running the per-packet callback
+		 * so producers (cds_indicate_rxpkt) are not blocked; it is
+		 * re-taken before the next list_empty() check.
+		 */
+		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
+		sta_id = pkt->staId;
+		pkt->callback(pkt->context, pkt->Rxpkt, sta_id);
+		cds_free_ol_rx_pkt(pSchedContext, pkt);
+		spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
+	}
+	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
+}
+
+/**
+ * cds_ol_rx_thread() - cds main tlshim rx thread
+ * @arg: pointer to the global CDS Sched Context
+ *
+ * This api is the thread handler for Tlshim Data packet processing.
+ *
+ * Return: thread exit code
+ */
+static int cds_ol_rx_thread(void *arg)
+{
+	p_cds_sched_context pSchedContext = (p_cds_sched_context) arg;
+	unsigned long pref_cpu = 0;
+	bool shutdown = false;
+	int status, i;
+	unsigned int num_cpus;
+
+	/* Validate the argument before any thread setup. Previously this
+	 * check only ran after the nice level and CPU affinity had already
+	 * been configured, which did pointless work on the error path.
+	 */
+	if (!arg) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Bad Args passed", __func__);
+		return 0;
+	}
+
+	set_user_nice(current, -1);
+#ifdef MSM_PLATFORM
+	set_wake_up_idle(true);
+#endif
+
+	num_cpus = num_possible_cpus();
+	/* Find the available cpu core other than cpu 0 and
+	 * bind the thread
+	 */
+	for_each_online_cpu(i) {
+		if (i == 0)
+			continue;
+		pref_cpu = i;
+		if (num_cpus <= CDS_CORE_PER_CLUSTER)
+			break;
+	}
+	if (pref_cpu != 0 && (!cds_set_cpus_allowed_ptr(current, pref_cpu)))
+		affine_cpu = pref_cpu;
+
+	/* Signal the creator that the thread is up and running */
+	complete(&pSchedContext->ol_rx_start_event);
+
+	while (!shutdown) {
+		/* Sleep until either new rx work or a suspend request */
+		status =
+			wait_event_interruptible(pSchedContext->ol_rx_wait_queue,
+						 test_bit(RX_POST_EVENT_MASK,
+							  &pSchedContext->ol_rx_event_flag)
+						 || test_bit(RX_SUSPEND_EVENT_MASK,
+							     &pSchedContext->ol_rx_event_flag));
+		if (status == -ERESTARTSYS)
+			break;
+
+		clear_bit(RX_POST_EVENT_MASK, &pSchedContext->ol_rx_event_flag);
+		while (true) {
+			/* Shutdown takes priority over draining the queue */
+			if (test_bit(RX_SHUTDOWN_EVENT_MASK,
+				     &pSchedContext->ol_rx_event_flag)) {
+				clear_bit(RX_SHUTDOWN_EVENT_MASK,
+					  &pSchedContext->ol_rx_event_flag);
+				if (test_bit(RX_SUSPEND_EVENT_MASK,
+					     &pSchedContext->ol_rx_event_flag)) {
+					clear_bit(RX_SUSPEND_EVENT_MASK,
+						  &pSchedContext->ol_rx_event_flag);
+					complete
+						(&pSchedContext->ol_suspend_rx_event);
+				}
+				CDF_TRACE(CDF_MODULE_ID_CDF,
+					  CDF_TRACE_LEVEL_INFO,
+					  "%s: Shutting down OL RX Thread",
+					  __func__);
+				shutdown = true;
+				break;
+			}
+			cds_rx_from_queue(pSchedContext);
+
+			/* On suspend: ack the suspender, then block until
+			 * resumed. ol_rx_thread_lock orders the completion
+			 * against re-arming ol_resume_rx_event.
+			 */
+			if (test_bit(RX_SUSPEND_EVENT_MASK,
+				     &pSchedContext->ol_rx_event_flag)) {
+				clear_bit(RX_SUSPEND_EVENT_MASK,
+					  &pSchedContext->ol_rx_event_flag);
+				spin_lock(&pSchedContext->ol_rx_thread_lock);
+				complete(&pSchedContext->ol_suspend_rx_event);
+				INIT_COMPLETION
+					(pSchedContext->ol_resume_rx_event);
+				spin_unlock(&pSchedContext->ol_rx_thread_lock);
+				wait_for_completion_interruptible
+					(&pSchedContext->ol_resume_rx_event);
+			}
+			break;
+		}
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+		  "%s: Exiting CDS OL rx thread", __func__);
+	complete_and_exit(&pSchedContext->ol_rx_shutdown, 0);
+}
+#endif
+
+/**
+ * cds_sched_close() - close the cds scheduler
+ * @p_cds_context: Pointer to the global CDS Context
+ *
+ * This api closes the CDS Scheduler upon successful closing:
+ *	- All the message queues are flushed
+ *	- The Main Controller thread is closed
+ *	- The Tx thread is closed
+ *
+ *
+ * Return: cdf status
+ */
+CDF_STATUS cds_sched_close(void *p_cds_context)
+{
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: invoked", __func__);
+	if (gp_cds_sched_context == NULL) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: gp_cds_sched_context == NULL", __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+	/* shut down MC Thread: raise shutdown + post events, wake the
+	 * thread, then wait (uninterruptibly) for it to exit.
+	 */
+	set_bit(MC_SHUTDOWN_EVENT_MASK, &gp_cds_sched_context->mcEventFlag);
+	set_bit(MC_POST_EVENT_MASK, &gp_cds_sched_context->mcEventFlag);
+	wake_up_interruptible(&gp_cds_sched_context->mcWaitQueue);
+	/* Wait for MC to exit */
+	wait_for_completion(&gp_cds_sched_context->McShutdown);
+	gp_cds_sched_context->McThread = 0;
+
+	/* Clean up message queues of MC thread */
+	cds_sched_flush_mc_mqs(gp_cds_sched_context);
+
+	/* Deinit all the queues */
+	cds_sched_deinit_mqs(gp_cds_sched_context);
+
+#ifdef QCA_CONFIG_SMP
+	/* Shut down Tlshim Rx thread */
+	set_bit(RX_SHUTDOWN_EVENT_MASK, &gp_cds_sched_context->ol_rx_event_flag);
+	set_bit(RX_POST_EVENT_MASK, &gp_cds_sched_context->ol_rx_event_flag);
+	wake_up_interruptible(&gp_cds_sched_context->ol_rx_wait_queue);
+	/* NOTE(review): this wait is interruptible — if a signal interrupts
+	 * it, the teardown below proceeds while the rx thread may still be
+	 * running; confirm whether wait_for_completion() was intended.
+	 */
+	wait_for_completion_interruptible
+		(&gp_cds_sched_context->ol_rx_shutdown);
+	gp_cds_sched_context->ol_rx_thread = NULL;
+	cds_drop_rxpkt_by_staid(gp_cds_sched_context, WLAN_MAX_STA_COUNT);
+	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
+	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
+#endif
+	return CDF_STATUS_SUCCESS;
+} /* cds_sched_close() */
+
+/**
+ * cds_sched_init_mqs() - initialize the cds scheduler message queues
+ * @pSchedContext: Pointer to the Scheduler Context.
+ *
+ * Brings up the WMA, PE, SME and SYS MC thread message queues in turn,
+ * stopping at the first failure.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS cds_sched_init_mqs(p_cds_sched_context pSchedContext)
+{
+	CDF_STATUS status;
+
+	/* WMA MC queue */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Initializing the WMA MC Message queue", __func__);
+	status = cds_mq_init(&pSchedContext->wmaMcMq);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to init WMA MC Message queue", __func__);
+		CDF_ASSERT(0);
+		return status;
+	}
+
+	/* PE MC queue */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Initializing the PE MC Message queue", __func__);
+	status = cds_mq_init(&pSchedContext->peMcMq);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to init PE MC Message queue", __func__);
+		CDF_ASSERT(0);
+		return status;
+	}
+
+	/* SME MC queue */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Initializing the SME MC Message queue", __func__);
+	status = cds_mq_init(&pSchedContext->smeMcMq);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to init SME MC Message queue", __func__);
+		CDF_ASSERT(0);
+		return status;
+	}
+
+	/* SYS MC queue */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Initializing the SYS MC Message queue", __func__);
+	status = cds_mq_init(&pSchedContext->sysMcMq);
+	if (!CDF_IS_STATUS_SUCCESS(status)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Failed to init SYS MC Message queue", __func__);
+		CDF_ASSERT(0);
+		return status;
+	}
+
+	return CDF_STATUS_SUCCESS;
+} /* cds_sched_init_mqs() */
+
+/**
+ * cds_sched_deinit_mqs() - Deinitialize the cds scheduler message queues
+ * @pSchedContext: Pointer to the Scheduler Context.
+ *
+ * Tears down the four MC thread message queues created by
+ * cds_sched_init_mqs(), in the same order they were initialized.
+ *
+ * Return: none
+ */
+void cds_sched_deinit_mqs(p_cds_sched_context pSchedContext)
+{
+	/* Now de-intialize all message queues */
+
+	/* MC WMA */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s De-Initializing the WMA MC Message queue", __func__);
+	cds_mq_deinit(&pSchedContext->wmaMcMq);
+	/* MC PE */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s De-Initializing the PE MC Message queue", __func__);
+	cds_mq_deinit(&pSchedContext->peMcMq);
+	/* MC SME */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s De-Initializing the SME MC Message queue", __func__);
+	cds_mq_deinit(&pSchedContext->smeMcMq);
+	/* MC SYS */
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s De-Initializing the SYS MC Message queue", __func__);
+	cds_mq_deinit(&pSchedContext->sysMcMq);
+
+} /* cds_sched_deinit_mqs() */
+
+/**
+ * cds_sched_flush_mc_mqs() - flush all the MC thread message queues
+ * @pSchedContext: Pointer to global cds context
+ *
+ * Drains every MC thread message queue and returns each wrapper to the
+ * core, freeing the contained message first where a free routine exists.
+ *
+ * Return: none
+ */
+void cds_sched_flush_mc_mqs(p_cds_sched_context pSchedContext)
+{
+	p_cds_msg_wrapper pMsgWrapper = NULL;
+	p_cds_contextType cds_ctx;
+
+	/* Here each of the MC thread MQ shall be drained and returned to the
+	 * Core. Before returning a wrapper to the Core, the CDS message shall
+	 * be freed first
+	 */
+	CDF_TRACE(CDF_MODULE_ID_CDF,
+		  CDF_TRACE_LEVEL_INFO,
+		  ("Flushing the MC Thread message queue"));
+
+	if (NULL == pSchedContext) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pSchedContext is NULL", __func__);
+		return;
+	}
+
+	cds_ctx = (p_cds_contextType) (pSchedContext->pVContext);
+	if (NULL == cds_ctx) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: cds_ctx is NULL", __func__);
+		return;
+	}
+
+	/* Flush the SYS Mq */
+	/* NOTE(review): unlike the WMA queue below, SYS messages are
+	 * returned without freeing pVosMsg->bodyptr — confirm SYS messages
+	 * never carry an allocated bodyptr, otherwise this leaks.
+	 */
+	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->sysMcMq))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF,
+			  CDF_TRACE_LEVEL_INFO,
+			  "%s: Freeing MC SYS message type %d ", __func__,
+			  pMsgWrapper->pVosMsg->type);
+		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
+	}
+	/* Flush the WMA Mq: free any attached body before returning the
+	 * wrapper, then scrub the message fields.
+	 */
+	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->wmaMcMq))) {
+		if (pMsgWrapper->pVosMsg != NULL) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
+				  "%s: Freeing MC WMA MSG message type %d",
+				  __func__, pMsgWrapper->pVosMsg->type);
+			if (pMsgWrapper->pVosMsg->bodyptr) {
+				cdf_mem_free((void *)pMsgWrapper->
+					     pVosMsg->bodyptr);
+			}
+
+			pMsgWrapper->pVosMsg->bodyptr = NULL;
+			pMsgWrapper->pVosMsg->bodyval = 0;
+			pMsgWrapper->pVosMsg->type = 0;
+		}
+		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
+	}
+
+	/* Flush the PE Mq: PE messages are released via pe_free_msg() */
+	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->peMcMq))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF,
+			  CDF_TRACE_LEVEL_INFO,
+			  "%s: Freeing MC PE MSG message type %d", __func__,
+			  pMsgWrapper->pVosMsg->type);
+		pe_free_msg(cds_ctx->pMACContext,
+			    (tSirMsgQ *) pMsgWrapper->pVosMsg);
+		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
+	}
+	/* Flush the SME Mq: SME messages are released via sme_free_msg() */
+	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->smeMcMq))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF,
+			  CDF_TRACE_LEVEL_INFO,
+			  "%s: Freeing MC SME MSG message type %d", __func__,
+			  pMsgWrapper->pVosMsg->type);
+		sme_free_msg(cds_ctx->pMACContext, pMsgWrapper->pVosMsg);
+		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
+	}
+} /* cds_sched_flush_mc_mqs() */
+
+/**
+ * get_cds_sched_ctxt() - get cds scheduler context
+ *
+ * Return: the global CDS scheduler context; NULL (with an error trace) if
+ *	   the scheduler has not been initialized yet
+ */
+p_cds_sched_context get_cds_sched_ctxt(void)
+{
+	/* Report — but do not otherwise handle — a missing context */
+	if (NULL == gp_cds_sched_context)
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: gp_cds_sched_context == NULL", __func__);
+
+	return gp_cds_sched_context;
+}
+
+/**
+ * cds_ssr_protect_init() - initialize ssr protection debug functionality
+ *
+ * Sets up the ssr-protect spinlock and marks every slot of the protect
+ * log as free.
+ *
+ * Return:
+ *        void
+ */
+void cds_ssr_protect_init(void)
+{
+	int idx;
+
+	spin_lock_init(&ssr_protect_lock);
+
+	for (idx = 0; idx < MAX_SSR_PROTECT_LOG; idx++) {
+		ssr_protect_log[idx].func = NULL;
+		ssr_protect_log[idx].free = true;
+		ssr_protect_log[idx].pid = 0;
+	}
+}
+
+/**
+ * cds_print_external_threads() - print external threads stuck in driver
+ *
+ * Walks the ssr-protect log under its lock and reports every entry point
+ * that has not yet returned from the driver.
+ *
+ * Return:
+ *        void
+ */
+
+static void cds_print_external_threads(void)
+{
+	int idx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ssr_protect_lock, flags);
+
+	for (idx = 0; idx < MAX_SSR_PROTECT_LOG; idx++) {
+		if (ssr_protect_log[idx].free)
+			continue;
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+			  "PID %d is stuck at %s", ssr_protect_log[idx].pid,
+			  ssr_protect_log[idx].func);
+	}
+
+	spin_unlock_irqrestore(&ssr_protect_lock, flags);
+}
+
+/**
+ * cds_ssr_protect() - start ssr protection
+ * @caller_func: name of calling function.
+ *
+ * This function is called to keep track of active driver entry points:
+ * it bumps the global entry count and records the caller in the first
+ * free slot of the protect log.
+ *
+ * Return: none
+ */
+void cds_ssr_protect(const char *caller_func)
+{
+	int i;
+	bool status = false;
+	unsigned long irq_flags;
+
+	/* The previous code stored the returned count in an unused local;
+	 * the _return variant is kept (rather than atomic_inc()) for its
+	 * implicit full memory barrier — TODO confirm whether any reader
+	 * relies on that ordering.
+	 */
+	atomic_inc_return(&ssr_protect_entry_count);
+
+	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
+
+	/* Claim the first free log slot for this PID/function pair */
+	for (i = 0; i < MAX_SSR_PROTECT_LOG; i++) {
+		if (ssr_protect_log[i].free) {
+			ssr_protect_log[i].func = caller_func;
+			ssr_protect_log[i].free = false;
+			ssr_protect_log[i].pid = current->pid;
+			status = true;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
+
+	if (!status)
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		"Could not track PID %d call %s: log is full",
+		current->pid, caller_func);
+}
+
+/**
+ * cds_ssr_unprotect() - stop ssr protection
+ * @caller_func: name of calling function.
+ *
+ * Decrements the global entry count and releases the log slot claimed by
+ * cds_ssr_protect() for this PID/function pair.
+ *
+ * Return: none
+ */
+void cds_ssr_unprotect(const char *caller_func)
+{
+	int i;
+	bool status = false;
+	unsigned long irq_flags;
+
+	/* The previous code stored the returned count in an unused local;
+	 * the _return variant is kept (rather than atomic_dec()) for its
+	 * implicit full memory barrier — TODO confirm whether any reader
+	 * relies on that ordering.
+	 */
+	atomic_dec_return(&ssr_protect_entry_count);
+
+	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
+
+	/* Find and free the slot recorded by the matching cds_ssr_protect() */
+	for (i = 0; i < MAX_SSR_PROTECT_LOG; i++) {
+		if (!ssr_protect_log[i].free &&
+		    (ssr_protect_log[i].pid == current->pid) &&
+		    !strcmp(ssr_protect_log[i].func, caller_func)) {
+			ssr_protect_log[i].func = NULL;
+			ssr_protect_log[i].free = true;
+			ssr_protect_log[i].pid = 0;
+			status = true;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
+
+	if (!status)
+		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+			"Untracked call %s", caller_func);
+}
+
+/**
+ * cds_is_ssr_ready() - check if the calling execution can proceed with ssr
+ *
+ * @caller_func: name of calling function.
+ *
+ * Return: true if there is no active entry points in driver
+ *	   false if there is at least one active entry in driver
+ */
+bool cds_is_ssr_ready(const char *caller_func)
+{
+	int remaining = MAX_SSR_WAIT_ITERATIONS;
+
+	/* Poll until every tracked entry point has left the driver, or the
+	 * retry budget runs out.
+	 */
+	while (remaining && atomic_read(&ssr_protect_entry_count)) {
+		if (--remaining) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Waiting for active entry points to exit",
+				  __func__);
+			msleep(SSR_WAIT_SLEEP_TIME);
+		}
+	}
+
+	/* at least one external thread is executing */
+	if (!remaining) {
+		cds_print_external_threads();
+		return false;
+	}
+
+	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
+		  "Allowing SSR for %s", caller_func);
+
+	return true;
+}

+ 1135 - 0
core/cds/src/cds_utils.c

@@ -0,0 +1,1135 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*============================================================================
+   FILE:         cds_utils.c
+
+   OVERVIEW:     This source file contains definitions for CDS crypto APIs
+                The four APIs mentioned in this file are used for
+                initializing, and de-initializing a crypto context, and
+                obtaining truly random data (for keys), as well as
+                SHA1 HMAC, and AES encrypt and decrypt routines.
+
+                The routines include:
+                cds_crypto_init() - Initializes Crypto module
+                cds_crypto_deinit() - De-initializes Crypto module
+                cds_rand_get_bytes() - Generates random byte
+                cds_sha1_hmac_str() - Generate the HMAC-SHA1 of a string given a key
+                cds_encrypt_aes() - Generate AES Encrypted byte stream
+                cds_decrypt_aes() - Decrypts an AES Encrypted byte stream
+
+   DEPENDENCIES:
+   ============================================================================*/
+
+/*----------------------------------------------------------------------------
+ * Include Files
+ * -------------------------------------------------------------------------*/
+
+#include "cdf_trace.h"
+#include "cds_utils.h"
+#include "cdf_memory.h"
+#include "cds_crypto.h"
+
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/completion.h>
+#include <linux/ieee80211.h>
+#include <crypto/hash.h>
+#include <crypto/aes.h>
+
+#include "cds_ieee80211_common.h"
+/*----------------------------------------------------------------------------
+ * Preprocessor Definitions and Constants
+ * -------------------------------------------------------------------------*/
+#define AAD_LEN 20
+#define IV_SIZE_AES_128 16
+#define CMAC_IPN_LEN 6
+#define CMAC_TLEN 8             /* CMAC TLen = 64 bits (8 octets) */
+
+/*----------------------------------------------------------------------------
+ * Type Declarations
+ * -------------------------------------------------------------------------*/
+/*----------------------------------------------------------------------------
+ * Global Data Definitions
+ * -------------------------------------------------------------------------*/
+/*----------------------------------------------------------------------------
+ * Static Variable Definitions
+ * -------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+   Function Definitions and Documentation
+ * -------------------------------------------------------------------------*/
+#ifdef CONFIG_ICNSS
+#ifdef WLAN_FEATURE_11W
+/* XOR two 16-byte AES blocks: out = a ^ b, byte by byte. */
+static inline void xor_128(const u8 *a, const u8 *b, u8 *out)
+{
+	u8 idx = 0;
+
+	while (idx < AES_BLOCK_SIZE) {
+		out[idx] = a[idx] ^ b[idx];
+		idx++;
+	}
+}
+
+/*
+ * Shift a 128-bit big-endian value left by one bit, carrying the MSB of
+ * each byte into the next more-significant byte. input and output must
+ * not alias.
+ */
+static inline void leftshift_onebit(const u8 *input, u8 *output)
+{
+	int pos;
+	int carry = 0;
+
+	for (pos = AES_BLOCK_SIZE - 1; pos >= 0; pos--) {
+		int next_carry = (input[pos] & 0x80) ? 1 : 0;
+
+		output[pos] = (input[pos] << 1) | carry;
+		carry = next_carry;
+	}
+}
+
+/*
+ * generate_subkey() - derive the AES-CMAC subkeys K1 and K2
+ * @tfm: AES cipher handle, already keyed by the caller
+ * @k1: output subkey used when the final message block is complete
+ * @k2: output subkey used when the final message block requires padding
+ *
+ * Implements the SubKey generation step of AES-CMAC (RFC 4493 /
+ * NIST SP 800-38B): L = AES-K(0^128); K1 = L << 1, XOR Rb if MSB(L) is
+ * set; K2 = K1 << 1, XOR Rb if MSB(K1) is set.
+ */
+static void generate_subkey(struct crypto_cipher *tfm, u8 *k1, u8 *k2)
+{
+	u8 l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
+	/* Rb constant from the CMAC specification */
+	u8 const_rb[AES_BLOCK_SIZE] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
+	};
+	u8 const_zero[AES_BLOCK_SIZE] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
+
+	/* L = AES-K(0^128) */
+	crypto_cipher_encrypt_one(tfm, l, const_zero);
+
+	if ((l[0] & 0x80) == 0) {       /* If MSB(l) = 0, then k1 = l << 1 */
+		leftshift_onebit(l, k1);
+	} else {                /* Else k1 = ( l << 1 ) (+) Rb */
+		leftshift_onebit(l, tmp);
+		xor_128(tmp, const_rb, k1);
+	}
+
+	/* K2 is derived from K1 by the same rule */
+	if ((k1[0] & 0x80) == 0) {
+		leftshift_onebit(k1, k2);
+	} else {
+		leftshift_onebit(k1, tmp);
+		xor_128(tmp, const_rb, k2);
+	}
+}
+
+/*
+ * 10^i padding per the CMAC specification: copy the partial last block,
+ * append a single 0x80, then zero-fill the rest of the AES block.
+ */
+static inline void padding(u8 *lastb, u8 *pad, u16 length)
+{
+	u8 pos;
+
+	for (pos = 0; pos < AES_BLOCK_SIZE; pos++) {
+		if (pos < length)
+			pad[pos] = lastb[pos];
+		else
+			pad[pos] = (pos == length) ? 0x80 : 0x00;
+	}
+}
+
+/*
+ * cds_cmac_calc_mic() - compute an AES-128-CMAC MIC over a buffer
+ * @tfm: AES cipher handle, already keyed
+ * @m: input message
+ * @length: message length in bytes
+ * @mac: output; receives the first CMAC_TLEN (8) bytes of the CMAC tag
+ *
+ * Follows the MAC generation algorithm of RFC 4493 / NIST SP 800-38B.
+ */
+static void cds_cmac_calc_mic(struct crypto_cipher *tfm,
+		u8 *m, u16 length, u8 *mac)
+{
+	u8 x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
+	u8 m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
+	u8 k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
+	int cmpBlk;
+	/* Round up to whole blocks; the literal 15 is AES_BLOCK_SIZE - 1 */
+	int i, nBlocks = (length + 15) / AES_BLOCK_SIZE;
+
+	generate_subkey(tfm, k1, k2);
+
+	/* An empty message is treated as a single padded block */
+	if (nBlocks == 0) {
+		nBlocks = 1;
+		cmpBlk = 0;
+	} else {
+		cmpBlk = ((length % AES_BLOCK_SIZE) == 0) ? 1 : 0;
+	}
+
+	if (cmpBlk) {           /* Last block is complete block */
+		xor_128(&m[AES_BLOCK_SIZE * (nBlocks - 1)], k1, m_last);
+	} else {                /* Last block is not complete block */
+		padding(&m[AES_BLOCK_SIZE * (nBlocks - 1)], padded,
+			length % AES_BLOCK_SIZE);
+		xor_128(padded, k2, m_last);
+	}
+
+	/* CBC-MAC chain starting from the zero block */
+	for (i = 0; i < AES_BLOCK_SIZE; i++)
+		x[i] = 0;
+
+	for (i = 0; i < (nBlocks - 1); i++) {
+		xor_128(x, &m[AES_BLOCK_SIZE * i], y);  /* y = Mi (+) x */
+		crypto_cipher_encrypt_one(tfm, x, y);   /* x = AES-128(KEY, y) */
+	}
+
+	/* Final block uses the subkey-masked m_last */
+	xor_128(x, m_last, y);
+	crypto_cipher_encrypt_one(tfm, x, y);
+
+	/* Truncate the 128-bit tag to the 64-bit MIC used by BIP */
+	memcpy(mac, x, CMAC_TLEN);
+}
+#endif
+#endif
+
+/**
+ * cds_crypto_init() - initialize the Crypto module
+ * @phCryptProv: pointer to the crypto handle to initialize
+ *
+ * The current implementation needs no real crypto context, so the handle
+ * is simply cleared.
+ *
+ * Return: CDF_STATUS_SUCCESS always
+ */
+CDF_STATUS cds_crypto_init(uint32_t *phCryptProv)
+{
+	/* No crypto context is required by this implementation */
+	*phCryptProv = 0;
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_crypto_deinit() - de-initialize the Crypto module
+ * @hCryptProv: handle previously produced by cds_crypto_init()
+ *
+ * cds_crypto_init() allocates nothing, so there is nothing to release.
+ *
+ * Return: CDF_STATUS_SUCCESS always
+ */
+CDF_STATUS cds_crypto_deinit(uint32_t hCryptProv)
+{
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_rand_get_bytes() - fill a caller-supplied buffer with random bytes
+ * @cryptHandle: crypto handle (unused by this implementation)
+ * @pbBuf: destination buffer, allocated by the caller
+ * @numBytes: number of random bytes to generate
+ *
+ * Return: CDF_STATUS_SUCCESS on success,
+ *         CDF_STATUS_E_FAULT if pbBuf is a NULL pointer
+ */
+CDF_STATUS
+cds_rand_get_bytes(uint32_t cryptHandle, uint8_t *pbBuf, uint32_t numBytes)
+{
+	/* Guard against an invalid destination pointer */
+	if (NULL == pbBuf)
+		return CDF_STATUS_E_FAULT;
+
+	get_random_bytes(pbBuf, numBytes);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+#ifdef WLAN_FEATURE_11W
+/**
+ * cds_get_mmie_size() - size in bytes of the Management MIC IE
+ *
+ * Return: sizeof(struct ieee80211_mmie)
+ */
+uint8_t cds_get_mmie_size(void)
+{
+	/* '(void)' gives this definition a proper prototype; the previous
+	 * empty parameter list declared an unspecified argument list in C.
+	 */
+	return sizeof(struct ieee80211_mmie);
+}
+
+/*--------------------------------------------------------------------------
+
+   \brief cds_increase_seq() - Increase the IPN aka Sequence number by one unit
+
+   The cds_increase_seq() function increases the IPN by one unit.
+
+   \param ipn - pointer to the IPN aka Sequence number [6 bytes]
+
+   --------------------------------------------------------------------------*/
+static void cds_increase_seq(uint8_t *ipn)
+{
+	int i;
+
+	if (!ipn)
+		return;
+
+	/* The IPN is a 48-bit little-endian counter (the byte order the
+	 * previous *(uint64_t *) implementation produced on LE hosts —
+	 * TODO confirm no big-endian target depends on the old behavior).
+	 * Incrementing byte-by-byte avoids the old code's 8-byte read of a
+	 * 6-byte buffer, which was an out-of-bounds and potentially
+	 * unaligned access.
+	 */
+	for (i = 0; i < IEEE80211_MMIE_IPNLEN; i++) {
+		if (++ipn[i] != 0)
+			break;	/* no carry into the next byte */
+	}
+}
+
+/*--------------------------------------------------------------------------
+
+   \brief cds_attach_mmie() - attaches the complete MMIE at the end of frame
+
+   The cds_attach_mmie() calculates the entire MMIE and attaches it at the
+   end of Broadcast/Multicast robust management frames.
+
+   \param igtk - pointer to the group key which will be used to calculate
+                the 8 byte MIC.
+   \param ipn - pointer to the IPN, also known as the sequence number
+   \param key_id - key identification number
+   \param frm - pointer to the start of the frame.
+   \param efrm - pointer to the end of the frame.
+   \param frmLen - size of the entire frame.
+
+   \return - this function will return true on success and false on
+            failure.
+
+   --------------------------------------------------------------------------*/
+
+bool
+cds_attach_mmie(uint8_t *igtk, uint8_t *ipn, uint16_t key_id,
+		uint8_t *frm, uint8_t *efrm, uint16_t frmLen)
+{
+	struct ieee80211_mmie *mmie;
+	struct ieee80211_frame *wh;
+	uint8_t aad[AAD_LEN], mic[CMAC_TLEN], *input = NULL;
+	uint8_t previous_ipn[IEEE80211_MMIE_IPNLEN] = { 0 };
+	uint16_t nBytes = 0;
+	int ret = 0;
+	struct crypto_cipher *tfm;
+
+	/*  This is how received frame look like
+	 *
+	 *        <------------frmLen---------------------------->
+	 *
+	 *        +---------------+----------------------+-------+
+	 *        | 802.11 HEADER | Management framebody | MMIE  |
+	 *        +---------------+----------------------+-------+
+	 *                                                       ^
+	 *                                                       |
+	 *                                                      efrm
+	 *   This is how MMIE from above frame look like
+	 *
+	 *
+	 *        <------------ 18 Bytes----------------------------->
+	 *        +--------+---------+---------+-----------+---------+
+	 *        |Element | Length  | Key id  |   IPN     |  MIC    |
+	 *        |  id    |         |         |           |         |
+	 *        +--------+---------+---------+-----------+---------+
+	 * Octet     1         1         2         6            8
+	 *
+	 */
+
+	/* Check if frame is invalid length */
+	if (((efrm - frm) != frmLen) || (frmLen < sizeof(*wh))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid frame length", __func__);
+		return false;
+	}
+	mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));
+
+	/* Copy Element id */
+	mmie->element_id = IEEE80211_ELEMID_MMIE;
+
+	/* Copy Length */
+	mmie->length = sizeof(*mmie) - 2;
+
+	/* Copy Key id */
+	mmie->key_id = key_id;
+
+	/*
+	 * In case of error, revert back to original IPN
+	 * to do that copy the original IPN into previous_ipn
+	 */
+	cdf_mem_copy(&previous_ipn[0], ipn, IEEE80211_MMIE_IPNLEN);
+	cds_increase_seq(ipn);
+	cdf_mem_copy(mmie->sequence_number, ipn, IEEE80211_MMIE_IPNLEN);
+
+	/*
+	 * Calculate MIC and then copy
+	 */
+	tfm = cds_crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: crypto_alloc_cipher failed (%d)", __func__, ret);
+		goto err_tfm;
+	}
+
+	ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
+	if (ret) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: crypto_cipher_setkey failed (%d)", __func__,
+			  ret);
+		goto err_tfm;
+	}
+
+	/* Construct AAD */
+	wh = (struct ieee80211_frame *)frm;
+
+	/* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */
+
+	/* FC type/subtype */
+	aad[0] = wh->i_fc[0];
+	/* Mask FC Retry, PwrMgt, MoreData flags to zero */
+	aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
+				 IEEE80211_FC1_MORE_DATA);
+	/* A1 || A2 || A3 */
+	cdf_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);
+
+	/* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
+	nBytes = AAD_LEN + (frmLen - sizeof(struct ieee80211_frame));
+	input = (uint8_t *) cdf_mem_malloc(nBytes);
+	if (NULL == input) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Memory allocation failed", __func__);
+		ret = CDF_STATUS_E_NOMEM;
+		goto err_tfm;
+	}
+
+	/*
+	 * Copy the AAD, Management frame body, and
+	 * MMIE with 8 bit MIC zeroed out
+	 */
+	cdf_mem_zero(input, nBytes);
+	cdf_mem_copy(input, aad, AAD_LEN);
+	/* Copy Management Frame Body and MMIE without MIC */
+	cdf_mem_copy(input + AAD_LEN,
+		     (uint8_t *) (efrm -
+				  (frmLen - sizeof(struct ieee80211_frame))),
+		     nBytes - AAD_LEN - CMAC_TLEN);
+
+	cds_cmac_calc_mic(tfm, input, nBytes, mic);
+	cdf_mem_free(input);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
+		  "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
+		  mic[0], mic[1], mic[2], mic[3],
+		  mic[4], mic[5], mic[6], mic[7]);
+	cdf_mem_copy(mmie->mic, mic, IEEE80211_MMIE_MICLEN);
+
+err_tfm:
+	if (ret) {
+		cdf_mem_copy(ipn, previous_ipn, IEEE80211_MMIE_IPNLEN);
+	}
+
+	/* A failed cds_crypto_alloc_cipher() leaves tfm as an ERR_PTR(),
+	 * which is non-NULL; the previous 'if (tfm)' check would have
+	 * passed that invalid pointer to the free routine.
+	 */
+	if (!IS_ERR(tfm))
+		cds_crypto_free_cipher(tfm);
+	return !ret ? true : false;
+}
+
+/**
+ * cds_is_mmie_valid() - validate the MMIE of a received robust BC/MC frame
+ * @igtk: group key used to recompute the 8-byte MIC
+ * @ipn: driver's current IPN; updated to the received IPN on success
+ * @frm: pointer to the start of the frame
+ * @efrm: pointer to the end of the frame
+ *
+ * Return: true when the MMIE passes the replay and MIC checks,
+ *         false otherwise
+ */
+bool
+cds_is_mmie_valid(uint8_t *igtk, uint8_t *ipn, uint8_t *frm, uint8_t *efrm)
+{
+	struct ieee80211_mmie *mmie;
+	struct ieee80211_frame *wh;
+	uint8_t *rx_ipn, aad[AAD_LEN], mic[CMAC_TLEN], *input;
+	uint16_t nBytes = 0;
+	int ret = 0;
+	struct crypto_cipher *tfm;
+
+	/* Check if frame is invalid length */
+	if ((efrm < frm) || ((efrm - frm) < sizeof(*wh))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid frame length");
+		return false;
+	}
+
+	mmie = (struct ieee80211_mmie *)(efrm - sizeof(*mmie));
+
+	/* Check Element ID */
+	if ((mmie->element_id != IEEE80211_ELEMID_MMIE) ||
+	    (mmie->length != (sizeof(*mmie) - 2))) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "IE is not Mgmt MIC IE or Invalid length");
+		/* IE is not Mgmt MIC IE or invalid length */
+		return false;
+	}
+
+	/* Validate IPN */
+	rx_ipn = mmie->sequence_number;
+	if (OS_MEMCMP(rx_ipn, ipn, CMAC_IPN_LEN) <= 0) {
+		/* Replay error */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Replay error mmie ipn %02X %02X %02X %02X %02X %02X"
+			  " drvr ipn %02X %02X %02X %02X %02X %02X",
+			  rx_ipn[0], rx_ipn[1], rx_ipn[2], rx_ipn[3], rx_ipn[4],
+			  rx_ipn[5], ipn[0], ipn[1], ipn[2], ipn[3], ipn[4],
+			  ipn[5]);
+		return false;
+	}
+	tfm = cds_crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_alloc_cipher failed (%d)", ret);
+		goto err_tfm;
+	}
+
+	ret = crypto_cipher_setkey(tfm, igtk, AES_KEYSIZE_128);
+	if (ret) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_cipher_setkey failed (%d)", ret);
+		goto err_tfm;
+	}
+
+	/* Construct AAD */
+	wh = (struct ieee80211_frame *)frm;
+
+	/* Generate BIP AAD: FC(masked) || A1 || A2 || A3 */
+
+	/* FC type/subtype */
+	aad[0] = wh->i_fc[0];
+	/* Mask FC Retry, PwrMgt, MoreData flags to zero */
+	aad[1] = wh->i_fc[1] & ~(IEEE80211_FC1_RETRY | IEEE80211_FC1_PWR_MGT |
+				 IEEE80211_FC1_MORE_DATA);
+	/* A1 || A2 || A3 */
+	cdf_mem_copy(aad + 2, wh->i_addr_all, 3 * IEEE80211_ADDR_LEN);
+
+	/* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */
+	nBytes = AAD_LEN + (efrm - (uint8_t *) (wh + 1));
+	input = (uint8_t *) cdf_mem_malloc(nBytes);
+	if (NULL == input) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Memory allocation failed");
+		ret = CDF_STATUS_E_NOMEM;
+		goto err_tfm;
+	}
+
+	/* Copy the AAD, MMIE with 8 bit MIC zeroed out */
+	cdf_mem_zero(input, nBytes);
+	cdf_mem_copy(input, aad, AAD_LEN);
+	cdf_mem_copy(input + AAD_LEN, (uint8_t *) (wh + 1),
+		     nBytes - AAD_LEN - CMAC_TLEN);
+
+	cds_cmac_calc_mic(tfm, input, nBytes, mic);
+	cdf_mem_free(input);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		  "CMAC(T)= %02X %02X %02X %02X %02X %02X %02X %02X",
+		  mic[0], mic[1], mic[2], mic[3],
+		  mic[4], mic[5], mic[6], mic[7]);
+
+	if (OS_MEMCMP(mic, mmie->mic, CMAC_TLEN) != 0) {
+		/* MMIE MIC mismatch */
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "BC/MC MGMT frame MMIE MIC check Failed"
+			  " rmic %02X %02X %02X %02X %02X %02X %02X %02X"
+			  " cmic %02X %02X %02X %02X %02X %02X %02X %02X",
+			  mmie->mic[0], mmie->mic[1], mmie->mic[2],
+			  mmie->mic[3], mmie->mic[4], mmie->mic[5],
+			  mmie->mic[6], mmie->mic[7], mic[0], mic[1], mic[2],
+			  mic[3], mic[4], mic[5], mic[6], mic[7]);
+		/* Route the failure through err_tfm so the cipher handle
+		 * is released; the early 'return false' used here before
+		 * leaked tfm on every MIC mismatch.
+		 */
+		ret = -EINVAL;
+		goto err_tfm;
+	}
+
+	/* Update IPN */
+	cdf_mem_copy(ipn, rx_ipn, CMAC_IPN_LEN);
+
+err_tfm:
+	/* A failed cds_crypto_alloc_cipher() leaves tfm as an ERR_PTR(),
+	 * which is non-NULL; the previous 'if (tfm)' check would have
+	 * passed that invalid pointer to the free routine.
+	 */
+	if (!IS_ERR(tfm))
+		cds_crypto_free_cipher(tfm);
+
+	return !ret ? true : false;
+}
+
+#endif /* WLAN_FEATURE_11W */
+/**
+ * cds_sha1_hmac_str
+ *
+ * FUNCTION:
+ * Generate the HMAC-SHA1 of a string given a key.
+ *
+ * LOGIC:
+ * Standard HMAC processing from RFC 2104. The code is provided in the
+ * appendix of the RFC.
+ *
+ * ASSUMPTIONS:
+ * The RFC is correct.
+ *
+ * @param text text to be hashed
+ * @param textLen length of text
+ * @param key key to use for HMAC
+ * @param keyLen length of key
+ * @param digest holds resultant SHA1 HMAC (20B)
+ *
+ * @return CDF_STATUS_SUCCESS if the operation succeeds
+ *
+ */
+
+/*
+ * Completion context for an asynchronous HMAC-SHA1 request: the waiter
+ * blocks on @completion and reads the final status from @err.
+ */
+struct hmac_sha1_result {
+	struct completion completion;
+	int err;
+};
+
+/**
+ * hmac_sha1_complete() - crypto API callback for an async HMAC-SHA1 request
+ * @req: async request; req->data points at the waiter's hmac_sha1_result
+ * @err: operation status, or -EINPROGRESS for an interim notification
+ *
+ * Records the final status and wakes the waiter.  An -EINPROGRESS
+ * notification is not a completion, so it is ignored.
+ */
+static void hmac_sha1_complete(struct crypto_async_request *req, int err)
+{
+	struct hmac_sha1_result *r = req->data;
+	/* Interim notification only; the real completion comes later. */
+	if (err == -EINPROGRESS)
+		return;
+	r->err = err;
+	complete(&r->completion);
+}
+
+/**
+ * hmac_sha1() - compute HMAC-SHA1 of a buffer via the kernel ahash API
+ * @key: HMAC key
+ * @ksize: key length in bytes; 0 skips the setkey step
+ * @plaintext: data to authenticate
+ * @psize: data length in bytes
+ * @output: caller buffer receiving the first @outlen digest bytes
+ * @outlen: number of digest bytes to copy out (SHA1 produces 20)
+ *
+ * Issues an asynchronous digest request and, when the backend returns
+ * -EINPROGRESS or -EBUSY, blocks on the completion posted by
+ * hmac_sha1_complete() before copying out the result.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int
+hmac_sha1(uint8_t *key, uint8_t ksize, char *plaintext, uint8_t psize,
+	  uint8_t *output, uint8_t outlen)
+{
+	int ret = 0;
+	struct crypto_ahash *tfm;
+	struct scatterlist sg;
+	struct ahash_request *req;
+	/* Zero-initialize err so the -EINPROGRESS/-EBUSY wait path never
+	 * reads an uninitialized status (hmac_md5() already does this).
+	 */
+	struct hmac_sha1_result tresult = {.err = 0 };
+	void *hash_buff = NULL;
+
+	unsigned char hash_result[64];
+	int i;
+
+	memset(output, 0, outlen);
+
+	init_completion(&tresult.completion);
+
+	tfm = cds_crypto_alloc_ahash("hmac(sha1)", CRYPTO_ALG_TYPE_AHASH,
+				 CRYPTO_ALG_TYPE_AHASH_MASK);
+	if (IS_ERR(tfm)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_alloc_ahash failed");
+		ret = PTR_ERR(tfm);
+		goto err_tfm;
+	}
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "failed to allocate request for hmac(sha1)");
+		ret = -ENOMEM;
+		goto err_req;
+	}
+
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   hmac_sha1_complete, &tresult);
+
+	hash_buff = kzalloc(psize, GFP_KERNEL);
+	if (!hash_buff) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "failed to kzalloc hash_buff");
+		ret = -ENOMEM;
+		goto err_hash_buf;
+	}
+
+	memset(hash_result, 0, 64);
+	memcpy(hash_buff, plaintext, psize);
+	sg_init_one(&sg, hash_buff, psize);
+
+	if (ksize) {
+		crypto_ahash_clear_flags(tfm, ~0);
+		ret = cds_crypto_ahash_setkey(tfm, key, ksize);
+		if (ret) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "crypto_ahash_setkey failed");
+			goto err_setkey;
+		}
+	}
+
+	ahash_request_set_crypt(req, &sg, hash_result, psize);
+	ret = cds_crypto_ahash_digest(req);
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "ret 0x%x", ret);
+
+	/* 0 means the digest completed synchronously; -EINPROGRESS/-EBUSY
+	 * mean the backend is asynchronous and we must wait for the
+	 * completion callback.
+	 */
+	switch (ret) {
+	case 0:
+		for (i = 0; i < outlen; i++)
+			output[i] = hash_result[i];
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		ret = wait_for_completion_interruptible(&tresult.completion);
+		if (!ret && !tresult.err) {
+			INIT_COMPLETION(tresult.completion);
+			break;
+		} else {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "wait_for_completion_interruptible failed");
+			if (!ret)
+				ret = tresult.err;
+			goto out;
+		}
+	default:
+		goto out;
+	}
+
+out:
+err_setkey:
+	kfree(hash_buff);
+err_hash_buf:
+	ahash_request_free(req);
+err_req:
+	cds_crypto_free_ahash(tfm);
+err_tfm:
+	return ret;
+}
+
+/**
+ * cds_sha1_hmac_str() - compute the HMAC-SHA1 digest of a data stream
+ * @cryptHandle: crypt handle (unused by this implementation)
+ * @pText: pointer to the data stream
+ * @textLen: length of the data stream in bytes
+ * @pKey: pointer to the authentication key
+ * @keyLen: length of the authentication key in bytes
+ * @digest: caller buffer receiving the 20-byte SHA1 HMAC
+ *
+ * Thin wrapper over hmac_sha1().  Note that both lengths are narrowed
+ * to uint8_t by the casts below, so streams/keys longer than 255 bytes
+ * are silently truncated.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAULT on failure.
+ */
+CDF_STATUS cds_sha1_hmac_str(uint32_t cryptHandle,      /* Handle */
+			     uint8_t *pText,    /* pointer to data stream */
+			     uint32_t textLen,  /* length of data stream */
+			     uint8_t *pKey,     /* pointer to authentication key */
+			     uint32_t keyLen,   /* length of authentication key */
+			     uint8_t digest[CDS_DIGEST_SHA1_SIZE])
+{                               /* caller digest to be filled in */
+	int ret = 0;
+
+	ret = hmac_sha1(pKey,   /* uint8_t *key, */
+			(uint8_t) keyLen,       /* uint8_t ksize, */
+			(char *)pText,  /* char *plaintext, */
+			(uint8_t) textLen,      /* uint8_t psize, */
+			digest, /* uint8_t *output, */
+			CDS_DIGEST_SHA1_SIZE    /* uint8_t outlen */
+			);
+
+	if (ret != 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "hmac_sha1() call failed");
+		return CDF_STATUS_E_FAULT;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_md5_hmac_str
+ *
+ * FUNCTION:
+ * Generate the HMAC-MD5 of a string given a key.
+ *
+ * LOGIC:
+ * Standard HMAC processing from RFC 2104. The code is provided in the
+ * appendix of the RFC.
+ *
+ * ASSUMPTIONS:
+ * The RFC is correct.
+ *
+ * @param text text to be hashed
+ * @param textLen length of text
+ * @param key key to use for HMAC
+ * @param keyLen length of key
+ * @param digest holds resultant MD5 HMAC (16B)
+ *
+ * @return CDF_STATUS_SUCCESS if the operation succeeds
+ *
+ */
+/*
+ * Completion context for an asynchronous HMAC-MD5 request: the waiter
+ * blocks on @completion and reads the final status from @err.
+ */
+struct hmac_md5_result {
+	struct completion completion;
+	int err;
+};
+
+/**
+ * hmac_md5_complete() - crypto API callback for an async HMAC-MD5 request
+ * @req: async request; req->data points at the waiter's hmac_md5_result
+ * @err: operation status, or -EINPROGRESS for an interim notification
+ *
+ * Records the final status and wakes the waiter.  An -EINPROGRESS
+ * notification is not a completion, so it is ignored.
+ */
+static void hmac_md5_complete(struct crypto_async_request *req, int err)
+{
+	struct hmac_md5_result *r = req->data;
+	/* Interim notification only; the real completion comes later. */
+	if (err == -EINPROGRESS)
+		return;
+	r->err = err;
+	complete(&r->completion);
+}
+
+/**
+ * hmac_md5() - compute HMAC-MD5 of a buffer via the kernel ahash API
+ * @key: HMAC key
+ * @ksize: key length in bytes; 0 skips the setkey step
+ * @plaintext: data to authenticate
+ * @psize: data length in bytes
+ * @output: caller buffer receiving the first @outlen digest bytes
+ * @outlen: number of digest bytes to copy out (MD5 produces 16)
+ *
+ * Issues an asynchronous digest request and, when the backend returns
+ * -EINPROGRESS or -EBUSY, blocks on the completion posted by
+ * hmac_md5_complete() before copying out the result.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int
+hmac_md5(uint8_t *key, uint8_t ksize, char *plaintext, uint8_t psize,
+	 uint8_t *output, uint8_t outlen)
+{
+	int ret = 0;
+	struct crypto_ahash *tfm;
+	struct scatterlist sg;
+	struct ahash_request *req;
+	/* err is zero-initialized so the async wait path never reads an
+	 * uninitialized status.
+	 */
+	struct hmac_md5_result tresult = {.err = 0 };
+	void *hash_buff = NULL;
+
+	unsigned char hash_result[64];
+	int i;
+
+	memset(output, 0, outlen);
+
+	init_completion(&tresult.completion);
+
+	tfm = cds_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
+				 CRYPTO_ALG_TYPE_AHASH_MASK);
+	if (IS_ERR(tfm)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_alloc_ahash failed");
+		ret = PTR_ERR(tfm);
+		goto err_tfm;
+	}
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "failed to allocate request for hmac(md5)");
+		ret = -ENOMEM;
+		goto err_req;
+	}
+
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   hmac_md5_complete, &tresult);
+
+	hash_buff = kzalloc(psize, GFP_KERNEL);
+	if (!hash_buff) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "failed to kzalloc hash_buff");
+		ret = -ENOMEM;
+		goto err_hash_buf;
+	}
+
+	memset(hash_result, 0, 64);
+	memcpy(hash_buff, plaintext, psize);
+	sg_init_one(&sg, hash_buff, psize);
+
+	if (ksize) {
+		crypto_ahash_clear_flags(tfm, ~0);
+		ret = cds_crypto_ahash_setkey(tfm, key, ksize);
+		if (ret) {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "crypto_ahash_setkey failed");
+			goto err_setkey;
+		}
+	}
+
+	ahash_request_set_crypt(req, &sg, hash_result, psize);
+	ret = cds_crypto_ahash_digest(req);
+
+	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "ret 0x%x", ret);
+
+	/* 0 means the digest completed synchronously; -EINPROGRESS/-EBUSY
+	 * mean the backend is asynchronous and we must wait for the
+	 * completion callback.
+	 */
+	switch (ret) {
+	case 0:
+		for (i = 0; i < outlen; i++)
+			output[i] = hash_result[i];
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		ret = wait_for_completion_interruptible(&tresult.completion);
+		if (!ret && !tresult.err) {
+			INIT_COMPLETION(tresult.completion);
+			break;
+		} else {
+			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+				  "wait_for_completion_interruptible failed");
+			if (!ret)
+				ret = tresult.err;
+			goto out;
+		}
+	default:
+		goto out;
+	}
+
+out:
+err_setkey:
+	kfree(hash_buff);
+err_hash_buf:
+	ahash_request_free(req);
+err_req:
+	cds_crypto_free_ahash(tfm);
+err_tfm:
+	return ret;
+}
+
+/**
+ * cds_md5_hmac_str() - compute the HMAC-MD5 digest of a data stream
+ * @cryptHandle: crypt handle (unused by this implementation)
+ * @pText: pointer to the data stream
+ * @textLen: length of the data stream in bytes
+ * @pKey: pointer to the authentication key
+ * @keyLen: length of the authentication key in bytes
+ * @digest: caller buffer receiving CDS_DIGEST_MD5_SIZE digest bytes
+ *
+ * Thin wrapper over hmac_md5().  Note that both lengths are narrowed
+ * to uint8_t by the casts below, so streams/keys longer than 255 bytes
+ * are silently truncated.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAULT on failure.
+ */
+CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle,       /* Handle */
+			    uint8_t *pText,     /* pointer to data stream */
+			    uint32_t textLen,   /* length of data stream */
+			    uint8_t *pKey,      /* pointer to authentication key */
+			    uint32_t keyLen,    /* length of authentication key */
+			    uint8_t digest[CDS_DIGEST_MD5_SIZE])
+{                               /* caller digest to be filled in */
+	int ret = 0;
+
+	ret = hmac_md5(pKey,    /* uint8_t *key, */
+		       (uint8_t) keyLen,        /* uint8_t ksize, */
+		       (char *)pText,   /* char *plaintext, */
+		       (uint8_t) textLen,       /* uint8_t psize, */
+		       digest,  /* uint8_t *output, */
+		       CDS_DIGEST_MD5_SIZE      /* uint8_t outlen */
+		       );
+
+	if (ret != 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "hmac_md5() call failed");
+		return CDF_STATUS_E_FAULT;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/*
+ * Completion context for an asynchronous block-cipher request: the
+ * waiter blocks on @completion and reads the final status from @err.
+ */
+struct ecb_aes_result {
+	struct completion completion;
+	int err;
+};
+
+/**
+ * ecb_aes_complete() - crypto API callback for an async cipher request
+ * @req: async request; req->data points at the waiter's ecb_aes_result
+ * @err: operation status, or -EINPROGRESS for an interim notification
+ *
+ * Records the final status and wakes the waiter.  An -EINPROGRESS
+ * notification is not a completion, so it is ignored.
+ */
+static void ecb_aes_complete(struct crypto_async_request *req, int err)
+{
+	struct ecb_aes_result *r = req->data;
+	/* Interim notification only; the real completion comes later. */
+	if (err == -EINPROGRESS)
+		return;
+	r->err = err;
+	complete(&r->completion);
+}
+
+/*--------------------------------------------------------------------------
+
+   \brief cds_encrypt_aes() - Generate AES Encrypted byte stream
+
+   The cds_encrypt_aes() function encrypts one AES block (16 bytes) of
+   pPlainText into pCiphertext using AES-128 in CBC mode with an all-zero
+   IV.
+
+   The output buffer (at least one AES block) must be allocated before
+   calling cds_encrypt_aes().
+
+   \param cryptHandle - crypt handle (currently unused)
+
+   \param pPlainText - pointer to the 16-byte plaintext block
+
+   \param pCiphertext - pointer to the 16-byte ciphertext output buffer
+
+   \param pKey - pointer to the 16-byte AES-128 key
+
+   \return CDF_STATUS_SUCCESS - the block was encrypted successfully
+
+          CDF_STATUS_E_FAULT - cipher allocation, key setup, or the
+          encrypt operation failed
+
+   \sa cds_decrypt_aes
+   --------------------------------------------------------------------------*/
+
+/**
+ * cds_encrypt_aes() - AES-128-CBC encrypt one 16-byte block (zero IV)
+ * @cryptHandle: crypt handle (unused)
+ * @pPlainText: pointer to the 16-byte plaintext block
+ * @pCiphertext: pointer to the 16-byte ciphertext output buffer
+ * @pKey: pointer to the 16-byte AES-128 key
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAULT on failure.
+ */
+CDF_STATUS cds_encrypt_aes(uint32_t cryptHandle,        /* Handle */
+			   uint8_t *pPlainText,         /* pointer to data stream */
+			   uint8_t *pCiphertext, uint8_t *pKey)
+{                               /* pointer to authentication key */
+	/* err zero-initialized so a failed wait never reads garbage */
+	struct ecb_aes_result result = {.err = 0 };
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+	int ret = 0;
+	char iv[IV_SIZE_AES_128];
+	struct scatterlist sg_in;
+	struct scatterlist sg_out;
+
+	init_completion(&result.completion);
+
+	tfm = cds_crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_alloc_ablkcipher failed");
+		ret = PTR_ERR(tfm);
+		goto err_tfm;
+	}
+
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Failed to allocate request for cbc(aes)");
+		ret = -ENOMEM;
+		goto err_req;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					ecb_aes_complete, &result);
+
+	crypto_ablkcipher_clear_flags(tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(tfm, pKey, AES_KEYSIZE_128);
+	if (ret) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_cipher_setkey failed");
+		goto err_setkey;
+	}
+
+	/* CBC with an all-zero IV, per the caller's contract */
+	memset(iv, 0, IV_SIZE_AES_128);
+
+	sg_init_one(&sg_in, pPlainText, AES_BLOCK_SIZE);
+
+	sg_init_one(&sg_out, pCiphertext, AES_BLOCK_SIZE);
+
+	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv);
+
+	/* Capture the status instead of discarding it; an asynchronous
+	 * backend returns -EINPROGRESS/-EBUSY, in which case wait for
+	 * ecb_aes_complete() to post the final status.
+	 */
+	ret = crypto_ablkcipher_encrypt(req);
+	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+		ret = wait_for_completion_interruptible(&result.completion);
+		if (!ret)
+			ret = result.err;
+	}
+
+/* ------------------------------------- */
+err_setkey:
+	cds_ablkcipher_request_free(req);
+err_req:
+	cds_crypto_free_ablkcipher(tfm);
+err_tfm:
+	if (ret != 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s() call failed", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/*--------------------------------------------------------------------------
+
+   \brief cds_decrypt_aes() - Decrypts an AES Encrypted byte stream
+
+   The cds_decrypt_aes() function decrypts one AES block (16 bytes) of
+   pText into pDecrypted using AES-128 in CBC mode with an all-zero IV,
+   reversing cds_encrypt_aes().
+
+   The output buffer (at least one AES block) must be allocated before
+   calling cds_decrypt_aes().
+
+   \param cryptHandle - crypt handle (currently unused)
+
+   \param pText - pointer to the 16-byte ciphertext block
+
+   \param pDecrypted - pointer to the 16-byte plaintext output buffer
+
+   \param pKey - pointer to the 16-byte AES-128 key
+
+   \return CDF_STATUS_SUCCESS - the block was decrypted successfully
+
+          CDF_STATUS_E_FAULT - cipher allocation, key setup, or the
+          decrypt operation failed
+
+   \sa cds_encrypt_aes
+   --------------------------------------------------------------------------*/
+
+/**
+ * cds_decrypt_aes() - AES-128-CBC decrypt one 16-byte block (zero IV)
+ * @cryptHandle: crypt handle (unused)
+ * @pText: pointer to the 16-byte ciphertext block
+ * @pDecrypted: pointer to the 16-byte plaintext output buffer
+ * @pKey: pointer to the 16-byte AES-128 key
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAULT on failure.
+ */
+CDF_STATUS cds_decrypt_aes(uint32_t cryptHandle,        /* Handle */
+			   uint8_t *pText,      /* pointer to data stream */
+			   uint8_t *pDecrypted, uint8_t *pKey)
+{                               /* pointer to authentication key */
+	/* err zero-initialized so a failed wait never reads garbage */
+	struct ecb_aes_result result = {.err = 0 };
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+	int ret = 0;
+	char iv[IV_SIZE_AES_128];
+	struct scatterlist sg_in;
+	struct scatterlist sg_out;
+
+	init_completion(&result.completion);
+
+	tfm = cds_crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm)) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_alloc_ablkcipher failed");
+		ret = PTR_ERR(tfm);
+		goto err_tfm;
+	}
+
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "Failed to allocate request for cbc(aes)");
+		ret = -ENOMEM;
+		goto err_req;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					ecb_aes_complete, &result);
+
+	crypto_ablkcipher_clear_flags(tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(tfm, pKey, AES_KEYSIZE_128);
+	if (ret) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "crypto_cipher_setkey failed");
+		goto err_setkey;
+	}
+
+	/* CBC with an all-zero IV, matching cds_encrypt_aes() */
+	memset(iv, 0, IV_SIZE_AES_128);
+
+	sg_init_one(&sg_in, pText, AES_BLOCK_SIZE);
+
+	sg_init_one(&sg_out, pDecrypted, AES_BLOCK_SIZE);
+
+	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv);
+
+	/* Capture the status instead of discarding it; an asynchronous
+	 * backend returns -EINPROGRESS/-EBUSY, in which case wait for
+	 * ecb_aes_complete() to post the final status.
+	 */
+	ret = crypto_ablkcipher_decrypt(req);
+	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
+		ret = wait_for_completion_interruptible(&result.completion);
+		if (!ret)
+			ret = result.err;
+	}
+
+/* ------------------------------------- */
+err_setkey:
+	cds_ablkcipher_request_free(req);
+err_req:
+	cds_crypto_free_ablkcipher(tfm);
+err_tfm:
+	if (ret != 0) {
+		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+			  "%s() call failed", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * cds_chan_to_freq() - map an IEEE 802.11 channel number to a frequency
+ * @chan: channel number
+ *
+ * Channels below 14 use 5 MHz spacing from the 2.4 GHz base; channel 14
+ * is the special 2484 MHz case; channels 15-26 use 20 MHz spacing from
+ * channel 15's frequency; channel 170 is special-cased; anything else
+ * is treated as a 5 GHz channel at 5 MHz spacing from the 5 GHz base.
+ *
+ * Return: center frequency in MHz.
+ */
+uint32_t cds_chan_to_freq(uint8_t chan)
+{
+	if (chan < CDS_24_GHZ_CHANNEL_14)       /* ch 0 - ch 13 */
+		return CDS_24_GHZ_BASE_FREQ + chan * CDS_CHAN_SPACING_5MHZ;
+	else if (chan == CDS_24_GHZ_CHANNEL_14) /* ch 14 */
+		return CDS_CHAN_14_FREQ;
+	else if (chan < CDS_24_GHZ_CHANNEL_27)  /* ch 15 - ch 26 */
+		return CDS_CHAN_15_FREQ +
+		       (chan - CDS_24_GHZ_CHANNEL_15) * CDS_CHAN_SPACING_20MHZ;
+	else if (chan == CDS_5_GHZ_CHANNEL_170)
+		return CDS_CHAN_170_FREQ;
+	else
+		return CDS_5_GHZ_BASE_FREQ + chan * CDS_CHAN_SPACING_5MHZ;
+}
+
+/**
+ * cds_freq_to_chan() - map a center frequency to an IEEE 802.11 channel
+ * @freq: center frequency in MHz
+ *
+ * Inverse of cds_chan_to_freq().  Frequencies strictly between the
+ * 2.4 GHz base and 2484 MHz map with 5 MHz spacing; 2484 MHz maps to
+ * channel 14; remaining frequencies below the 5 GHz base are treated
+ * as the ch 15-26 range with 20 MHz spacing; everything else uses the
+ * 5 GHz formula.  NOTE(review): no validation is done — a frequency
+ * outside the expected bands still yields a (meaningless) channel.
+ *
+ * Return: channel number.
+ */
+uint8_t cds_freq_to_chan(uint32_t freq)
+{
+	uint8_t chan;
+
+	if (freq > CDS_24_GHZ_BASE_FREQ && freq < CDS_CHAN_14_FREQ)
+		chan = ((freq - CDS_24_GHZ_BASE_FREQ) / CDS_CHAN_SPACING_5MHZ);
+	else if (freq == CDS_CHAN_14_FREQ)
+		chan = CDS_24_GHZ_CHANNEL_14;
+	else if ((freq > CDS_24_GHZ_BASE_FREQ) && (freq < CDS_5_GHZ_BASE_FREQ))
+		chan = (((freq - CDS_CHAN_15_FREQ) / CDS_CHAN_SPACING_20MHZ) +
+			CDS_24_GHZ_CHANNEL_15);
+	else
+		chan = (freq - CDS_5_GHZ_BASE_FREQ) / CDS_CHAN_SPACING_5MHZ;
+	return chan;
+}
+
+/**
+ * cds_chan_to_band() - classify a channel number into a band
+ * @chan: channel number
+ *
+ * Return: CDS_BAND_2GHZ for channels up to and including 14,
+ * CDS_BAND_5GHZ otherwise.
+ */
+uint8_t cds_chan_to_band(uint32_t chan)
+{
+	if (chan <= CDS_24_GHZ_CHANNEL_14)
+		return CDS_BAND_2GHZ;
+
+	return CDS_BAND_5GHZ;
+}

+ 76 - 0
core/cds/src/i_cds_packet.h

@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#if !defined( __I_CDS_PACKET_H )
+#define __I_CDS_PACKET_H
+
+/**=========================================================================
+
+   \file        i_cds_packet.h
+
+   \brief       Connectivity driver services network packet APIs
+
+   Network Protocol packet/buffer internal include file
+
+   ========================================================================*/
+
+/*--------------------------------------------------------------------------
+   Include Files
+   ------------------------------------------------------------------------*/
+#include "cdf_types.h"
+/*
+ * Rx Packet Struct
+ *
+ * Per-frame metadata attached to a received packet as it is handed up
+ * the driver's rx path.
+ */
+typedef struct {
+	uint8_t channel;        /* channel the frame was received on */
+	uint8_t snr;            /* signal-to-noise ratio as reported by fw */
+	uint32_t rssi;          /* received signal strength; units per fw — TODO confirm */
+	uint32_t timestamp;     /* rx timestamp; clock source per fw — TODO confirm */
+	uint8_t *mpdu_hdr_ptr;  /* pointer to the start of the MPDU header */
+	uint8_t *mpdu_data_ptr; /* pointer to the start of the MPDU payload */
+	uint32_t mpdu_len;      /* total MPDU length (header + payload) */
+	uint32_t mpdu_hdr_len;  /* MPDU header length */
+	uint32_t mpdu_data_len; /* MPDU payload length */
+	uint8_t offloadScanLearn : 1;   /* frame learned via offloaded scan */
+	uint8_t roamCandidateInd : 1;   /* frame indicates a roam candidate */
+	uint8_t scan : 1;               /* frame received during a scan */
+	uint8_t scan_src;       /* scan source identifier */
+	uint8_t dpuFeedback;    /* DPU feedback value from the lower layer */
+	uint8_t sessionId;      /* session the frame belongs to */
+	uint32_t tsf_delta;     /* delta from the TSF timer at rx time */
+} t_packetmeta, *tp_packetmeta;
+
+/* implementation specific cds packet type */
+struct cds_pkt_t {
+	/* Packet Meta Information */
+	t_packetmeta pkt_meta;
+
+	/* Pointer to Packet (opaque buffer handle owned by the data path) */
+	void *pkt_buf;
+};
+
+#endif /* !defined( __I_CDS_PACKET_H ) */

+ 571 - 0
core/cds/src/queue.h

@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 1991, 1993
+ *    The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *    @(#)queue.h    8.5 (Berkeley) 8/20/94
+ * $FreeBSD: src/sys/sys/queue.h,v 1.58 2004/04/07 04:19:49 imp Exp $
+ */
+
+#if !defined(__NetBSD__)
+#ifndef _SYS_QUEUE_H_
+#define    _SYS_QUEUE_H_
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ *                SLIST    LIST    STAILQ    TAILQ
+ * _HEAD            +    +    +    +
+ * _HEAD_INITIALIZER        +    +    +    +
+ * _ENTRY            +    +    +    +
+ * _INIT            +    +    +    +
+ * _EMPTY            +    +    +    +
+ * _FIRST            +    +    +    +
+ * _NEXT            +    +    +    +
+ * _PREV            -    -    -    +
+ * _LAST            -    -    +    +
+ * _FOREACH            +    +    +    +
+ * _FOREACH_SAFE        +    +    +    +
+ * _FOREACH_REVERSE        -    -    -    +
+ * _FOREACH_REVERSE_SAFE    -    -    -    +
+ * _INSERT_HEAD            +    +    +    +
+ * _INSERT_BEFORE        -    +    -    +
+ * _INSERT_AFTER        +    +    +    +
+ * _INSERT_TAIL            -    -    +    +
+ * _CONCAT            -    -    +    +
+ * _REMOVE_HEAD            +    -    +    -
+ * _REMOVE            +    +    +    +
+ *
+ */
+#define    QUEUE_MACRO_DEBUG 0
+#if QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+	char *lastfile;
+	int lastline;
+	char *prevfile;
+	int prevline;
+};
+
+#define    TRACEBUF    struct qm_trace trace;
+#define    TRASHIT(x)    do {(x) = (void *)NULL; } while (0)
+
+#define    QMD_TRACE_HEAD(head) do {			\
+		(head)->trace.prevline = (head)->trace.lastline;	\
+		(head)->trace.prevfile = (head)->trace.lastfile;	\
+		(head)->trace.lastline = __LINE__;		  \
+		(head)->trace.lastfile = __FILE__;		  \
+} while (0)
+
+#define    QMD_TRACE_ELEM(elem) do {			\
+		(elem)->trace.prevline = (elem)->trace.lastline;	\
+		(elem)->trace.prevfile = (elem)->trace.lastfile;	\
+		(elem)->trace.lastline = __LINE__;		  \
+		(elem)->trace.lastfile = __FILE__;		  \
+} while (0)
+
+#else
+#define    QMD_TRACE_ELEM(elem)
+#define    QMD_TRACE_HEAD(head)
+#define    TRACEBUF
+#define TRASHIT(x) do {(x) = (void *)0; } while (0)
+#endif /* QUEUE_MACRO_DEBUG */
+
+#ifdef ATHR_RNWF
+/* NDIS contains a defn for SLIST_ENTRY and SINGLE_LIST_ENTRY */
+#endif
+
+/*
+ * Singly-linked List declarations.
+ */
+#define    SLIST_HEAD(name, type)			 \
+	struct name {				     \
+		struct type *slh_first; /* first element */	       \
+	}
+
+#define    SLIST_HEAD_INITIALIZER(head)			   \
+	{ NULL }
+
+#define    SING_LIST_ENTRY(type)			\
+	struct {				\
+		struct type *sle_next; /* next element */	     \
+	}
+
+/*
+ * Singly-linked List functions.
+ */
+#define    SLIST_EMPTY(head)    ((head)->slh_first == NULL)
+
+#define    SLIST_FIRST(head)    ((head)->slh_first)
+
+#define    SLIST_FOREACH(var, head, field)		      \
+	for ((var) = SLIST_FIRST((head));		 \
+	     (var);			       \
+	     (var) = SLIST_NEXT((var), field))
+
+#define    SLIST_FOREACH_SAFE(var, head, field, tvar)		 \
+	for ((var) = SLIST_FIRST((head));		 \
+	     (var) && ((tvar) = SLIST_NEXT((var), field), 1);	     \
+	     (var) = (tvar))
+
+#define    SLIST_FOREACH_PREVPTR(var, varp, head, field)	    \
+	for ((varp) = &SLIST_FIRST((head));		   \
+	     ((var) = *(varp)) != NULL;			   \
+	     (varp) = &SLIST_NEXT((var), field))
+
+#define    SLIST_INIT(head) do {			\
+		SLIST_FIRST((head)) = NULL;		       \
+} while (0)
+
+#define    SLIST_INSERT_AFTER(slistelm, elm, field) do {	    \
+		SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);    \
+		SLIST_NEXT((slistelm), field) = (elm);		      \
+} while (0)
+
+#define    SLIST_INSERT_HEAD(head, elm, field) do {	       \
+		SLIST_NEXT((elm), field) = SLIST_FIRST((head));		   \
+		SLIST_FIRST((head)) = (elm);			\
+} while (0)
+
+#define    SLIST_NEXT(elm, field)    ((elm)->field.sle_next)
+
+#define    SLIST_REMOVE(head, elm, type, field) do {		\
+		if (SLIST_FIRST((head)) == (elm)) {		   \
+			SLIST_REMOVE_HEAD((head), field);	     \
+		}				 \
+		else {				      \
+			struct type *curelm = SLIST_FIRST((head));	  \
+			while (SLIST_NEXT(curelm, field) != (elm))	  \
+				curelm = SLIST_NEXT(curelm, field);	   \
+			SLIST_NEXT(curelm, field) =		   \
+				SLIST_NEXT(SLIST_NEXT(curelm, field), field);	 \
+		}				 \
+} while (0)
+
+#define    SLIST_REMOVE_HEAD(head, field) do {		      \
+		SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	 \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define    STAILQ_HEAD(name, type)			  \
+	struct name {				     \
+		struct type *stqh_first; /* first element */		\
+		struct type **stqh_last; /* addr of last next element */	 \
+	}
+
+#define    STAILQ_HEAD_INITIALIZER(head)		    \
+	{ NULL, &(head).stqh_first }
+
+#define    STAILQ_ENTRY(type)			     \
+	struct {				\
+		struct type *stqe_next; /* next element */	      \
+	}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define    STAILQ_CONCAT(head1, head2) do {		   \
+		if (!STAILQ_EMPTY((head2))) {			 \
+			*(head1)->stqh_last = (head2)->stqh_first;	  \
+			(head1)->stqh_last = (head2)->stqh_last;	\
+			STAILQ_INIT((head2));			 \
+		}				 \
+} while (0)
+
+#define    STAILQ_EMPTY(head)    ((head)->stqh_first == NULL)
+
+#define    STAILQ_FIRST(head)    ((head)->stqh_first)
+
+#define    STAILQ_FOREACH(var, head, field)		   \
+	for((var) = STAILQ_FIRST((head));		 \
+	    (var);			      \
+	    (var) = STAILQ_NEXT((var), field))
+
+#define    STAILQ_FOREACH_SAFE(var, head, field, tvar)		  \
+	for ((var) = STAILQ_FIRST((head));		  \
+	     (var) && ((tvar) = STAILQ_NEXT((var), field), 1);	      \
+	     (var) = (tvar))
+
+#define    STAILQ_INIT(head) do {			 \
+		STAILQ_FIRST((head)) = NULL;			\
+		(head)->stqh_last = &STAILQ_FIRST((head));	      \
+} while (0)
+
+#define    STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {	    \
+		if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL) \
+			(head)->stqh_last = &STAILQ_NEXT((elm), field);	       \
+		STAILQ_NEXT((tqelm), field) = (elm);		    \
+} while (0)
+
+#define    STAILQ_INSERT_HEAD(head, elm, field) do {		\
+		if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	   \
+			(head)->stqh_last = &STAILQ_NEXT((elm), field);	       \
+		STAILQ_FIRST((head)) = (elm);			 \
+} while (0)
+
+#define    STAILQ_INSERT_TAIL(head, elm, field) do {		\
+		STAILQ_NEXT((elm), field) = NULL;		 \
+		*(head)->stqh_last = (elm);		       \
+		(head)->stqh_last = &STAILQ_NEXT((elm), field);		   \
+} while (0)
+
+#define    STAILQ_LAST(head, type, field)		     \
+	(STAILQ_EMPTY((head)) ?			       \
+	 NULL :				   \
+	 ((struct type *)		     \
+	  ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define    STAILQ_NEXT(elm, field)    ((elm)->field.stqe_next)
+
+#define    STAILQ_REMOVE(head, elm, type, field) do {		 \
+		if (STAILQ_FIRST((head)) == (elm)) {		    \
+			STAILQ_REMOVE_HEAD((head), field);	      \
+		}				 \
+		else {				      \
+			struct type *curelm = STAILQ_FIRST((head));	   \
+			while (STAILQ_NEXT(curelm, field) != (elm))	   \
+				curelm = STAILQ_NEXT(curelm, field);	    \
+			if ((STAILQ_NEXT(curelm, field) =	     \
+				     STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL) \
+				(head)->stqh_last = &STAILQ_NEXT((curelm), field); \
+		}				 \
+} while (0)
+
+#define    STAILQ_REMOVE_AFTER(head, elm, field) do {		 \
+		if (STAILQ_NEXT(elm, field)) {	      \
+			if ((STAILQ_NEXT(elm, field) =		  \
+				     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
+				(head)->stqh_last = &STAILQ_NEXT((elm), field);	\
+		}				 \
+} while (0)
+
+#define    STAILQ_REMOVE_HEAD(head, field) do {		       \
+		if ((STAILQ_FIRST((head)) =		       \
+			     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)	       \
+			(head)->stqh_last = &STAILQ_FIRST((head));	  \
+} while (0)
+
+#define    STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {	      \
+		if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL)	   \
+			(head)->stqh_last = &STAILQ_FIRST((head));	  \
+} while (0)
+
+/*
+ * List declarations.
+ *
+ * A LIST is a doubly-linked list: each entry stores a forward pointer
+ * (le_next) and the address of the previous entry's next pointer
+ * (le_prev), which makes O(1) removal possible without knowing the
+ * list head.
+ */
+#define    ATH_LIST_HEAD(name, type)			\
+	struct name {				     \
+		struct type *lh_first; /* first element */	      \
+	}
+
+/* Alias to the BSD name unless the platform already provides one. */
+#ifndef LIST_HEAD
+#define LIST_HEAD ATH_LIST_HEAD
+#endif
+
+#define    LIST_HEAD_INITIALIZER(head)			  \
+	{ NULL }
+
+#define    LIST_ENTRY(type)			   \
+	struct {				\
+		struct type *le_next; /* next element */	    \
+		struct type **le_prev; /* address of previous next element */	  \
+	}
+
+/*
+ * List functions.
+ */
+
+#define    LIST_EMPTY(head)    ((head)->lh_first == NULL)
+
+#define    LIST_FIRST(head)    ((head)->lh_first)
+
+#define    LIST_FOREACH(var, head, field)		     \
+	for ((var) = LIST_FIRST((head));		\
+	     (var);			       \
+	     (var) = LIST_NEXT((var), field))
+
+/* Safe variant: (tvar) caches the successor so (var) may be removed
+ * or freed inside the loop body. */
+#define    LIST_FOREACH_SAFE(var, head, field, tvar)		\
+	for ((var) = LIST_FIRST((head));		\
+	     (var) && ((tvar) = LIST_NEXT((var), field), 1);	    \
+	     (var) = (tvar))
+
+#define    LIST_INIT(head) do {			       \
+		LIST_FIRST((head)) = NULL;		      \
+} while (0)
+
+/* Insert (elm) immediately after (listelm). */
+#define    LIST_INSERT_AFTER(listelm, elm, field) do {		  \
+		if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL) \
+			LIST_NEXT((listelm), field)->field.le_prev =	    \
+				&LIST_NEXT((elm), field);		 \
+		LIST_NEXT((listelm), field) = (elm);		    \
+		(elm)->field.le_prev = &LIST_NEXT((listelm), field);	    \
+} while (0)
+
+/* Insert (elm) immediately before (listelm); no head pointer needed. */
+#define    LIST_INSERT_BEFORE(listelm, elm, field) do {		   \
+		(elm)->field.le_prev = (listelm)->field.le_prev;	\
+		LIST_NEXT((elm), field) = (listelm);		    \
+		*(listelm)->field.le_prev = (elm);		  \
+		(listelm)->field.le_prev = &LIST_NEXT((elm), field);	    \
+} while (0)
+
+/* Insert (elm) at the front of the list. */
+#define    LIST_INSERT_HEAD(head, elm, field) do {		  \
+		if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)    \
+			LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field); \
+		LIST_FIRST((head)) = (elm);		       \
+		(elm)->field.le_prev = &LIST_FIRST((head));	       \
+} while (0)
+
+#define    LIST_NEXT(elm, field)    ((elm)->field.le_next)
+
+/* Unlink (elm) in O(1); the head is not needed because le_prev points
+ * directly at the previous link's next pointer. */
+#define    LIST_REMOVE(elm, field) do {			   \
+		if (LIST_NEXT((elm), field) != NULL)		    \
+			LIST_NEXT((elm), field)->field.le_prev =	 \
+				(elm)->field.le_prev;		     \
+		*(elm)->field.le_prev = LIST_NEXT((elm), field);	\
+} while (0)
+
+/*
+ * Tail queue declarations.
+ *
+ * A TAILQ is a doubly-linked queue with O(1) insertion at either end
+ * and O(1) removal anywhere.  HEADNAME/COPY_HEADNAME expand to nothing
+ * here; TRACEBUF, QMD_TRACE_HEAD/ELEM and TRASHIT are debug hooks
+ * presumably defined earlier in this header -- in non-debug builds
+ * they are expected to be no-ops (not visible in this chunk).
+ */
+#define  HEADNAME
+#define  COPY_HEADNAME(head)
+
+#define    TAILQ_HEAD(name, type)			 \
+	struct name {				     \
+		struct type *tqh_first; /* first element */	       \
+		struct type **tqh_last; /* addr of last next element */	\
+		HEADNAME			    \
+			TRACEBUF			    \
+	}
+
+#define    TAILQ_HEAD_INITIALIZER(head)			   \
+	{ NULL, &(head).tqh_first }
+
+#define    TAILQ_ENTRY(type)			    \
+	struct {				\
+		struct type *tqe_next; /* next element */	     \
+		struct type **tqe_prev; /* address of previous next element */	   \
+		TRACEBUF			    \
+	}
+
+/*
+ * Tail queue functions.
+ */
+
+#define    TAILQ_EMPTY(head)    ((head)->tqh_first == NULL)
+
+#define    TAILQ_FIRST(head)    ((head)->tqh_first)
+
+#define    TAILQ_FOREACH(var, head, field)		      \
+	for ((var) = TAILQ_FIRST((head));		 \
+	     (var);			       \
+	     (var) = TAILQ_NEXT((var), field))
+
+/* Safe variant: (tvar) caches the successor so (var) may be removed
+ * inside the loop body. */
+#define    TAILQ_FOREACH_SAFE(var, head, field, tvar)		 \
+	for ((var) = TAILQ_FIRST((head));		 \
+	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);	     \
+	     (var) = (tvar))
+
+/* Iterate tail-to-head; needs the head struct tag (headname) so that
+ * TAILQ_LAST/TAILQ_PREV can reinterpret the prev pointers. */
+#define    TAILQ_FOREACH_REVERSE(var, head, headname, field)	    \
+	for ((var) = TAILQ_LAST((head), headname);	      \
+	     (var);			       \
+	     (var) = TAILQ_PREV((var), headname, field))
+
+#define    TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	   \
+	for ((var) = TAILQ_LAST((head), headname);	      \
+	     (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	   \
+	     (var) = (tvar))
+
+#define    TAILQ_INIT(head) do {			\
+		TAILQ_FIRST((head)) = NULL;		       \
+		(head)->tqh_last = &TAILQ_FIRST((head));	    \
+		COPY_HEADNAME(head);			     \
+		QMD_TRACE_HEAD(head);			     \
+} while (0)
+
+/* Insert (elm) after (listelm); updates tqh_last when (listelm) was
+ * the tail. */
+#define    TAILQ_INSERT_AFTER(head, listelm, elm, field) do {	     \
+		if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL) \
+			TAILQ_NEXT((elm), field)->field.tqe_prev =	   \
+				&TAILQ_NEXT((elm), field);		  \
+		else {				      \
+			(head)->tqh_last = &TAILQ_NEXT((elm), field);	     \
+			QMD_TRACE_HEAD(head);			 \
+		}				 \
+		TAILQ_NEXT((listelm), field) = (elm);		     \
+		(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);	      \
+		QMD_TRACE_ELEM(&(elm)->field);			  \
+		QMD_TRACE_ELEM(&listelm->field);		\
+} while (0)
+
+/* Insert (elm) before (listelm); no head needed because the queue is
+ * doubly linked through tqe_prev. */
+#define    TAILQ_INSERT_BEFORE(listelm, elm, field) do {	    \
+		(elm)->field.tqe_prev = (listelm)->field.tqe_prev;	  \
+		TAILQ_NEXT((elm), field) = (listelm);		     \
+		*(listelm)->field.tqe_prev = (elm);		   \
+		(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);	      \
+		QMD_TRACE_ELEM(&(elm)->field);			  \
+		QMD_TRACE_ELEM(&listelm->field);		\
+} while (0)
+
+/* Insert (elm) at the front; fixes up tqh_last for a formerly empty
+ * queue. */
+#define    TAILQ_INSERT_HEAD(head, elm, field) do {	       \
+		if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	 \
+			TAILQ_FIRST((head))->field.tqe_prev =		 \
+				&TAILQ_NEXT((elm), field);		  \
+		else				    \
+			(head)->tqh_last = &TAILQ_NEXT((elm), field);	     \
+		TAILQ_FIRST((head)) = (elm);			\
+		(elm)->field.tqe_prev = &TAILQ_FIRST((head));		 \
+		QMD_TRACE_HEAD(head);			     \
+		QMD_TRACE_ELEM(&(elm)->field);			  \
+} while (0)
+
+/* O(1) append: tqh_last always points at the tail's next pointer. */
+#define    TAILQ_INSERT_TAIL(head, elm, field) do {	       \
+		TAILQ_NEXT((elm), field) = NULL;		\
+		(elm)->field.tqe_prev = (head)->tqh_last;	     \
+		*(head)->tqh_last = (elm);		      \
+		(head)->tqh_last = &TAILQ_NEXT((elm), field);		 \
+		QMD_TRACE_HEAD(head);			     \
+		QMD_TRACE_ELEM(&(elm)->field);			  \
+} while (0)
+
+/* Last element: reinterpret the prev-pointer slot as a head struct of
+ * type (headname) and follow its tqh_last (classic BSD trick). */
+#define    TAILQ_LAST(head, headname)			 \
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define    TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define    TAILQ_PREV(elm, headname, field)		   \
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+/* Unlink (elm) in O(1); fixes up tqh_last when (elm) was the tail and
+ * poisons the removed links via TRASHIT. */
+#define    TAILQ_REMOVE(head, elm, field) do {		      \
+		if ((TAILQ_NEXT((elm), field)) != NULL)		       \
+			TAILQ_NEXT((elm), field)->field.tqe_prev =	   \
+				(elm)->field.tqe_prev;		      \
+		else {				      \
+			(head)->tqh_last = (elm)->field.tqe_prev;	 \
+			QMD_TRACE_HEAD(head);			 \
+		}				 \
+		*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);	  \
+		TRASHIT((elm)->field.tqe_next);			   \
+		TRASHIT((elm)->field.tqe_prev);			   \
+		QMD_TRACE_ELEM(&(elm)->field);			  \
+} while (0)
+
+/* Splice head2 onto the end of head1 and reinitialize head2. */
+#define TAILQ_CONCAT(head1, head2, field)  do {			 \
+		if (!TAILQ_EMPTY(head2)) {				     \
+			*(head1)->tqh_last = (head2)->tqh_first;		 \
+			(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	 \
+			(head1)->tqh_last  = (head2)->tqh_last;			 \
+			TAILQ_INIT((head2));					 \
+		}							     \
+} while (0)
+
+#ifdef _KERNEL
+
+/*
+ * XXX insque() and remque() are an old way of handling certain queues.
+ * They bogusly assume that all queue heads look alike.
+ */
+
+/* Generic circular-queue link pair that insque()/remque() overlay on
+ * top of the caller's structures. */
+struct quehead {
+	struct quehead *qh_link;
+	struct quehead *qh_rlink;
+};
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+/* Insert element (a) immediately after element (or head) (b) in a
+ * circular doubly-linked queue. */
+static __inline void insque(void *a, void *b)
+{
+	struct quehead *element = (struct quehead *)a,
+	*head = (struct quehead *)b;
+
+	element->qh_link = head->qh_link;
+	element->qh_rlink = head;
+	head->qh_link = element;
+	element->qh_link->qh_rlink = element;
+}
+
+/* Unlink element (a) from its queue.  Only qh_rlink is cleared
+ * afterwards; qh_link is left pointing at the old successor. */
+static __inline void remque(void *a)
+{
+	struct quehead *element = (struct quehead *)a;
+
+	element->qh_link->qh_rlink = element->qh_rlink;
+	element->qh_rlink->qh_link = element->qh_link;
+	element->qh_rlink = 0;
+}
+
+#else                           /* !(__GNUC__ || __INTEL_COMPILER) */
+
+/* Out-of-line fallbacks for compilers without __inline support. */
+void insque(void *a, void *b);
+void remque(void *a);
+
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
+#else                           /* !__NetBSD__ */
+#include_next <sys/queue.h>
+#endif /* __NetBSD__ */

+ 562 - 0
core/dp/htt/htt.c

@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt.c
+ * @brief Provide functions to create+init and destroy a HTT instance.
+ * @details
+ *  This file contains functions for creating a HTT instance; initializing
+ *  the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
+ *  connecting the HTT service with HTC; and deleting a HTT instance.
+ */
+
+#include <cdf_memory.h>         /* cdf_mem_malloc */
+#include <cdf_types.h>          /* cdf_device_t, cdf_print */
+
+#include <htt.h>                /* htt_tx_msdu_desc_t */
+#include <ol_cfg.h>
+#include <ol_txrx_htt_api.h>    /* ol_tx_dowload_done_ll, etc. */
+#include <ol_htt_api.h>
+
+#include <htt_internal.h>
+#include "hif.h"
+
+#define HTT_HTC_PKT_POOL_INIT_SIZE 100  /* enough for a large A-MPDU */
+
+/* Indirect handle for the rx-ring-configure message sender; assigned
+ * (to htt_h2t_rx_ring_cfg_msg_ll) in htt_attach() and invoked from
+ * htt_attach_target(). */
+A_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+
+#ifdef IPA_OFFLOAD
+/* Follow a successful rx-ring config with the IPA micro-controller
+ * resource config message when uc offload is enabled in the pdev cfg;
+ * any failure status is passed through unchanged. */
+A_STATUS htt_ipa_config(htt_pdev_handle pdev, A_STATUS status)
+{
+	if ((A_OK == status) &&
+	    ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+		status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
+	return status;
+}
+
+#define HTT_IPA_CONFIG htt_ipa_config
+#else
+#define HTT_IPA_CONFIG(pdev, status) status     /* no-op */
+#endif /* IPA_OFFLOAD */
+
+/**
+ * htt_htc_pkt_alloc() - take an HTC packet wrapper from the freelist,
+ * or heap-allocate one when the freelist is empty.
+ * @pdev: HTT pdev owning the freelist (freelist access is serialized
+ *        by htt_tx_mutex; the fallback allocation is done unlocked)
+ *
+ * Return: pointer to the pkt member of the wrapper union.
+ * NOTE(review): if cdf_mem_malloc() fails, pkt is NULL and the return
+ * value is computed from a NULL base; this yields NULL only if u.pkt
+ * is the union's first member -- verify, and callers must NULL-check.
+ */
+struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt_union *pkt = NULL;
+
+	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+	if (pdev->htt_htc_pkt_freelist) {
+		pkt = pdev->htt_htc_pkt_freelist;
+		pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
+	}
+	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+
+	if (pkt == NULL)
+		pkt = cdf_mem_malloc(sizeof(*pkt));
+
+	return &pkt->u.pkt;     /* not actually a dereference */
+}
+
+/**
+ * htt_htc_pkt_free() - return a packet wrapper to the head of the
+ * freelist (LIFO) under htt_tx_mutex.  The memory is not released
+ * until htt_htc_pkt_pool_free() drains the list.
+ */
+void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
+{
+	struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
+
+	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+	u_pkt->u.next = pdev->htt_htc_pkt_freelist;
+	pdev->htt_htc_pkt_freelist = u_pkt;
+	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+}
+
+/**
+ * htt_htc_pkt_pool_free() - free every wrapper on the freelist and
+ * reset the list head.  Walks without taking htt_tx_mutex --
+ * presumably only called from htt_detach() when no concurrent users
+ * remain; verify against callers.
+ */
+void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt_union *pkt, *next;
+	pkt = pdev->htt_htc_pkt_freelist;
+	while (pkt) {
+		next = pkt->u.next;
+		cdf_mem_free(pkt);
+		pkt = next;
+	}
+	pdev->htt_htc_pkt_freelist = NULL;
+}
+
+#ifdef ATH_11AC_TXCOMPACT
+/**
+ * htt_htc_misc_pkt_list_add() - push a packet (with its attached
+ * network buffer) onto the misc list (LIFO) under htt_tx_mutex.
+ * NOTE(review): in the empty-list branch u_pkt->u.next is NOT set to
+ * NULL, so the walk in htt_htc_misc_pkt_pool_free() relies on u.next
+ * already being NULL in a freshly added tail entry -- verify.
+ */
+void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
+{
+	struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
+
+	HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+	if (pdev->htt_htc_pkt_misclist) {
+		u_pkt->u.next = pdev->htt_htc_pkt_misclist;
+		pdev->htt_htc_pkt_misclist = u_pkt;
+	} else {
+		pdev->htt_htc_pkt_misclist = u_pkt;
+	}
+	HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+}
+
+/**
+ * htt_htc_misc_pkt_pool_free() - drain the misc list: for each entry,
+ * DMA-unmap and free the attached network buffer, then free the
+ * wrapper itself.  No mutex is taken -- assumed detach-time only;
+ * verify against callers.
+ */
+void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt_union *pkt, *next;
+	cdf_nbuf_t netbuf;
+	pkt = pdev->htt_htc_pkt_misclist;
+
+	while (pkt) {
+		next = pkt->u.next;
+		netbuf = (cdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
+		cdf_nbuf_unmap(pdev->osdev, netbuf, CDF_DMA_TO_DEVICE);
+		cdf_nbuf_free(netbuf);
+		cdf_mem_free(pkt);
+		pkt = next;
+	}
+	pdev->htt_htc_pkt_misclist = NULL;
+}
+#endif
+
+/**
+ * htt_pdev_alloc() - allocate HTT pdev
+ * @txrx_pdev: txrx pdev
+ * @ctrl_pdev: cfg pdev
+ * @htc_pdev: HTC pdev
+ * @osdev: os device
+ *
+ * Return: HTT pdev handle
+ */
+htt_pdev_handle
+htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
+	   ol_pdev_handle ctrl_pdev,
+	   HTC_HANDLE htc_pdev, cdf_device_t osdev)
+{
+	struct htt_pdev_t *pdev;
+
+	pdev = cdf_mem_malloc(sizeof(*pdev));
+	if (!pdev)
+		goto fail1;
+
+	/* record the handles of the cooperating layers */
+	pdev->osdev = osdev;
+	pdev->ctrl_pdev = ctrl_pdev;
+	pdev->txrx_pdev = txrx_pdev;
+	pdev->htc_pdev = htc_pdev;
+
+	cdf_mem_set(&pdev->stats, sizeof(pdev->stats), 0);
+	pdev->htt_htc_pkt_freelist = NULL;
+#ifdef ATH_11AC_TXCOMPACT
+	pdev->htt_htc_pkt_misclist = NULL;
+#endif
+	/* request tx completions unless frames are freed at download time */
+	pdev->cfg.default_tx_comp_req =
+			!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);
+
+	pdev->cfg.is_full_reorder_offload =
+			ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
+	cdf_print("is_full_reorder_offloaded? %d\n",
+		  (int)pdev->cfg.is_full_reorder_offload);
+
+	pdev->cfg.ce_classify_enabled =
+		ol_cfg_is_ce_classify_enabled(ctrl_pdev);
+	cdf_print("ce_classify_enabled %d\n",
+		  pdev->cfg.ce_classify_enabled);
+
+	pdev->targetdef = htc_get_targetdef(htc_pdev);
+#if defined(HELIUMPLUS_PADDR64)
+	/* TODO: OKA: Remove hard-coding */
+	HTT_SET_WIFI_IP(pdev, 2, 0);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+	/*
+	 * Connect to HTC service.
+	 * This has to be done before calling htt_rx_attach,
+	 * since htt_rx_attach involves sending a rx ring configure
+	 * message to the target.
+	 */
+/* AR6004 doesn't need the HTT layer. */
+#ifndef AR6004_HW
+	if (htt_htc_attach(pdev))
+		goto fail2;
+#endif
+
+	return pdev;
+
+/* NOTE(review): fail2 is unreachable (unused label) when AR6004_HW
+ * is defined, since the only goto to it is compiled out. */
+fail2:
+	cdf_mem_free(pdev);
+
+fail1:
+	return NULL;
+
+}
+
+/**
+ * htt_attach() - Allocate and setup HTT TX/RX descriptors
+ * @pdev: pdev ptr
+ * @desc_pool_size: size of tx descriptors
+ *
+ * Return: 0 for success or error code.
+ */
+int
+htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
+{
+	int i;
+	enum wlan_frm_fmt frm_type;
+	int ret = 0;
+
+	/* tx descriptor pool first; rx attach may depend on tx state */
+	ret = htt_tx_attach(pdev, desc_pool_size);
+	if (ret)
+		goto fail1;
+
+	ret = htt_rx_attach(pdev);
+	if (ret)
+		goto fail2;
+
+	HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
+	HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
+
+	/* pre-allocate some HTC_PACKET objects */
+	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
+		struct htt_htc_pkt_union *pkt;
+		pkt = cdf_mem_malloc(sizeof(*pkt));
+		/* a partial pool is fine: htt_htc_pkt_alloc falls back
+		 * to the heap when the freelist is empty */
+		if (!pkt)
+			break;
+		htt_htc_pkt_free(pdev, &pkt->u.pkt);
+	}
+
+	/*
+	 * LL - download just the initial portion of the frame.
+	 * Download enough to cover the encapsulation headers checked
+	 * by the target's tx classification descriptor engine.
+	 */
+
+	/* account for the 802.3 or 802.11 header */
+	frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
+	if (frm_type == wlan_frm_fmt_native_wifi) {
+		pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
+	} else if (frm_type == wlan_frm_fmt_802_3) {
+		pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
+	} else {
+		cdf_print("Unexpected frame type spec: %d\n", frm_type);
+		HTT_ASSERT0(0);
+	}
+	/*
+	 * Account for the optional L2 / ethernet header fields:
+	 * 802.1Q, LLC/SNAP
+	 */
+	pdev->download_len +=
+		HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;
+
+	/*
+	 * Account for the portion of the L3 (IP) payload that the
+	 * target needs for its tx classification.
+	 */
+	pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);
+
+	/*
+	 * Account for the HTT tx descriptor, including the
+	 * HTC header + alignment padding.
+	 */
+	pdev->download_len += sizeof(struct htt_host_tx_desc_t);
+
+	/*
+	 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
+	 * to apply for all requeued tx frames.  Thus,
+	 * pdev->download_len has to be the largest download length of
+	 * any tx frame that will be downloaded.
+	 * This maximum download length is for management tx frames,
+	 * which have an 802.11 header.
+	 */
+#ifdef ATH_11AC_TXCOMPACT
+	pdev->download_len = sizeof(struct htt_host_tx_desc_t)
+		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+		+ HTT_TX_HDR_SIZE_802_1Q
+		+ HTT_TX_HDR_SIZE_LLC_SNAP
+		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+#endif
+	pdev->tx_send_complete_part2 = ol_tx_download_done_ll;
+
+	/*
+	 * For LL, the FW rx desc is alongside the HW rx desc fields in
+	 * the htt_host_rx_desc_base struct.
+	 */
+	pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;
+
+	/* bind the rx-ring-configure sender used by htt_attach_target() */
+	htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
+
+	return 0;
+
+fail2:
+	htt_tx_detach(pdev);
+
+fail1:
+	return ret;
+}
+
+/**
+ * htt_attach_target() - bring up host<->target HTT messaging: version
+ * request handshake, optional frag-desc bank config (HELIUMPLUS),
+ * rx ring config, and (when enabled) the IPA uc resource config.
+ *
+ * Return: A_OK on success, or the first failing message's status.
+ */
+A_STATUS htt_attach_target(htt_pdev_handle pdev)
+{
+	A_STATUS status;
+
+	status = htt_h2t_ver_req_msg(pdev);
+	if (status != A_OK)
+		return status;
+
+#if defined(HELIUMPLUS_PADDR64)
+	/*
+	 * Send the frag_desc info to target.
+	 */
+	htt_h2t_frag_desc_bank_cfg_msg(pdev);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+
+	/*
+	 * If applicable, send the rx ring config message to the target.
+	 * The host could wait for the HTT version number confirmation message
+	 * from the target before sending any further HTT messages, but it's
+	 * reasonable to assume that the host and target HTT version numbers
+	 * match, and proceed immediately with the remaining configuration
+	 * handshaking.
+	 */
+
+	status = htt_h2t_rx_ring_cfg_msg(pdev);
+	status = HTT_IPA_CONFIG(pdev, status);
+
+	return status;
+}
+
+/* Teardown counterpart of htt_attach(): release the rx and tx
+ * descriptor resources, drain both HTC packet pools, and destroy the
+ * tx mutexes. */
+void htt_detach(htt_pdev_handle pdev)
+{
+	htt_rx_detach(pdev);
+	htt_tx_detach(pdev);
+	htt_htc_pkt_pool_free(pdev);
+#ifdef ATH_11AC_TXCOMPACT
+	htt_htc_misc_pkt_pool_free(pdev);
+#endif
+	HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
+	HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
+}
+
+/**
+ * htt_pdev_free() - Free HTT pdev
+ * @pdev: htt pdev
+ *
+ * Return: none
+ */
+void htt_pdev_free(htt_pdev_handle pdev)
+{
+	cdf_mem_free(pdev);
+}
+
+/* Teardown counterpart of htt_attach_target(); currently nothing to
+ * undo on the target side. */
+void htt_detach_target(htt_pdev_handle pdev)
+{
+}
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
+ * htt_pkt_dl_len_get() HTT packet download length for fastpath case
+ *
+ * @htt_dev: pointer to htt device.
+ *
+ * As fragment one already downloaded HTT/HTC header, download length is
+ * remaining bytes.
+ *
+ * Return: download length
+ */
+int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
+{
+	return htt_dev->download_len - sizeof(struct htt_host_tx_desc_t);
+}
+#else
+/* Non-fastpath builds: no additional download length is reported. */
+int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
+{
+	return 0;
+}
+#endif
+
+/**
+ * htt_htc_attach() - connect the HTT data message service to HTC.
+ * Registers the endpoint callbacks (tx complete, rx, send-queue full),
+ * disables credit flow control on non-SDIO buses, and records the
+ * endpoint ID returned by HTC in pdev->htc_endpoint.
+ *
+ * Return: 0 on success, -EIO if the HTC service connect fails.
+ */
+int htt_htc_attach(struct htt_pdev_t *pdev)
+{
+	HTC_SERVICE_CONNECT_REQ connect;
+	HTC_SERVICE_CONNECT_RESP response;
+	A_STATUS status;
+
+	cdf_mem_set(&connect, sizeof(connect), 0);
+	cdf_mem_set(&response, sizeof(response), 0);
+
+	connect.pMetaData = NULL;
+	connect.MetaDataLength = 0;
+	connect.EpCallbacks.pContext = pdev;
+	connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
+	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
+	connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
+
+	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
+	connect.EpCallbacks.EpRecvRefill = NULL;
+	connect.EpCallbacks.RecvRefillWaterMark = 1;
+	/* N/A, fill is done by HIF */
+
+	connect.EpCallbacks.EpSendFull = htt_h2t_full;
+	/*
+	 * Specify how deep to let a queue get before htc_send_pkt will
+	 * call the EpSendFull function due to excessive send queue depth.
+	 */
+	connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
+
+	/* disable flow control for HTT data message service */
+#ifndef HIF_SDIO
+	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+#endif
+
+	/* connect to control service */
+	connect.ServiceID = HTT_DATA_MSG_SVC;
+
+	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
+
+	if (status != A_OK)
+		return -EIO;       /* failure */
+
+	pdev->htc_endpoint = response.Endpoint;
+#if defined(HIF_PCI)
+	hif_save_htc_htt_config_endpoint(pdev->htc_endpoint);
+#endif
+
+	return 0;               /* success */
+}
+
+#if HTT_DEBUG_LEVEL > 5
+/* Debug-only dump of the tx descriptor pool and rx ring state,
+ * printed with the requested left indentation (compiled in only at
+ * high debug levels). */
+void htt_display(htt_pdev_handle pdev, int indent)
+{
+	cdf_print("%*s%s:\n", indent, " ", "HTT");
+	cdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
+		  indent + 4, " ",
+		  pdev->tx_descs.pool_elems,
+		  pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
+	cdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
+		  indent + 4, " ",
+		  pdev->rx_ring.size, pdev->rx_ring.fill_level);
+	cdf_print("%*sat %p (%#x paddr)\n", indent + 8, " ",
+		  pdev->rx_ring.buf.paddrs_ring, pdev->rx_ring.base_paddr);
+	cdf_print("%*snetbuf ring @ %p\n", indent + 8, " ",
+		  pdev->rx_ring.buf.netbufs_ring);
+	cdf_print("%*sFW_IDX shadow register: vaddr = %p, paddr = %#x\n",
+		  indent + 8, " ",
+		  pdev->rx_ring.alloc_idx.vaddr, pdev->rx_ring.alloc_idx.paddr);
+	cdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
+		  indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
+		  pdev->rx_ring.sw_rd_idx.msdu_desc,
+		  pdev->rx_ring.sw_rd_idx.msdu_payld);
+}
+#endif
+
+/* Disable ASPM (PCIe low-power link states): thin wrapper so HTT
+ * callers can reach the HTC-level htc_disable_aspm(). */
+void htt_htc_disable_aspm(void)
+{
+	htc_disable_aspm();
+}
+
+#ifdef IPA_OFFLOAD
+/*
+ * Attach resources for the IPA micro controller data path: tx first,
+ * then rx; a failed rx attach rolls back the tx resources.  Sizes and
+ * counts come from the pdev configuration.  Failures also assert.
+ */
+int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
+{
+	int error;
+
+	/* TX resource attach */
+	error = htt_tx_ipa_uc_attach(
+		pdev,
+		ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
+		ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
+		ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
+	if (error) {
+		cdf_print("HTT IPA UC TX attach fail code %d\n", error);
+		HTT_ASSERT0(0);
+		return error;
+	}
+
+	/* RX resource attach */
+	error = htt_rx_ipa_uc_attach(
+		pdev,
+		ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+	if (error) {
+		cdf_print("HTT IPA UC RX attach fail code %d\n", error);
+		/* roll back the tx-side attach done above */
+		htt_tx_ipa_uc_detach(pdev);
+		HTT_ASSERT0(0);
+		return error;
+	}
+
+	return 0;               /* success */
+}
+
+/* Release both the tx and rx micro controller data path resources. */
+void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	/* TX IPA micro controller detach */
+	htt_tx_ipa_uc_detach(pdev);
+
+	/* RX IPA micro controller detach */
+	htt_rx_ipa_uc_detach(pdev);
+}
+
+/*
+ * Distribute micro controller resources to the control module: report
+ * the tx-completion and rx-indication ring addresses/sizes held by
+ * this pdev, then fetch the copy engine and bus resources from HTC.
+ * NOTE(review): the physical addresses are cast down to uint32_t --
+ * verify this is safe on targets with 64-bit physical addressing.
+ */
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+			uint32_t *ce_sr_base_paddr,
+			uint32_t *ce_sr_ring_size,
+			cdf_dma_addr_t *ce_reg_paddr,
+			uint32_t *tx_comp_ring_base_paddr,
+			uint32_t *tx_comp_ring_size,
+			uint32_t *tx_num_alloc_buffer,
+			uint32_t *rx_rdy_ring_base_paddr,
+			uint32_t *rx_rdy_ring_size,
+			uint32_t *rx_proc_done_idx_paddr)
+{
+	/* Release allocated resource to client */
+	*tx_comp_ring_base_paddr =
+		(uint32_t) pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
+	*tx_comp_ring_size =
+		(uint32_t) ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
+	*tx_num_alloc_buffer = (uint32_t) pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+	*rx_rdy_ring_base_paddr =
+		(uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
+	*rx_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
+	*rx_proc_done_idx_paddr =
+		(uint32_t) pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
+
+	/* Get copy engine, bus resource */
+	htc_ipa_get_ce_resource(pdev->htc_pdev,
+				ce_sr_base_paddr,
+				ce_sr_ring_size, ce_reg_paddr);
+
+	return 0;
+}
+
+/*
+ * Record the micro controller doorbell register addresses so they can
+ * be distributed to firmware; always succeeds.
+ */
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+			      uint32_t ipa_uc_tx_doorbell_paddr,
+			      uint32_t ipa_uc_rx_doorbell_paddr)
+{
+	pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
+	pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
+	return 0;
+}
+#endif /* IPA_OFFLOAD */

+ 1155 - 0
core/dp/htt/htt_fw_stats.c

@@ -0,0 +1,1155 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_fw_stats.c
+ * @brief Provide functions to process FW status retrieved from FW.
+ */
+
+#include <htc_api.h>            /* HTC_PACKET */
+#include <htt.h>                /* HTT_T2H_MSG_TYPE, etc. */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <cdf_memory.h>         /* cdf_mem_set */
+#include <ol_fw_tx_dbg.h>       /* ol_fw_tx_dbg_ppdu_base */
+
+#include <ol_htt_rx_api.h>
+#include <ol_txrx_htt_api.h>    /* htt_tx_status */
+
+#include <htt_internal.h>
+
+#include <wlan_defs.h>
+
+/* Round (val) up to the next multiple of 4 (for non-negative values). */
+#define ROUND_UP_TO_4(val) (((val) + 3) & ~0x3)
+
+
+/* Bandwidth labels; indexed by the bw counter slot in the rate stats. */
+static char *bw_str_arr[] = {"20MHz", "40MHz", "80MHz", "160MHz"};
+
+/*
+ * Defined the macro tx_rate_stats_print_cmn()
+ * so that this could be used in both
+ * htt_t2h_stats_tx_rate_stats_print() &
+ * htt_t2h_stats_tx_rate_stats_print_v2().
+ * Each of these functions take a different structure as argument,
+ * but with common fields in the structures--so using a macro
+ * to bypass the strong type-checking of a function seems a simple
+ * trick to use to avoid the code duplication.
+ */
+#define tx_rate_stats_print_cmn(_tx_rate_info, _concise) \
+{														\
+	int i;												\
+														\
+	cdf_print("TX Rate Info:\n");						\
+														\
+	/* MCS */											\
+	cdf_print("MCS counts (0..9): ");					\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _tx_rate_info->mcs[0],						\
+		  _tx_rate_info->mcs[1],						\
+		  _tx_rate_info->mcs[2],						\
+		  _tx_rate_info->mcs[3],						\
+		  _tx_rate_info->mcs[4],						\
+		  _tx_rate_info->mcs[5],						\
+		  _tx_rate_info->mcs[6],						\
+		  _tx_rate_info->mcs[7],						\
+		  _tx_rate_info->mcs[8],						\
+		  _tx_rate_info->mcs[9]);						\
+														\
+	/* SGI */											\
+	cdf_print("SGI counts (0..9): ");					\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _tx_rate_info->sgi[0],						\
+		  _tx_rate_info->sgi[1],						\
+		  _tx_rate_info->sgi[2],						\
+		  _tx_rate_info->sgi[3],						\
+		  _tx_rate_info->sgi[4],						\
+		  _tx_rate_info->sgi[5],						\
+		  _tx_rate_info->sgi[6],						\
+		  _tx_rate_info->sgi[7],						\
+		  _tx_rate_info->sgi[8],						\
+		  _tx_rate_info->sgi[9]);						\
+														\
+	/* NSS */											\
+	cdf_print("NSS  counts: ");							\
+	cdf_print("1x1 %d, 2x2 %d, 3x3 %d\n",				\
+		  _tx_rate_info->nss[0],						\
+		  _tx_rate_info->nss[1], _tx_rate_info->nss[2]);\
+														\
+	/* BW */											\
+	cdf_print("BW counts: ");							\
+														\
+	for (i = 0;											\
+		i < sizeof(_tx_rate_info->bw) / sizeof(_tx_rate_info->bw[0]);\
+		i++) {											\
+			cdf_print("%s %d ", bw_str_arr[i], _tx_rate_info->bw[i]);\
+	}													\
+	cdf_print("\n");									\
+														\
+	/* Preamble */										\
+	cdf_print("Preamble (O C H V) counts: ");			\
+	cdf_print("%d, %d, %d, %d\n",						\
+		  _tx_rate_info->pream[0],						\
+		  _tx_rate_info->pream[1],						\
+		  _tx_rate_info->pream[2],						\
+		  _tx_rate_info->pream[3]);						\
+														\
+	/* STBC rate counts */								\
+	cdf_print("STBC rate counts (0..9): ");				\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _tx_rate_info->stbc[0],						\
+		  _tx_rate_info->stbc[1],						\
+		  _tx_rate_info->stbc[2],						\
+		  _tx_rate_info->stbc[3],						\
+		  _tx_rate_info->stbc[4],						\
+		  _tx_rate_info->stbc[5],						\
+		  _tx_rate_info->stbc[6],						\
+		  _tx_rate_info->stbc[7],						\
+		  _tx_rate_info->stbc[8],						\
+		  _tx_rate_info->stbc[9]);						\
+														\
+	/* LDPC and TxBF counts */							\
+	cdf_print("LDPC Counts: ");							\
+	cdf_print("%d\n", _tx_rate_info->ldpc);				\
+	cdf_print("RTS Counts: ");							\
+	cdf_print("%d\n", _tx_rate_info->rts_cnt);			\
+	/* RSSI Values for last ack frames */				\
+	cdf_print("Ack RSSI: %d\n", _tx_rate_info->ack_rssi);\
+}
+
+/* Typed wrapper over tx_rate_stats_print_cmn() for the v1 tx
+ * rate-info struct layout. */
+static void htt_t2h_stats_tx_rate_stats_print(wlan_dbg_tx_rate_info_t *
+					      tx_rate_info, int concise)
+{
+	tx_rate_stats_print_cmn(tx_rate_info, concise);
+}
+
+/* Typed wrapper over tx_rate_stats_print_cmn() for the v2 tx
+ * rate-info struct layout (same common fields, different type). */
+static void htt_t2h_stats_tx_rate_stats_print_v2(wlan_dbg_tx_rate_info_v2_t *
+					      tx_rate_info, int concise)
+{
+	tx_rate_stats_print_cmn(tx_rate_info, concise);
+}
+
+/*
+ * Defined the macro rx_rate_stats_print_cmn()
+ * so that this could be used in both
+ * htt_t2h_stats_rx_rate_stats_print() &
+ * htt_t2h_stats_rx_rate_stats_print_v2().
+ * Each of these functions take a different structure as argument,
+ * but with common fields in the structures -- so using a macro
+ * to bypass the strong type-checking of a function seems a simple
+ * trick to use to avoid the code duplication.
+ */
+#define rx_rate_stats_print_cmn(_rx_phy_info, _concise) \
+{														\
+	int i;												\
+														\
+	cdf_print("RX Rate Info:\n");						\
+														\
+	/* MCS */											\
+	cdf_print("MCS counts (0..9): ");					\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _rx_phy_info->mcs[0],							\
+		  _rx_phy_info->mcs[1],							\
+		  _rx_phy_info->mcs[2],							\
+		  _rx_phy_info->mcs[3],							\
+		  _rx_phy_info->mcs[4],							\
+		  _rx_phy_info->mcs[5],							\
+		  _rx_phy_info->mcs[6],							\
+		  _rx_phy_info->mcs[7],							\
+		  _rx_phy_info->mcs[8],							\
+		  _rx_phy_info->mcs[9]);						\
+														\
+	/* SGI */											\
+	cdf_print("SGI counts (0..9): ");					\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _rx_phy_info->sgi[0],							\
+		  _rx_phy_info->sgi[1],							\
+		  _rx_phy_info->sgi[2],							\
+		  _rx_phy_info->sgi[3],							\
+		  _rx_phy_info->sgi[4],							\
+		  _rx_phy_info->sgi[5],							\
+		  _rx_phy_info->sgi[6],							\
+		  _rx_phy_info->sgi[7],							\
+		  _rx_phy_info->sgi[8],							\
+		  _rx_phy_info->sgi[9]);						\
+														\
+	/* NSS */											\
+	cdf_print("NSS  counts: ");							\
+	/* nss[0] just holds the count of non-stbc frames that were sent at 1x1 \
+	 * rates and nsts holds the count of frames sent with stbc.	\
+	 * It was decided to not include PPDUs sent w/ STBC in nss[0]\
+	 * since it would be easier to change the value that needs to be\
+	 * printed (from "stbc+non-stbc count to only non-stbc count")\
+	 * if needed in the future. Hence the addition in the host code\
+	 * at this line. */									\
+	cdf_print("1x1 %d, 2x2 %d, 3x3 %d, 4x4 %d\n",		\
+		  _rx_phy_info->nss[0] + _rx_phy_info->nsts,	\
+		  _rx_phy_info->nss[1],							\
+		  _rx_phy_info->nss[2],							\
+		  _rx_phy_info->nss[3]);						\
+														\
+	/* NSTS */											\
+	cdf_print("NSTS count: ");							\
+	cdf_print("%d\n", _rx_phy_info->nsts);				\
+														\
+	/* BW */											\
+	cdf_print("BW counts: ");							\
+	for (i = 0;											\
+		i < sizeof(_rx_phy_info->bw) / sizeof(_rx_phy_info->bw[0]);	\
+		i++) {											\
+			cdf_print("%s %d ", bw_str_arr[i], _rx_phy_info->bw[i]);\
+	}													\
+	cdf_print("\n");									\
+														\
+	/* Preamble */										\
+	cdf_print("Preamble counts: ");						\
+	cdf_print("%d, %d, %d, %d, %d, %d\n",				\
+		  _rx_phy_info->pream[0],						\
+		  _rx_phy_info->pream[1],						\
+		  _rx_phy_info->pream[2],						\
+		  _rx_phy_info->pream[3],						\
+		  _rx_phy_info->pream[4],						\
+		  _rx_phy_info->pream[5]);						\
+														\
+	/* STBC rate counts */								\
+	cdf_print("STBC rate counts (0..9): ");				\
+	cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+		  _rx_phy_info->stbc[0],						\
+		  _rx_phy_info->stbc[1],						\
+		  _rx_phy_info->stbc[2],						\
+		  _rx_phy_info->stbc[3],						\
+		  _rx_phy_info->stbc[4],						\
+		  _rx_phy_info->stbc[5],						\
+		  _rx_phy_info->stbc[6],						\
+		  _rx_phy_info->stbc[7],						\
+		  _rx_phy_info->stbc[8],						\
+		  _rx_phy_info->stbc[9]);						\
+														\
+	/* LDPC and TxBF counts */							\
+	cdf_print("LDPC TXBF Counts: ");					\
+	cdf_print("%d, %d\n", _rx_phy_info->ldpc, _rx_phy_info->txbf);\
+	/* RSSI Values for last received frames */			\
+	cdf_print("RSSI (data, mgmt): %d, %d\n", _rx_phy_info->data_rssi,\
+		  _rx_phy_info->mgmt_rssi);						\
+														\
+	cdf_print("RSSI Chain 0 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+		  ((_rx_phy_info->rssi_chain0 >> 24) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain0 >> 16) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain0 >> 8) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain0 >> 0) & 0xff));	\
+														\
+	cdf_print("RSSI Chain 1 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+		  ((_rx_phy_info->rssi_chain1 >> 24) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain1 >> 16) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain1 >> 8) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain1 >> 0) & 0xff));	\
+														\
+	cdf_print("RSSI Chain 2 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+		  ((_rx_phy_info->rssi_chain2 >> 24) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain2 >> 16) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain2 >> 8) & 0xff),	\
+		  ((_rx_phy_info->rssi_chain2 >> 0) & 0xff));	\
+}
+
+/* Print target rx PHY rate stats (v1 layout) by expanding the common
+ * rx_rate_stats_print_cmn() body; @concise is forwarded unchanged. */
+static void htt_t2h_stats_rx_rate_stats_print(wlan_dbg_rx_rate_info_t *
+					      rx_phy_info, int concise)
+{
+	rx_rate_stats_print_cmn(rx_phy_info, concise);
+}
+
+/* Same as htt_t2h_stats_rx_rate_stats_print(), but for the v2 stats
+ * layout (wlan_dbg_rx_rate_info_v2_t); the common macro body handles
+ * both because the accessed field names match. */
+static void htt_t2h_stats_rx_rate_stats_print_v2(wlan_dbg_rx_rate_info_v2_t *
+					      rx_phy_info, int concise)
+{
+	rx_rate_stats_print_cmn(rx_phy_info, concise);
+}
+
+/**
+ * htt_t2h_stats_pdev_stats_print() - dump WAL pdev-level tx and rx counters
+ * @wlan_pdev_stats: target-provided pdev stats (tx + rx sub-structures)
+ * @concise: unused by this printer; kept for signature symmetry with the
+ *           other stats-print handlers
+ */
+static void
+htt_t2h_stats_pdev_stats_print(struct wlan_dbg_stats *wlan_pdev_stats,
+			       int concise)
+{
+	struct wlan_dbg_tx_stats *tx = &wlan_pdev_stats->tx;
+	struct wlan_dbg_rx_stats *rx = &wlan_pdev_stats->rx;
+
+	cdf_print("WAL Pdev stats:\n");
+	cdf_print("\n### Tx ###\n");
+
+	/* Num HTT cookies queued to dispatch list */
+	cdf_print("comp_queued       :\t%d\n", tx->comp_queued);
+	/* Num HTT cookies dispatched */
+	cdf_print("comp_delivered    :\t%d\n", tx->comp_delivered);
+	/* Num MSDU queued to WAL */
+	cdf_print("msdu_enqued       :\t%d\n", tx->msdu_enqued);
+	/* Num MPDU queued to WAL */
+	cdf_print("mpdu_enqued       :\t%d\n", tx->mpdu_enqued);
+	/* Num MSDUs dropped by WMM limit */
+	cdf_print("wmm_drop          :\t%d\n", tx->wmm_drop);
+	/* Num Local frames queued */
+	cdf_print("local_enqued      :\t%d\n", tx->local_enqued);
+	/* Num Local frames done */
+	cdf_print("local_freed       :\t%d\n", tx->local_freed);
+	/* Num queued to HW */
+	cdf_print("hw_queued         :\t%d\n", tx->hw_queued);
+	/* Num PPDU reaped from HW */
+	cdf_print("hw_reaped         :\t%d\n", tx->hw_reaped);
+	/* Num underruns */
+	cdf_print("mac underrun      :\t%d\n", tx->underrun);
+	/* Num underruns */
+	cdf_print("phy underrun      :\t%d\n", tx->phy_underrun);
+	/* Num PPDUs cleaned up in TX abort */
+	cdf_print("tx_abort          :\t%d\n", tx->tx_abort);
+	/* Num MPDUs requed by SW */
+	cdf_print("mpdus_requed      :\t%d\n", tx->mpdus_requed);
+	/* Excessive retries */
+	cdf_print("excess retries    :\t%d\n", tx->tx_ko);
+	/* last data rate */
+	cdf_print("last rc           :\t%d\n", tx->data_rc);
+	/* scheduler self triggers */
+	cdf_print("sched self trig   :\t%d\n", tx->self_triggers);
+	/* SW retry failures */
+	cdf_print("ampdu retry failed:\t%d\n", tx->sw_retry_failure);
+	/* ilegal phy rate errirs */
+	cdf_print("illegal rate errs :\t%d\n", tx->illgl_rate_phy_err);
+	/* pdev continous excessive retries  */
+	cdf_print("pdev cont xretry  :\t%d\n", tx->pdev_cont_xretry);
+	/* pdev continous excessive retries  */
+	cdf_print("pdev tx timeout   :\t%d\n", tx->pdev_tx_timeout);
+	/* pdev resets  */
+	cdf_print("pdev resets       :\t%d\n", tx->pdev_resets);
+	/* PPDU > txop duration  */
+	cdf_print("ppdu txop ovf     :\t%d\n", tx->txop_ovf);
+
+	cdf_print("\n### Rx ###\n");
+	/* Cnts any change in ring routing mid-ppdu */
+	cdf_print("ppdu_route_change :\t%d\n", rx->mid_ppdu_route_change);
+	/* Total number of statuses processed */
+	cdf_print("status_rcvd       :\t%d\n", rx->status_rcvd);
+	/* Extra frags on rings 0-3 */
+	cdf_print("r0_frags          :\t%d\n", rx->r0_frags);
+	cdf_print("r1_frags          :\t%d\n", rx->r1_frags);
+	cdf_print("r2_frags          :\t%d\n", rx->r2_frags);
+	cdf_print("r3_frags          :\t%d\n", rx->r3_frags);
+	/* MSDUs / MPDUs delivered to HTT */
+	cdf_print("htt_msdus         :\t%d\n", rx->htt_msdus);
+	cdf_print("htt_mpdus         :\t%d\n", rx->htt_mpdus);
+	/* MSDUs / MPDUs delivered to local stack */
+	cdf_print("loc_msdus         :\t%d\n", rx->loc_msdus);
+	cdf_print("loc_mpdus         :\t%d\n", rx->loc_mpdus);
+	/* AMSDUs that have more MSDUs than the status ring size */
+	cdf_print("oversize_amsdu    :\t%d\n", rx->oversize_amsdu);
+	/* Number of PHY errors */
+	cdf_print("phy_errs          :\t%d\n", rx->phy_errs);
+	/* Number of PHY errors dropped */
+	cdf_print("phy_errs dropped  :\t%d\n", rx->phy_err_drop);
+	/* Number of mpdu errors - FCS, MIC, ENC etc. */
+	cdf_print("mpdu_errs         :\t%d\n", rx->mpdu_errs);
+
+}
+
+/**
+ * htt_t2h_stats_rx_reorder_stats_print() - dump rx reorder/PN/flush counters
+ * @stats_ptr: rx reorder statistics uploaded by the target
+ * @concise: unused by this printer; kept for signature symmetry with the
+ *           other stats-print handlers
+ */
+static void
+htt_t2h_stats_rx_reorder_stats_print(struct rx_reorder_stats *stats_ptr,
+				     int concise)
+{
+	cdf_print("Rx reorder statistics:\n");
+	cdf_print("  %u non-QoS frames received\n", stats_ptr->deliver_non_qos);
+	cdf_print("  %u frames received in-order\n",
+		  stats_ptr->deliver_in_order);
+	cdf_print("  %u frames flushed due to timeout\n",
+		  stats_ptr->deliver_flush_timeout);
+	cdf_print("  %u frames flushed due to moving out of window\n",
+		  stats_ptr->deliver_flush_oow);
+	cdf_print("  %u frames flushed due to receiving DELBA\n",
+		  stats_ptr->deliver_flush_delba);
+	cdf_print("  %u frames discarded due to FCS error\n",
+		  stats_ptr->fcs_error);
+	cdf_print("  %u frames discarded due to invalid peer\n",
+		  stats_ptr->invalid_peer);
+	cdf_print
+		("  %u frames discarded due to duplication (non aggregation)\n",
+		stats_ptr->dup_non_aggr);
+	cdf_print("  %u frames discarded due to duplication in reorder queue\n",
+		 stats_ptr->dup_in_reorder);
+	cdf_print("  %u frames discarded due to processed before\n",
+		  stats_ptr->dup_past);
+	cdf_print("  %u times reorder timeout happened\n",
+		  stats_ptr->reorder_timeout);
+	cdf_print("  %u times incorrect bar received\n",
+		  stats_ptr->invalid_bar_ssn);
+	cdf_print("  %u times bar ssn reset happened\n",
+			stats_ptr->ssn_reset);
+	cdf_print("  %u times flushed due to peer delete\n",
+			stats_ptr->deliver_flush_delpeer);
+	cdf_print("  %u times flushed due to offload\n",
+			stats_ptr->deliver_flush_offload);
+	cdf_print("  %u times flushed due to ouf of buffer\n",
+			stats_ptr->deliver_flush_oob);
+	cdf_print("  %u MPDU's dropped due to PN check fail\n",
+			stats_ptr->pn_fail);
+	cdf_print("  %u MPDU's dropped due to lack of memory\n",
+			stats_ptr->store_fail);
+	cdf_print("  %u times tid pool alloc succeeded\n",
+			stats_ptr->tid_pool_alloc_succ);
+	cdf_print("  %u times MPDU pool alloc succeeded\n",
+			stats_ptr->mpdu_pool_alloc_succ);
+	cdf_print("  %u times MSDU pool alloc succeeded\n",
+			stats_ptr->msdu_pool_alloc_succ);
+	cdf_print("  %u times tid pool alloc failed\n",
+			stats_ptr->tid_pool_alloc_fail);
+	cdf_print("  %u times MPDU pool alloc failed\n",
+			stats_ptr->mpdu_pool_alloc_fail);
+	cdf_print("  %u times MSDU pool alloc failed\n",
+			stats_ptr->msdu_pool_alloc_fail);
+	cdf_print("  %u times tid pool freed\n",
+			stats_ptr->tid_pool_free);
+	cdf_print("  %u times MPDU pool freed\n",
+			stats_ptr->mpdu_pool_free);
+	cdf_print("  %u times MSDU pool freed\n",
+			stats_ptr->msdu_pool_free);
+	cdf_print("  %u MSDUs undelivered to HTT, queued to Rx MSDU free list\n",
+			stats_ptr->msdu_queued);
+	cdf_print("  %u MSDUs released from Rx MSDU list to MAC ring\n",
+			stats_ptr->msdu_recycled);
+	cdf_print("  %u MPDUs with invalid peer but A2 found in AST\n",
+			stats_ptr->invalid_peer_a2_in_ast);
+	cdf_print("  %u MPDUs with invalid peer but A3 found in AST\n",
+			stats_ptr->invalid_peer_a3_in_ast);
+	cdf_print("  %u MPDUs with invalid peer, Broadcast or Mulitcast frame\n",
+			stats_ptr->invalid_peer_bmc_mpdus);
+	cdf_print("  %u MSDUs with err attention word\n",
+			stats_ptr->rxdesc_err_att);
+	cdf_print("  %u MSDUs with flag of peer_idx_invalid\n",
+			stats_ptr->rxdesc_err_peer_idx_inv);
+	cdf_print("  %u MSDUs with  flag of peer_idx_timeout\n",
+			stats_ptr->rxdesc_err_peer_idx_to);
+	cdf_print("  %u MSDUs with  flag of overflow\n",
+			stats_ptr->rxdesc_err_ov);
+	cdf_print("  %u MSDUs with  flag of msdu_length_err\n",
+			stats_ptr->rxdesc_err_msdu_len);
+	cdf_print("  %u MSDUs with  flag of mpdu_length_err\n",
+			stats_ptr->rxdesc_err_mpdu_len);
+	cdf_print("  %u MSDUs with  flag of tkip_mic_err\n",
+			stats_ptr->rxdesc_err_tkip_mic);
+	cdf_print("  %u MSDUs with  flag of decrypt_err\n",
+			stats_ptr->rxdesc_err_decrypt);
+	cdf_print("  %u MSDUs with  flag of fcs_err\n",
+			stats_ptr->rxdesc_err_fcs);
+	cdf_print("  %u Unicast frames with invalid peer handler\n",
+			stats_ptr->rxdesc_uc_msdus_inv_peer);
+	cdf_print("  %u unicast frame directly to DUT with invalid peer handler\n",
+			stats_ptr->rxdesc_direct_msdus_inv_peer);
+	cdf_print("  %u Broadcast/Multicast frames with invalid peer handler\n",
+			stats_ptr->rxdesc_bmc_msdus_inv_peer);
+	cdf_print("  %u MSDUs dropped due to no first MSDU flag\n",
+			stats_ptr->rxdesc_no_1st_msdu);
+	cdf_print("  %u MSDUs dropped due to ring overflow\n",
+			stats_ptr->msdu_drop_ring_ov);
+	cdf_print("  %u MSDUs dropped due to FC mismatch\n",
+			stats_ptr->msdu_drop_fc_mismatch);
+	cdf_print("  %u MSDUs dropped due to mgt frame in Remote ring\n",
+			stats_ptr->msdu_drop_mgmt_remote_ring);
+	cdf_print("  %u MSDUs dropped due to misc non error\n",
+			stats_ptr->msdu_drop_misc);
+	cdf_print("  %u MSDUs go to offload before reorder\n",
+			stats_ptr->offload_msdu_wal);
+	cdf_print("  %u data frame dropped by offload after reorder\n",
+			stats_ptr->offload_msdu_reorder);
+	cdf_print("  %u  MPDUs with SN in the past & within BA window\n",
+			stats_ptr->dup_past_within_window);
+	cdf_print("  %u  MPDUs with SN in the past & outside BA window\n",
+			stats_ptr->dup_past_outside_window);
+}
+
+/**
+ * htt_t2h_stats_rx_rem_buf_stats_print() - dump rx remote-buffer management
+ * counters (reap/recycle/refill indications between host, f/w and MAC)
+ * @stats_ptr: remote buffer management statistics from the target
+ * @concise: unused by this printer; kept for signature symmetry
+ */
+static void
+htt_t2h_stats_rx_rem_buf_stats_print(
+    struct rx_remote_buffer_mgmt_stats *stats_ptr, int concise)
+{
+	cdf_print("Rx Remote Buffer Statistics:\n");
+	cdf_print("  %u MSDU's reaped for Rx processing\n",
+			stats_ptr->remote_reaped);
+	cdf_print("  %u MSDU's recycled within firmware\n",
+			stats_ptr->remote_recycled);
+	cdf_print("  %u MSDU's stored by Data Rx\n",
+			stats_ptr->data_rx_msdus_stored);
+	cdf_print("  %u HTT indications from WAL Rx MSDU\n",
+			stats_ptr->wal_rx_ind);
+	cdf_print("  %u HTT indications unconsumed from WAL Rx MSDU\n",
+			stats_ptr->wal_rx_ind_unconsumed);
+	cdf_print("  %u HTT indications from Data Rx MSDU\n",
+			stats_ptr->data_rx_ind);
+	cdf_print("  %u HTT indications unconsumed from Data Rx MSDU\n",
+			stats_ptr->data_rx_ind_unconsumed);
+	cdf_print("  %u HTT indications from ATHBUF\n",
+			stats_ptr->athbuf_rx_ind);
+	cdf_print("  %u Remote buffers requested for refill\n",
+			stats_ptr->refill_buf_req);
+	cdf_print("  %u Remote buffers filled by host\n",
+			stats_ptr->refill_buf_rsp);
+	cdf_print("  %u times MAC has no buffers\n",
+			stats_ptr->mac_no_bufs);
+	cdf_print("  %u times f/w write & read indices on MAC ring are equal\n",
+			stats_ptr->fw_indices_equal);
+	cdf_print("  %u times f/w has no remote buffers to post to MAC\n",
+			stats_ptr->host_no_bufs);
+}
+
+/**
+ * htt_t2h_stats_txbf_info_buf_stats_print() - dump per-rate TXBF data
+ * counters (tx/rx VHT, tx HT, tx OFDM, tx CCK)
+ * @stats_ptr: TXBF data statistics from the target
+ */
+static void
+htt_t2h_stats_txbf_info_buf_stats_print(
+	struct wlan_dbg_txbf_data_stats *stats_ptr)
+{
+	cdf_print("TXBF data Statistics:\n");
+	cdf_print("tx_txbf_vht (0..9): ");
+	/* use %u for every element; the last entry was previously printed
+	 * with %d, inconsistent with the rest of the unsigned array */
+	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->tx_txbf_vht[0],
+		  stats_ptr->tx_txbf_vht[1],
+		  stats_ptr->tx_txbf_vht[2],
+		  stats_ptr->tx_txbf_vht[3],
+		  stats_ptr->tx_txbf_vht[4],
+		  stats_ptr->tx_txbf_vht[5],
+		  stats_ptr->tx_txbf_vht[6],
+		  stats_ptr->tx_txbf_vht[7],
+		  stats_ptr->tx_txbf_vht[8],
+		  stats_ptr->tx_txbf_vht[9]);
+	cdf_print("rx_txbf_vht (0..9): ");
+	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->rx_txbf_vht[0],
+		  stats_ptr->rx_txbf_vht[1],
+		  stats_ptr->rx_txbf_vht[2],
+		  stats_ptr->rx_txbf_vht[3],
+		  stats_ptr->rx_txbf_vht[4],
+		  stats_ptr->rx_txbf_vht[5],
+		  stats_ptr->rx_txbf_vht[6],
+		  stats_ptr->rx_txbf_vht[7],
+		  stats_ptr->rx_txbf_vht[8],
+		  stats_ptr->rx_txbf_vht[9]);
+	cdf_print("tx_txbf_ht (0..7): ");
+	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->tx_txbf_ht[0],
+		  stats_ptr->tx_txbf_ht[1],
+		  stats_ptr->tx_txbf_ht[2],
+		  stats_ptr->tx_txbf_ht[3],
+		  stats_ptr->tx_txbf_ht[4],
+		  stats_ptr->tx_txbf_ht[5],
+		  stats_ptr->tx_txbf_ht[6],
+		  stats_ptr->tx_txbf_ht[7]);
+	cdf_print("tx_txbf_ofdm (0..7): ");
+	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->tx_txbf_ofdm[0],
+		  stats_ptr->tx_txbf_ofdm[1],
+		  stats_ptr->tx_txbf_ofdm[2],
+		  stats_ptr->tx_txbf_ofdm[3],
+		  stats_ptr->tx_txbf_ofdm[4],
+		  stats_ptr->tx_txbf_ofdm[5],
+		  stats_ptr->tx_txbf_ofdm[6],
+		  stats_ptr->tx_txbf_ofdm[7]);
+	cdf_print("tx_txbf_cck (0..6): ");
+	cdf_print("%u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->tx_txbf_cck[0],
+		  stats_ptr->tx_txbf_cck[1],
+		  stats_ptr->tx_txbf_cck[2],
+		  stats_ptr->tx_txbf_cck[3],
+		  stats_ptr->tx_txbf_cck[4],
+		  stats_ptr->tx_txbf_cck[5],
+		  stats_ptr->tx_txbf_cck[6]);
+}
+
+/**
+ * htt_t2h_stats_txbf_snd_buf_stats_print() - dump TXBF sounding counters:
+ * compressed beamforming feedback per bandwidth (20/40/80 MHz) and the
+ * sounding array
+ * @stats_ptr: TXBF sounding statistics from the target
+ */
+static void
+htt_t2h_stats_txbf_snd_buf_stats_print(
+	struct wlan_dbg_txbf_snd_stats *stats_ptr)
+{
+	cdf_print("TXBF snd Buffer Statistics:\n");
+	cdf_print("cbf_20: ");
+	cdf_print("%u, %u, %u, %u\n",
+		  stats_ptr->cbf_20[0],
+		  stats_ptr->cbf_20[1],
+		  stats_ptr->cbf_20[2],
+		  stats_ptr->cbf_20[3]);
+	cdf_print("cbf_40: ");
+	cdf_print("%u, %u, %u, %u\n",
+		  stats_ptr->cbf_40[0],
+		  stats_ptr->cbf_40[1],
+		  stats_ptr->cbf_40[2],
+		  stats_ptr->cbf_40[3]);
+	cdf_print("cbf_80: ");
+	cdf_print("%u, %u, %u, %u\n",
+		  stats_ptr->cbf_80[0],
+		  stats_ptr->cbf_80[1],
+		  stats_ptr->cbf_80[2],
+		  stats_ptr->cbf_80[3]);
+	cdf_print("sounding: ");
+	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+		  stats_ptr->sounding[0],
+		  stats_ptr->sounding[1],
+		  stats_ptr->sounding[2],
+		  stats_ptr->sounding[3],
+		  stats_ptr->sounding[4],
+		  stats_ptr->sounding[5],
+		  stats_ptr->sounding[6],
+		  stats_ptr->sounding[7],
+		  stats_ptr->sounding[8]);
+}
+
+/**
+ * htt_t2h_stats_tx_selfgen_buf_stats_print() - dump counters for
+ * self-generated tx frames (NDPA/NDP, BR-POLL, BAR, CTS bursts) and
+ * their error counts
+ * @stats_ptr: tx selfgen statistics from the target
+ */
+static void
+htt_t2h_stats_tx_selfgen_buf_stats_print(
+	struct wlan_dbg_tx_selfgen_stats *stats_ptr)
+{
+	cdf_print("Tx selfgen Buffer Statistics:\n");
+	cdf_print("  %u su_ndpa\n",
+			stats_ptr->su_ndpa);
+	/* NOTE(review): mu_ndp is printed twice (here and two entries
+	 * below); by symmetry with su_ndpa/su_ndp_err this entry likely
+	 * should report su_ndp -- confirm against
+	 * struct wlan_dbg_tx_selfgen_stats before changing. */
+	cdf_print("  %u mu_ndp\n",
+			stats_ptr->mu_ndp);
+	cdf_print("  %u mu_ndpa\n",
+			stats_ptr->mu_ndpa);
+	cdf_print("  %u mu_ndp\n",
+			stats_ptr->mu_ndp);
+	cdf_print("  %u mu_brpoll_1\n",
+			stats_ptr->mu_brpoll_1);
+	cdf_print("  %u mu_brpoll_2\n",
+			stats_ptr->mu_brpoll_2);
+	cdf_print("  %u mu_bar_1\n",
+			stats_ptr->mu_bar_1);
+	cdf_print("  %u mu_bar_2\n",
+			stats_ptr->mu_bar_2);
+	cdf_print("  %u cts_burst\n",
+			stats_ptr->cts_burst);
+	cdf_print("  %u su_ndp_err\n",
+			stats_ptr->su_ndp_err);
+	cdf_print("  %u su_ndpa_err\n",
+			stats_ptr->su_ndpa_err);
+	cdf_print("  %u mu_ndp_err\n",
+			stats_ptr->mu_ndp_err);
+	cdf_print("  %u mu_brp1_err\n",
+			stats_ptr->mu_brp1_err);
+	cdf_print("  %u mu_brp2_err\n",
+			stats_ptr->mu_brp2_err);
+}
+
+/**
+ * htt_t2h_stats_wifi2_error_stats_print() - dump WIFI2 scheduler and PHY
+ * error counters
+ * @stats_ptr: error statistics from the target
+ *
+ * The "(0..N)" labels are inclusive index ranges; each loop iterates
+ * MAXCNT entries (indices 0 .. MAXCNT-1), so the labels print
+ * MAXCNT - 1 rather than MAXCNT.
+ */
+static void
+htt_t2h_stats_wifi2_error_stats_print(
+	struct wlan_dbg_wifi2_error_stats *stats_ptr)
+{
+	int i;
+
+	cdf_print("Scheduler error Statistics:\n");
+	cdf_print("urrn_stats: ");
+	cdf_print("%d, %d, %d\n",
+		  stats_ptr->urrn_stats[0],
+		  stats_ptr->urrn_stats[1],
+		  stats_ptr->urrn_stats[2]);
+	/* label the last valid index, not the array length */
+	cdf_print("flush_errs (0..%d): ",
+			WHAL_DBG_FLUSH_REASON_MAXCNT - 1);
+	for (i = 0; i < WHAL_DBG_FLUSH_REASON_MAXCNT; i++)
+		cdf_print("  %u", stats_ptr->flush_errs[i]);
+	cdf_print("\n");
+	cdf_print("schd_stall_errs (0..3): ");
+	cdf_print("%d, %d, %d, %d\n",
+		  stats_ptr->schd_stall_errs[0],
+		  stats_ptr->schd_stall_errs[1],
+		  stats_ptr->schd_stall_errs[2],
+		  stats_ptr->schd_stall_errs[3]);
+	cdf_print("schd_cmd_result (0..%d): ",
+			WHAL_DBG_CMD_RESULT_MAXCNT - 1);
+	for (i = 0; i < WHAL_DBG_CMD_RESULT_MAXCNT; i++)
+		cdf_print("  %u", stats_ptr->schd_cmd_result[i]);
+	cdf_print("\n");
+	cdf_print("sifs_status (0..%d): ",
+			WHAL_DBG_SIFS_STATUS_MAXCNT - 1);
+	for (i = 0; i < WHAL_DBG_SIFS_STATUS_MAXCNT; i++)
+		cdf_print("  %u", stats_ptr->sifs_status[i]);
+	cdf_print("\n");
+	cdf_print("phy_errs (0..%d): ",
+			WHAL_DBG_PHY_ERR_MAXCNT - 1);
+	for (i = 0; i < WHAL_DBG_PHY_ERR_MAXCNT; i++)
+		cdf_print("  %u", stats_ptr->phy_errs[i]);
+	cdf_print("\n");
+	cdf_print("  %u rx_rate_inval\n",
+			stats_ptr->rx_rate_inval);
+}
+
+/**
+ * htt_t2h_rx_musu_ndpa_pkts_stats_print() - dump counts of received TXBF
+ * MU/SU packets and of NDPA frames (directed, retried, total)
+ * @stats_ptr: MU/SU + NDPA statistics from the target
+ */
+static void
+htt_t2h_rx_musu_ndpa_pkts_stats_print(
+	struct rx_txbf_musu_ndpa_pkts_stats *stats_ptr)
+{
+	cdf_print("Rx TXBF MU/SU Packets and NDPA Statistics:\n");
+	cdf_print("  %u Number of TXBF MU packets received\n",
+			stats_ptr->number_mu_pkts);
+	cdf_print("  %u Number of TXBF SU packets received\n",
+			stats_ptr->number_su_pkts);
+	cdf_print("  %u Number of TXBF directed NDPA\n",
+			stats_ptr->txbf_directed_ndpa_count);
+	cdf_print("  %u Number of TXBF retried NDPA\n",
+			stats_ptr->txbf_ndpa_retry_count);
+	cdf_print("  %u Total number of TXBF NDPA\n",
+			stats_ptr->txbf_total_ndpa_count);
+}
+
+/* Convert firmware timestamp ticks to microseconds.
+ * Both arguments are parenthesized so that expression arguments
+ * (e.g. a + b) expand with the intended precedence. */
+#define HTT_TICK_TO_USEC(ticks, microsec_per_tick) \
+	((ticks) * (microsec_per_tick))
+/**
+ * htt_rate_flags_to_mhz() - map WHAL rate-flag bits to channel width
+ * @rate_flags: WHAL_RC_FLAG_* bitmask
+ *
+ * Return: 40, 80 or 160 for the first matching width flag, else 20 MHz.
+ */
+static inline int htt_rate_flags_to_mhz(uint8_t rate_flags)
+{
+	if (rate_flags & 0x20)
+		return 40;      /* WHAL_RC_FLAG_40MHZ */
+	if (rate_flags & 0x40)
+		return 80;      /* WHAL_RC_FLAG_80MHZ */
+	if (rate_flags & 0x80)
+		return 160;     /* WHAL_RC_FLAG_160MHZ */
+	return 20;
+}
+
+#define HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW 64
+
+/**
+ * htt_t2h_tx_ppdu_bitmaps_pr() - print a 64-bit block-ack window as two
+ * aligned character strings
+ * @queued_ptr: two consecutive 32-bit words of the "MPDU queued" bitmap
+ * @acked_ptr: two consecutive 32-bit words of the "MPDU acked" bitmap
+ *
+ * The first output line shows '1' where an MPDU was queued ('0'
+ * otherwise); the second shows 'y'/'N' for acked/un-acked at each queued
+ * position, and '-' where nothing was queued.
+ */
+static void
+htt_t2h_tx_ppdu_bitmaps_pr(uint32_t *queued_ptr, uint32_t *acked_ptr)
+{
+	char queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
+	char acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
+	int i, j, word;
+
+	/* defaults: not queued ('0') / not applicable ('-') */
+	cdf_mem_set(queued_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '0');
+	cdf_mem_set(acked_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '-');
+	i = 0;
+	for (word = 0; word < 2; word++) {
+		uint32_t queued = *(queued_ptr + word);
+		uint32_t acked = *(acked_ptr + word);
+		for (j = 0; j < 32; j++, i++) {
+			if (queued & (1 << j)) {
+				queued_str[i] = '1';
+				acked_str[i] = (acked & (1 << j)) ? 'y' : 'N';
+			}
+		}
+	}
+	queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
+	acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
+	cdf_print("%s\n", queued_str);
+	cdf_print("%s\n", acked_str);
+}
+
+/* Read a uint16_t from a target-uploaded message buffer, compensating on
+ * big-endian hosts for the per-32-bit-word byte swap done by the HIF HW
+ * (see comment below); on little-endian hosts this is a plain load. */
+static inline uint16_t htt_msg_read16(uint16_t *p16)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * During upload, the bytes within each uint32_t word were
+	 * swapped by the HIF HW.  This results in the lower and upper bytes
+	 * of each uint16_t to be in the correct big-endian order with
+	 * respect to each other, but for each even-index uint16_t to
+	 * have its position switched with its successor neighbor uint16_t.
+	 * Undo this uint16_t position swapping.
+	 */
+	return (((size_t) p16) & 0x2) ? *(p16 - 1) : *(p16 + 1);
+#else
+	return *p16;
+#endif
+}
+
+/* Read a uint8_t from a target-uploaded message buffer, compensating on
+ * big-endian hosts for the per-32-bit-word byte swap done by the HIF HW:
+ * the byte actually lives at the mirrored offset within its word. */
+static inline uint8_t htt_msg_read8(uint8_t *p8)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * During upload, the bytes within each uint32_t word were
+	 * swapped by the HIF HW.
+	 * Undo this byte swapping.
+	 */
+	switch (((size_t) p8) & 0x3) {
+	case 0:
+		return *(p8 + 3);
+	case 1:
+		return *(p8 + 1);
+	case 2:
+		return *(p8 - 1);
+	default /* 3 */:
+		return *(p8 - 3);
+	}
+#else
+	return *p8;
+#endif
+}
+
+/**
+ * htt_make_u8_list_str() - format uint8_t values from a message buffer as
+ * a comma-separated decimal string
+ * @aligned_data: 4-byte-aligned source buffer (read via htt_msg_read8 to
+ *                handle big-endian hosts)
+ * @buffer: output string buffer
+ * @space: bytes available in @buffer
+ * @max_elems: maximum number of values to format
+ *
+ * Stops at the first zero value (treated as end of the uploaded data) or
+ * when @buffer runs out of room; the result is always NUL-terminated and
+ * the trailing comma is trimmed.
+ */
+void htt_make_u8_list_str(uint32_t *aligned_data,
+			  char *buffer, int space, int max_elems)
+{
+	uint8_t *p8 = (uint8_t *) aligned_data;
+	char *buf_p = buffer;
+	while (max_elems-- > 0) {
+		int bytes;
+		uint8_t val;
+
+		val = htt_msg_read8(p8);
+		if (val == 0)
+			/* not enough data to fill the reserved msg buffer*/
+			break;
+
+		bytes = cdf_snprint(buf_p, space, "%d,", val);
+		space -= bytes;
+		if (space > 0)
+			buf_p += bytes;
+		else /* not enough print buffer space for all the data */
+			break;
+		p8++;
+	}
+	if (buf_p == buffer)
+		*buf_p = '\0';        /* nothing was written */
+	else
+		*(buf_p - 1) = '\0';  /* erase the final comma */
+
+}
+
+/**
+ * htt_make_u16_list_str() - format uint16_t values from a message buffer
+ * as a comma-separated decimal string
+ * @aligned_data: 4-byte-aligned source buffer (read via htt_msg_read16 to
+ *                handle big-endian hosts)
+ * @buffer: output string buffer
+ * @space: bytes available in @buffer
+ * @max_elems: maximum number of values to format
+ *
+ * Mirrors htt_make_u8_list_str(): stops at the first zero value or when
+ * @buffer is full; always NUL-terminates and trims the trailing comma.
+ */
+void htt_make_u16_list_str(uint32_t *aligned_data,
+			   char *buffer, int space, int max_elems)
+{
+	uint16_t *p16 = (uint16_t *) aligned_data;
+	char *buf_p = buffer;
+	while (max_elems-- > 0) {
+		int bytes;
+		uint16_t val;
+
+		val = htt_msg_read16(p16);
+		if (val == 0)
+			/* not enough data to fill the reserved msg buffer */
+			break;
+		bytes = cdf_snprint(buf_p, space, "%d,", val);
+		space -= bytes;
+		if (space > 0)
+			buf_p += bytes;
+		else /* not enough print buffer space for all the data */
+			break;
+
+		p16++;
+	}
+	if (buf_p == buffer)
+		*buf_p = '\0';  /* nothing was written */
+	else
+		*(buf_p - 1) = '\0';    /* erase the final comma */
+}
+
+/**
+ * htt_t2h_tx_ppdu_log_print() - decode and print the firmware tx PPDU log
+ * @hdr: log header carrying the per-record variable-array element counts
+ *       and the tick-to-microsecond conversion factor
+ * @record: first record, immediately following @hdr
+ * @length: total payload length in bytes (header + all records)
+ * @concise: non-zero prints a short per-record summary; zero prints full
+ *           detail including timing, rate and the per-MPDU byte arrays
+ *
+ * Each record is the fixed ol_fw_tx_dbg_ppdu_base followed by three
+ * variable-length tail arrays (MPDU bytes, MPDU MSDU counts, MSDU bytes)
+ * whose element counts come from @hdr; records are walked back-to-back
+ * until @length is exhausted.  Bit-fields are extracted from the raw
+ * 32-bit words using the OL_FW_TX_DBG_PPDU_* offset/mask/shift macros.
+ */
+void
+htt_t2h_tx_ppdu_log_print(struct ol_fw_tx_dbg_ppdu_msg_hdr *hdr,
+			  struct ol_fw_tx_dbg_ppdu_base *record,
+			  int length, int concise)
+{
+	int i;
+	int record_size;
+	int num_records;
+
+	/* fixed base plus the three hdr-sized variable tail arrays */
+	record_size =
+		sizeof(*record) +
+		hdr->mpdu_bytes_array_len * sizeof(uint16_t) +
+		hdr->mpdu_msdus_array_len * sizeof(uint8_t) +
+		hdr->msdu_bytes_array_len * sizeof(uint16_t);
+	num_records = (length - sizeof(*hdr)) / record_size;
+	cdf_print("Tx PPDU log elements:\n");
+
+	for (i = 0; i < num_records; i++) {
+		uint16_t start_seq_num;
+		uint16_t start_pn_lsbs;
+		uint8_t num_mpdus;
+		uint16_t peer_id;
+		uint8_t ext_tid;
+		uint8_t rate_code;
+		uint8_t rate_flags;
+		uint8_t tries;
+		uint8_t complete;
+		uint32_t time_enqueue_us;
+		uint32_t time_completion_us;
+		uint32_t *msg_word = (uint32_t *) record;
+
+		/* fields used for both concise and complete printouts */
+		start_seq_num =
+			((*(msg_word + OL_FW_TX_DBG_PPDU_START_SEQ_NUM_16)) &
+			 OL_FW_TX_DBG_PPDU_START_SEQ_NUM_M) >>
+			OL_FW_TX_DBG_PPDU_START_SEQ_NUM_S;
+		complete =
+			((*(msg_word + OL_FW_TX_DBG_PPDU_COMPLETE_16)) &
+			 OL_FW_TX_DBG_PPDU_COMPLETE_M) >>
+			OL_FW_TX_DBG_PPDU_COMPLETE_S;
+
+		/* fields used only for complete printouts */
+		if (!concise) {
+#define BUF_SIZE 80
+			char buf[BUF_SIZE];
+			uint8_t *p8;
+			time_enqueue_us =
+				HTT_TICK_TO_USEC(record->timestamp_enqueue,
+						 hdr->microsec_per_tick);
+			time_completion_us =
+				HTT_TICK_TO_USEC(record->timestamp_completion,
+						 hdr->microsec_per_tick);
+
+			start_pn_lsbs =
+				((*
+				  (msg_word +
+				   OL_FW_TX_DBG_PPDU_START_PN_LSBS_16)) &
+				 OL_FW_TX_DBG_PPDU_START_PN_LSBS_M) >>
+				OL_FW_TX_DBG_PPDU_START_PN_LSBS_S;
+			num_mpdus =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_NUM_MPDUS_16))&
+				 OL_FW_TX_DBG_PPDU_NUM_MPDUS_M) >>
+				OL_FW_TX_DBG_PPDU_NUM_MPDUS_S;
+			peer_id =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_PEER_ID_16)) &
+				 OL_FW_TX_DBG_PPDU_PEER_ID_M) >>
+				OL_FW_TX_DBG_PPDU_PEER_ID_S;
+			ext_tid =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_EXT_TID_16)) &
+				 OL_FW_TX_DBG_PPDU_EXT_TID_M) >>
+				OL_FW_TX_DBG_PPDU_EXT_TID_S;
+			rate_code =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_RATE_CODE_16))&
+				 OL_FW_TX_DBG_PPDU_RATE_CODE_M) >>
+				OL_FW_TX_DBG_PPDU_RATE_CODE_S;
+			rate_flags =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_RATEFLAGS_16))&
+				 OL_FW_TX_DBG_PPDU_RATE_FLAGS_M) >>
+				OL_FW_TX_DBG_PPDU_RATE_FLAGS_S;
+			tries =
+				((*(msg_word + OL_FW_TX_DBG_PPDU_TRIES_16)) &
+				 OL_FW_TX_DBG_PPDU_TRIES_M) >>
+				OL_FW_TX_DBG_PPDU_TRIES_S;
+
+			cdf_print(" - PPDU tx to peer %d, TID %d\n", peer_id,
+				  ext_tid);
+			cdf_print
+				("   start seq num= %u, start PN LSBs= %#04x\n",
+				start_seq_num, start_pn_lsbs);
+			cdf_print
+				("   PPDU: %d MPDUs, (?) MSDUs, %d bytes\n",
+				num_mpdus,
+				 /* num_msdus - not yet computed in target */
+				record->num_bytes);
+			if (complete) {
+				cdf_print
+				      ("   enqueued: %u, completed: %u usec)\n",
+				       time_enqueue_us, time_completion_us);
+				cdf_print
+					("   %d tries, last tx used rate %d ",
+					 tries, rate_code);
+				cdf_print("on %d MHz chan (flags = %#x)\n",
+					  htt_rate_flags_to_mhz
+					  (rate_flags), rate_flags);
+				cdf_print
+				      ("  enqueued and acked MPDU bitmaps:\n");
+				htt_t2h_tx_ppdu_bitmaps_pr(msg_word +
+					   OL_FW_TX_DBG_PPDU_ENQUEUED_LSBS_16,
+							   msg_word +
+					   OL_FW_TX_DBG_PPDU_BLOCK_ACK_LSBS_16);
+			} else {
+				cdf_print
+				      ("  enqueued: %d us, not yet completed\n",
+					time_enqueue_us);
+			}
+			/* skip the regular msg fields to reach the tail area */
+			p8 = (uint8_t *) record;
+			p8 += sizeof(struct ol_fw_tx_dbg_ppdu_base);
+			if (hdr->mpdu_bytes_array_len) {
+				htt_make_u16_list_str((uint32_t *) p8, buf,
+						      BUF_SIZE,
+						      hdr->
+						      mpdu_bytes_array_len);
+				cdf_print("   MPDU bytes: %s\n", buf);
+			}
+			p8 += hdr->mpdu_bytes_array_len * sizeof(uint16_t);
+			if (hdr->mpdu_msdus_array_len) {
+				htt_make_u8_list_str((uint32_t *) p8, buf,
+						     BUF_SIZE,
+						     hdr->mpdu_msdus_array_len);
+				cdf_print("   MPDU MSDUs: %s\n", buf);
+			}
+			p8 += hdr->mpdu_msdus_array_len * sizeof(uint8_t);
+			if (hdr->msdu_bytes_array_len) {
+				htt_make_u16_list_str((uint32_t *) p8, buf,
+						      BUF_SIZE,
+						      hdr->
+						      msdu_bytes_array_len);
+				cdf_print("   MSDU bytes: %s\n", buf);
+			}
+		} else {
+			/* concise */
+			cdf_print("start seq num = %u ", start_seq_num);
+			cdf_print("enqueued and acked MPDU bitmaps:\n");
+			if (complete) {
+				htt_t2h_tx_ppdu_bitmaps_pr(msg_word +
+					OL_FW_TX_DBG_PPDU_ENQUEUED_LSBS_16,
+							   msg_word +
+					OL_FW_TX_DBG_PPDU_BLOCK_ACK_LSBS_16);
+			} else {
+				cdf_print("(not completed)\n");
+			}
+		}
+		/* advance to the next back-to-back record */
+		record = (struct ol_fw_tx_dbg_ppdu_base *)
+			 (((uint8_t *) record) + record_size);
+	}
+}
+
+/**
+ * htt_t2h_stats_print() - dispatch a target->host stats-conf TLV to the
+ * matching printer
+ * @stats_data: start of the stats TLV; the first 32-bit word holds the
+ *              type, status and length fields
+ * @concise: forwarded to printers that support a short output form
+ *
+ * Returns early (printing a diagnostic where useful) when the status is
+ * SERIES_DONE, INVALID or ERROR; otherwise switches on the TLV type and
+ * hands the payload -- which begins one 32-bit word past the TLV header
+ * (msg_word + 1) -- to the type-specific print routine.  Unknown types
+ * are silently ignored.
+ */
+void htt_t2h_stats_print(uint8_t *stats_data, int concise)
+{
+	uint32_t *msg_word = (uint32_t *) stats_data;
+	enum htt_dbg_stats_type type;
+	enum htt_dbg_stats_status status;
+	int length;
+
+	type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
+	status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
+	length = HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word);
+
+	/* check that we've been given a valid stats type */
+	if (status == HTT_DBG_STATS_STATUS_SERIES_DONE) {
+		return;
+	} else if (status == HTT_DBG_STATS_STATUS_INVALID) {
+		cdf_print("Target doesn't support stats type %d\n", type);
+		return;
+	} else if (status == HTT_DBG_STATS_STATUS_ERROR) {
+		cdf_print("Target couldn't upload stats type %d (no mem?)\n",
+			  type);
+		return;
+	}
+	/* got valid (though perhaps partial) stats - process them */
+	switch (type) {
+	case HTT_DBG_STATS_WAL_PDEV_TXRX:
+	{
+		struct wlan_dbg_stats *wlan_dbg_stats_ptr;
+
+		wlan_dbg_stats_ptr =
+			(struct wlan_dbg_stats *)(msg_word + 1);
+		htt_t2h_stats_pdev_stats_print(wlan_dbg_stats_ptr,
+					       concise);
+		break;
+	}
+	case HTT_DBG_STATS_RX_REORDER:
+	{
+		struct rx_reorder_stats *rx_reorder_stats_ptr;
+
+		rx_reorder_stats_ptr =
+			(struct rx_reorder_stats *)(msg_word + 1);
+		htt_t2h_stats_rx_reorder_stats_print
+			(rx_reorder_stats_ptr, concise);
+		break;
+	}
+
+	case HTT_DBG_STATS_RX_RATE_INFO:
+	{
+		wlan_dbg_rx_rate_info_t *rx_phy_info;
+		rx_phy_info =
+			(wlan_dbg_rx_rate_info_t *) (msg_word + 1);
+
+		htt_t2h_stats_rx_rate_stats_print(rx_phy_info, concise);
+
+		break;
+	}
+	case HTT_DBG_STATS_RX_RATE_INFO_V2:
+	{
+		wlan_dbg_rx_rate_info_v2_t *rx_phy_info;
+		rx_phy_info =
+			(wlan_dbg_rx_rate_info_v2_t *) (msg_word + 1);
+		htt_t2h_stats_rx_rate_stats_print_v2(rx_phy_info, concise);
+		break;
+	}
+	case HTT_DBG_STATS_TX_PPDU_LOG:
+	{
+		struct ol_fw_tx_dbg_ppdu_msg_hdr *hdr;
+		struct ol_fw_tx_dbg_ppdu_base *record;
+
+		/* a PARTIAL upload can legitimately carry no payload */
+		if (status == HTT_DBG_STATS_STATUS_PARTIAL
+		    && length == 0) {
+			cdf_print
+				("HTT_DBG_STATS_TX_PPDU_LOG -- length = 0!\n");
+			break;
+		}
+		hdr =
+			(struct ol_fw_tx_dbg_ppdu_msg_hdr *)(msg_word + 1);
+		record = (struct ol_fw_tx_dbg_ppdu_base *)(hdr + 1);
+		htt_t2h_tx_ppdu_log_print(hdr, record, length, concise);
+	}
+	break;
+	case HTT_DBG_STATS_TX_RATE_INFO:
+	{
+		wlan_dbg_tx_rate_info_t *tx_rate_info;
+		tx_rate_info =
+			(wlan_dbg_tx_rate_info_t *) (msg_word + 1);
+
+		htt_t2h_stats_tx_rate_stats_print(tx_rate_info, concise);
+
+		break;
+	}
+	case HTT_DBG_STATS_TX_RATE_INFO_V2:
+	{
+		wlan_dbg_tx_rate_info_v2_t *tx_rate_info;
+		tx_rate_info =
+			(wlan_dbg_tx_rate_info_v2_t *) (msg_word + 1);
+		htt_t2h_stats_tx_rate_stats_print_v2(tx_rate_info, concise);
+		break;
+	}
+	case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
+	{
+		struct rx_remote_buffer_mgmt_stats *rx_rem_buf;
+
+		rx_rem_buf = (struct rx_remote_buffer_mgmt_stats *)(msg_word + 1);
+		htt_t2h_stats_rx_rem_buf_stats_print(rx_rem_buf, concise);
+		break;
+	}
+	case HTT_DBG_STATS_TXBF_INFO:
+	{
+		struct wlan_dbg_txbf_data_stats *txbf_info_buf;
+
+		txbf_info_buf =
+			(struct wlan_dbg_txbf_data_stats *)(msg_word + 1);
+		htt_t2h_stats_txbf_info_buf_stats_print(txbf_info_buf);
+		break;
+	}
+	case HTT_DBG_STATS_SND_INFO:
+	{
+		struct wlan_dbg_txbf_snd_stats *txbf_snd_buf;
+
+		txbf_snd_buf =
+			(struct wlan_dbg_txbf_snd_stats *)(msg_word + 1);
+		htt_t2h_stats_txbf_snd_buf_stats_print(txbf_snd_buf);
+		break;
+	}
+	case HTT_DBG_STATS_TX_SELFGEN_INFO:
+	{
+		struct wlan_dbg_tx_selfgen_stats  *tx_selfgen_buf;
+
+		tx_selfgen_buf =
+			(struct wlan_dbg_tx_selfgen_stats  *)(msg_word + 1);
+		htt_t2h_stats_tx_selfgen_buf_stats_print(tx_selfgen_buf);
+		break;
+	}
+	case HTT_DBG_STATS_ERROR_INFO:
+	{
+		struct wlan_dbg_wifi2_error_stats  *wifi2_error_buf;
+
+		wifi2_error_buf =
+			(struct wlan_dbg_wifi2_error_stats  *)(msg_word + 1);
+		htt_t2h_stats_wifi2_error_stats_print(wifi2_error_buf);
+		break;
+	}
+	case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
+	{
+		struct rx_txbf_musu_ndpa_pkts_stats *rx_musu_ndpa_stats;
+
+		rx_musu_ndpa_stats = (struct rx_txbf_musu_ndpa_pkts_stats *)
+								(msg_word + 1);
+		htt_t2h_rx_musu_ndpa_pkts_stats_print(rx_musu_ndpa_stats);
+		break;
+	}
+	default:
+		break;
+	}
+}

+ 904 - 0
core/dp/htt/htt_h2t.c

@@ -0,0 +1,904 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_h2t.c
+ * @brief Provide functions to send host->target HTT messages.
+ * @details
+ *  This file contains functions related to host->target HTT messages.
+ *  There are a couple aspects of this host->target messaging:
+ *  1.  This file contains the function that is called by HTC when
+ *      a host->target send completes.
+ *      This send-completion callback is primarily relevant to HL,
+ *      to invoke the download scheduler to set up a new download,
+ *      and optionally free the tx frame whose download is completed.
+ *      For both HL and LL, this completion callback frees up the
+ *      HTC_PACKET object used to specify the download.
+ *  2.  This file contains functions for creating messages to send
+ *      from the host to the target.
+ */
+
+#include <cdf_memory.h>         /* cdf_mem_copy */
+#include <cdf_nbuf.h>           /* cdf_nbuf_map_single */
+#include <htc_api.h>            /* HTC_PACKET */
+#include <htc.h>                /* HTC_HDR_ALIGNMENT_PADDING */
+#include <htt.h>                /* HTT host->target msg defs */
+#include <ol_txrx_htt_api.h>    /* ol_tx_completion_handler, htt_tx_status */
+#include <ol_htt_tx_api.h>
+
+#include <htt_internal.h>
+
+#define HTT_MSG_BUF_SIZE(msg_bytes) \
+	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
+#endif
+
+static void
+htt_h2t_send_complete_free_netbuf(void *pdev, A_STATUS status,
+				  cdf_nbuf_t netbuf, uint16_t msdu_id)
+{
+	cdf_nbuf_free(netbuf);
+}
+
+void htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
+{
+	void (*send_complete_part2)(void *pdev, A_STATUS status,
+				    cdf_nbuf_t msdu, uint16_t msdu_id);
+	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+	struct htt_htc_pkt *htt_pkt;
+	cdf_nbuf_t netbuf;
+
+	send_complete_part2 = htc_pkt->pPktContext;
+
+	htt_pkt = container_of(htc_pkt, struct htt_htc_pkt, htc_pkt);
+
+	/* process (free or keep) the netbuf that held the message */
+	netbuf = (cdf_nbuf_t) htc_pkt->pNetBufContext;
+	if (send_complete_part2 != NULL) {
+		send_complete_part2(htt_pkt->pdev_ctxt, htc_pkt->Status, netbuf,
+				    htt_pkt->msdu_id);
+	}
+	/* free the htt_htc_pkt / HTC_PACKET object */
+	htt_htc_pkt_free(pdev, htt_pkt);
+}
+
+HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt)
+{
+/* FIX THIS */
+	return HTC_SEND_FULL_KEEP;
+}
+
+#if defined(HELIUMPLUS_PADDR64)
/**
 * htt_h2t_frag_desc_bank_cfg_msg() - advertise the host tx fragment
 *	descriptor bank to the target
 * @pdev: HTT pdev whose frag_descs pool (base paddr, element size and
 *	element count) is sent to the firmware
 *
 * Builds a HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG message for a single,
 * hard-coded descriptor bank on pdev 0 and hands it to HTC.
 *
 * Return: A_OK on success; A_ERROR if the htt_htc_pkt or netbuf
 *	allocation fails; otherwise the status from htc_send_pkt().
 */
A_STATUS htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev)
{
	A_STATUS rc = A_OK;

	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_tx_frag_desc_bank_cfg_t *bank_cfg;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_ERROR; /* failure */

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL; /* not used during send-done callback */

	msg = cdf_nbuf_alloc(
		pdev->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_tx_frag_desc_bank_cfg_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_ERROR; /* failure */
	}

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to cdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	cdf_nbuf_put_tail(msg, sizeof(struct htt_tx_frag_desc_bank_cfg_t));

	/* fill in the message contents */
	msg_word = (u_int32_t *) cdf_nbuf_data(msg);

	/* zero the whole fixed-size message before setting any bitfields */
	memset(msg_word, 0 , sizeof(struct htt_tx_frag_desc_bank_cfg_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG);

	/* overlay the bank-cfg struct on the message payload */
	bank_cfg = (struct htt_tx_frag_desc_bank_cfg_t *)msg_word;

	/** @note @todo Hard coded to 0 Assuming just one pdev for now.*/
	HTT_H2T_FRAG_DESC_BANK_PDEVID_SET(*msg_word, 0);
	/** @note Hard coded to 1.*/
	HTT_H2T_FRAG_DESC_BANK_NUM_BANKS_SET(*msg_word, 1);
	HTT_H2T_FRAG_DESC_BANK_DESC_SIZE_SET(*msg_word, pdev->frag_descs.size);
	HTT_H2T_FRAG_DESC_BANK_SWAP_SET(*msg_word, 0);

	/** Bank specific data structure.*/
#if HTT_PADDR64
	/* 64-bit target addressing: hi word fixed to 0 here */
	bank_cfg->bank_base_address[0].lo = pdev->frag_descs.pool_paddr;
	bank_cfg->bank_base_address[0].hi = 0;
#else /* ! HTT_PADDR64 */
	bank_cfg->bank_base_address[0] = pdev->frag_descs.pool_paddr;
#endif /* HTT_PADDR64 */
	/* Logical Min index */
	HTT_H2T_FRAG_DESC_BANK_MIN_IDX_SET(bank_cfg->bank_info[0], 0);
	/* Logical Max index */
	HTT_H2T_FRAG_DESC_BANK_MAX_IDX_SET(bank_cfg->bank_info[0],
					   pdev->frag_descs.pool_elems-1);

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		htt_h2t_send_complete_free_netbuf,
		cdf_nbuf_data(msg),
		cdf_nbuf_len(msg),
		pdev->htc_endpoint,
		1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	rc = htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return rc;
}
+
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_ERROR; /* failure */
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for the HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     true);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_ERROR; /* failure */
+	}
+
+	/*
+	 * Set the length of the message.
+	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
+	 * separately during the below call to cdf_nbuf_push_head.
+	 * The contribution from the HTC header is added separately inside HTC.
+	 */
+	cdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg), cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return A_OK;
+}
+
/**
 * htt_h2t_rx_ring_cfg_msg_ll() - configure the target's view of the
 *	host LL rx ring
 * @pdev: HTT pdev whose rx ring (base paddr, size, shadow index) is
 *	described to the firmware
 *
 * Builds a HTT_H2T_MSG_TYPE_RX_RING_CFG message for one rx ring: the
 * shadow-index register address, ring base and length, buffer size,
 * the set of rx-descriptor sub-fields the target should fill in, and
 * the 32-bit word offsets of those sub-fields inside the host rx
 * descriptor.  When packet logging is compiled in and enabled, the
 * ctrl/mgmt/null/phy-data and PPDU start/end fields are enabled too
 * (and ASPM is disabled).
 *
 * Return: A_OK on success, A_ERROR on allocation failure.
 */
A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;
	int enable_ctrl_data, enable_mgmt_data,
	    enable_null_data, enable_phy_data, enable_hdr,
	    enable_ppdu_start, enable_ppdu_end;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_ERROR; /* failure */

	/* show that this is not a tx frame download
	   (not required, but helpful)
	*/
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */

	/* reserve room for the HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev,
			     HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     true);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_ERROR; /* failure */
	}
	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to cdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	cdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: message type + number of rings */
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
	HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1);

	/* shadow index register address (1 or 2 words per HTT_PADDR64) */
	msg_word++;
	*msg_word = 0;
#if HTT_PADDR64
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_LO_SET(*msg_word,
						    pdev->rx_ring.alloc_idx.paddr);
	msg_word++;
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_HI_SET(*msg_word, 0);
#else /* ! HTT_PADDR64 */
	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET(*msg_word,
						 pdev->rx_ring.alloc_idx.paddr);
#endif /* HTT_PADDR64 */

	/* rx ring base physical address (1 or 2 words per HTT_PADDR64) */
	msg_word++;
	*msg_word = 0;
#if HTT_PADDR64
	HTT_RX_RING_CFG_BASE_PADDR_LO_SET(*msg_word,
					  pdev->rx_ring.base_paddr);
	msg_word++;
	HTT_RX_RING_CFG_BASE_PADDR_HI_SET(*msg_word, 0);
#else /* ! HTT_PADDR64 */
	HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr);
#endif /* HTT_PADDR64 */

	/* ring length (in elements) and per-buffer size */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size);
	HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE);

/* FIX THIS: if the FW creates a complete translated rx descriptor,
 * then the MAC DMA of the HW rx descriptor should be disabled.
 */
	msg_word++;
	*msg_word = 0;
#ifndef REMOVE_PKT_LOG
	if (ol_cfg_is_packet_log_enabled(pdev->ctrl_pdev)) {
		/* packet log wants every descriptor sub-field delivered */
		enable_ctrl_data = 1;
		enable_mgmt_data = 1;
		enable_null_data = 1;
		enable_phy_data = 1;
		enable_hdr = 1;
		enable_ppdu_start = 1;
		enable_ppdu_end = 1;
		/* Disable ASPM when pkt log is enabled */
		cdf_print("Pkt log is enabled\n");
		htt_htc_disable_aspm();
	} else {
		cdf_print("Pkt log is disabled\n");
		enable_ctrl_data = 0;
		enable_mgmt_data = 0;
		enable_null_data = 0;
		enable_phy_data = 0;
		enable_hdr = 0;
		enable_ppdu_start = 0;
		enable_ppdu_end = 0;
	}
#else
	enable_ctrl_data = 0;
	enable_mgmt_data = 0;
	enable_null_data = 0;
	enable_phy_data = 0;
	enable_hdr = 0;
	enable_ppdu_start = 0;
	enable_ppdu_end = 0;
#endif
	HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, enable_hdr);
	HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, enable_ppdu_start);
	HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, enable_ppdu_end);
	HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 1);
	/* always present? */
	HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1);
	HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1);
	/* Must change to dynamic enable at run time
	 * rather than at compile time
	 */
	HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, enable_ctrl_data);
	HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, enable_mgmt_data);
	HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, enable_null_data);
	HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, enable_phy_data);
	HTT_RX_RING_CFG_IDX_INIT_VAL_SET(*msg_word,
					 *pdev->rx_ring.alloc_idx.vaddr);

	/* remaining words: 32-bit word offsets of each descriptor field */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word,
					      RX_DESC_HDR_STATUS_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word,
					      HTT_RX_DESC_RESERVATION32);

	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word,
					      RX_DESC_PPDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word,
					    RX_DESC_PPDU_END_OFFSET32);

	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word,
					      RX_DESC_MPDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word,
					    RX_DESC_MPDU_END_OFFSET32);

	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word,
					      RX_DESC_MSDU_START_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word,
					    RX_DESC_MSDU_END_OFFSET32);

	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word,
					   RX_DESC_ATTN_OFFSET32);
	HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word,
					     RX_DESC_FRAG_INFO_OFFSET32);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
		htt_htc_misc_pkt_list_add(pdev, pkt);
#else
	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif
	return A_OK;
}
+
+int
+htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
+		      uint32_t stats_type_upload_mask,
+		      uint32_t stats_type_reset_mask,
+		      uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -EINVAL;      /* failure */
+
+	if (stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
+	    stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
+		/* FIX THIS - add more details? */
+		cdf_print("%#x %#x stats not supported\n",
+			  stats_type_upload_mask, stats_type_reset_mask);
+		return -EINVAL;      /* failure */
+	}
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ),
+			     /* reserve room for HTC header */
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -EINVAL;      /* failure */
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ);
+	HTT_H2T_STATS_REQ_UPLOAD_TYPES_SET(*msg_word, stats_type_upload_mask);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_H2T_STATS_REQ_RESET_TYPES_SET(*msg_word, stats_type_reset_mask);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_H2T_STATS_REQ_CFG_VAL_SET(*msg_word, cfg_val);
+	HTT_H2T_STATS_REQ_CFG_STAT_TYPE_SET(*msg_word, cfg_stat_type);
+
+	/* cookie LSBs */
+	msg_word++;
+	*msg_word = cookie & 0xffffffff;
+
+	/* cookie MSBs */
+	msg_word++;
+	*msg_word = cookie >> 32;
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return 0;
+}
+
+A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, uint8_t sync_cnt)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	   (not required, but helpful)
+	*/
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC);
+	HTT_H2T_SYNC_COUNT_SET(*msg_word, sync_cnt);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return A_OK;
+}
+
+int
+htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
+		     int max_subfrms_ampdu, int max_subfrms_amsdu)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -EINVAL;      /* failure */
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_AGGR_CFG_MSG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -EINVAL;      /* failure */
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_AGGR_CFG_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_AGGR_CFG);
+
+	if (max_subfrms_ampdu && (max_subfrms_ampdu <= 64)) {
+		HTT_AGGR_CFG_MAX_NUM_AMPDU_SUBFRM_SET(*msg_word,
+						      max_subfrms_ampdu);
+	}
+
+	if (max_subfrms_amsdu && (max_subfrms_amsdu < 32)) {
+		HTT_AGGR_CFG_MAX_NUM_AMSDU_SUBFRM_SET(*msg_word,
+						      max_subfrms_amsdu);
+	}
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return 0;
+}
+
+#ifdef IPA_OFFLOAD
/**
 * htt_h2t_ipa_uc_rsc_cfg_msg() - advertise the WDI IPA micro-controller
 *	resources to the target
 * @pdev: HTT pdev holding the ipa_uc tx/rx resource addresses
 *
 * Builds a HTT_H2T_MSG_TYPE_WDI_IPA_CFG message carrying the tx
 * completion ring, CE write index, rx indication ring(s) and their
 * read/write index addresses.  All "HI" address words are written as
 * zero (only 32-bit physical addresses are passed here).
 *
 * Return: A_OK on success, A_NO_MEMORY on allocation failure.
 *	NOTE(review): return type is int while A_* codes are returned —
 *	callers appear to rely on A_OK; confirm A_OK == 0 in athdefs.
 */
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
	struct htt_htc_pkt *pkt;
	cdf_nbuf_t msg;
	uint32_t *msg_word;

	pkt = htt_htc_pkt_alloc(pdev);
	if (!pkt)
		return A_NO_MEMORY;

	/* show that this is not a tx frame download
	 * (not required, but helpful)
	 */
	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */

	/* reserve room for HTC header */
	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
			     false);
	if (!msg) {
		htt_htc_pkt_free(pdev, pkt);
		return A_NO_MEMORY;
	}
	/* set the length of the message */
	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);

	/* fill in the message contents */
	msg_word = (uint32_t *) cdf_nbuf_data(msg);

	/* rewind beyond alignment pad to get to the HTC header reserved area */
	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

	/* word 0: message type + tx packet pool size */
	*msg_word = 0;
	HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
		pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);

	msg_word++;
	*msg_word = 0;
	/* TX COMP RING BASE LO */
	HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
	msg_word++;
	*msg_word = 0;
	/* TX COMP RING BASE HI, NONE (32-bit paddr; word left zero) */

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
	msg_word++;
	*msg_word = 0;	/* tx comp wr idx HI word, left zero */

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
	msg_word++;
	*msg_word = 0;	/* tx CE wr idx HI word, left zero */

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_HI_SET(*msg_word,
		0);

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_HI_SET(*msg_word,
		0);

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_HI_SET(*msg_word,
		0);

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_HI_SET(*msg_word,
		0);

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_SIZE_SET(*msg_word,
		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_HI_SET(*msg_word,
		0);

	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_LO_SET(*msg_word,
		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_rdy_idx_paddr);
	msg_word++;
	*msg_word = 0;
	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_HI_SET(*msg_word,
		0);

	/* NOTE(review): debug aid — dumps the first 40 message bytes at
	 * FATAL trace level on every call; consider lowering/removing.
	 */
	cdf_trace_hex_dump(CDF_MODULE_ID_HTT, CDF_TRACE_LEVEL_FATAL,
		(void *)cdf_nbuf_data(msg), 40);

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       htt_h2t_send_complete_free_netbuf,
			       cdf_nbuf_data(msg),
			       cdf_nbuf_len(msg),
			       pdev->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

	return A_OK;
}
+
+int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
+			      bool uc_active, bool is_tx)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+	uint8_t active_target = 0;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	if (uc_active && is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_TX_RESUME;
+	else if (!uc_active && is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_TX_SUSPEND;
+	else if (uc_active && !is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_RX_RESUME;
+	else if (!uc_active && !is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_RX_SUSPEND;
+
+	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word, active_target);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+
+int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word,
+					   HTT_WDI_IPA_OPCODE_DBG_STATS);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+#endif /* IPA_OFFLOAD */

+ 500 - 0
core/dp/htt/htt_internal.h

@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HTT_INTERNAL__H_
+#define _HTT_INTERNAL__H_
+
+#include <athdefs.h>            /* A_STATUS */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <cdf_util.h>           /* cdf_assert */
+#include <htc_api.h>            /* HTC_PACKET */
+
+#include <htt_types.h>
+
+#ifndef offsetof
+#define offsetof(type, field)   ((size_t)(&((type *)0)->field))
+#endif
+
+#undef MS
+#define MS(_v, _f) (((_v) & _f ## _MASK) >> _f ## _LSB)
+#undef SM
+#define SM(_v, _f) (((_v) << _f ## _LSB) & _f ## _MASK)
+#undef WO
+#define WO(_f)      ((_f ## _OFFSET) >> 2)
+
+#define GET_FIELD(_addr, _f) MS(*((A_UINT32 *)(_addr) + WO(_f)), _f)
+
+#include <rx_desc.h>
+#include <wal_rx_desc.h>        /* struct rx_attention, etc */
+
/*
 * Host view of the FW-written per-MSDU rx descriptor, wrapped in a union
 * with a 32-bit pad so the field is always DWORD aligned and sized.
 */
struct htt_host_fw_desc_base {
	union {
		struct fw_rx_desc_base val;
		A_UINT32 dummy_pad;     /* make sure it is DWORD aligned */
	} u;
};
+
/*
 * This struct defines the basic descriptor information used by host,
 * which is written either by the 11ac HW MAC into the host Rx data
 * buffer ring directly or generated by FW and copied from Rx indication.
 * NOTE(review): the layout must match what the target HW/FW writes --
 * do not reorder or resize fields without checking the target side.
 */
#define RX_HTT_HDR_STATUS_LEN 64
struct htt_host_rx_desc_base {
	struct htt_host_fw_desc_base fw_desc;   /* FW-written portion */
	struct rx_attention attention;          /* HW words; see wal_rx_desc.h */
	struct rx_frag_info frag_info;
	struct rx_mpdu_start mpdu_start;
	struct rx_msdu_start msdu_start;
	struct rx_msdu_end msdu_end;
	struct rx_mpdu_end mpdu_end;
	struct rx_ppdu_start ppdu_start;
	struct rx_ppdu_end ppdu_end;
	char rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
};
+
+#define RX_STD_DESC_ATTN_OFFSET	\
+	(offsetof(struct htt_host_rx_desc_base, attention))
+#define RX_STD_DESC_FRAG_INFO_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, frag_info))
+#define RX_STD_DESC_MPDU_START_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, mpdu_start))
+#define RX_STD_DESC_MSDU_START_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, msdu_start))
+#define RX_STD_DESC_MSDU_END_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, msdu_end))
+#define RX_STD_DESC_MPDU_END_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, mpdu_end))
+#define RX_STD_DESC_PPDU_START_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, ppdu_start))
+#define RX_STD_DESC_PPDU_END_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, ppdu_end))
+#define RX_STD_DESC_HDR_STATUS_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, rx_hdr_status))
+
+#define RX_STD_DESC_FW_MSDU_OFFSET \
+	(offsetof(struct htt_host_rx_desc_base, fw_desc))
+
+#define RX_STD_DESC_SIZE (sizeof(struct htt_host_rx_desc_base))
+
+#define RX_DESC_ATTN_OFFSET32       (RX_STD_DESC_ATTN_OFFSET >> 2)
+#define RX_DESC_FRAG_INFO_OFFSET32  (RX_STD_DESC_FRAG_INFO_OFFSET >> 2)
+#define RX_DESC_MPDU_START_OFFSET32 (RX_STD_DESC_MPDU_START_OFFSET >> 2)
+#define RX_DESC_MSDU_START_OFFSET32 (RX_STD_DESC_MSDU_START_OFFSET >> 2)
+#define RX_DESC_MSDU_END_OFFSET32   (RX_STD_DESC_MSDU_END_OFFSET >> 2)
+#define RX_DESC_MPDU_END_OFFSET32   (RX_STD_DESC_MPDU_END_OFFSET >> 2)
+#define RX_DESC_PPDU_START_OFFSET32 (RX_STD_DESC_PPDU_START_OFFSET >> 2)
+#define RX_DESC_PPDU_END_OFFSET32   (RX_STD_DESC_PPDU_END_OFFSET >> 2)
+#define RX_DESC_HDR_STATUS_OFFSET32 (RX_STD_DESC_HDR_STATUS_OFFSET >> 2)
+
+#define RX_STD_DESC_SIZE_DWORD      (RX_STD_DESC_SIZE >> 2)
+
+/*
+ * Make sure there is a minimum headroom provided in the rx netbufs
+ * for use by the OS shim and OS and rx data consumers.
+ */
+#define HTT_RX_BUF_OS_MIN_HEADROOM 32
+#define HTT_RX_STD_DESC_RESERVATION  \
+	((HTT_RX_BUF_OS_MIN_HEADROOM > RX_STD_DESC_SIZE) ? \
+	 HTT_RX_BUF_OS_MIN_HEADROOM : RX_STD_DESC_SIZE)
+#define HTT_RX_DESC_RESERVATION32 \
+	(HTT_RX_STD_DESC_RESERVATION >> 2)
+
+#define HTT_RX_DESC_ALIGN_MASK 7        /* 8-byte alignment */
+static inline struct htt_host_rx_desc_base *htt_rx_desc(cdf_nbuf_t msdu)
+{
+	return (struct htt_host_rx_desc_base *)
+	       (((size_t) (cdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
+		~HTT_RX_DESC_ALIGN_MASK);
+}
+
+#if defined(FEATURE_LRO)
/**
 * htt_print_rx_desc_lro() - print LRO information in the rx
 * descriptor
 * @rx_desc: HTT rx descriptor
 *
 * Prints the LRO related fields in the HTT rx descriptor.
 * Debug aid only; all output goes through cdf_print.
 *
 * Return: none
 */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{
	cdf_print
		("----------------------RX DESC LRO----------------------\n");
	cdf_print("msdu_end.lro_eligible:0x%x\n",
		 rx_desc->msdu_end.lro_eligible);
	cdf_print("msdu_start.tcp_only_ack:0x%x\n",
		 rx_desc->msdu_start.tcp_only_ack);
	cdf_print("msdu_end.tcp_udp_chksum:0x%x\n",
		 rx_desc->msdu_end.tcp_udp_chksum);
	cdf_print("msdu_end.tcp_seq_number:0x%x\n",
		 rx_desc->msdu_end.tcp_seq_number);
	cdf_print("msdu_end.tcp_ack_number:0x%x\n",
		 rx_desc->msdu_end.tcp_ack_number);
	cdf_print("msdu_start.tcp_proto:0x%x\n",
		 rx_desc->msdu_start.tcp_proto);
	cdf_print("msdu_start.ipv6_proto:0x%x\n",
		 rx_desc->msdu_start.ipv6_proto);
	cdf_print("msdu_start.ipv4_proto:0x%x\n",
		 rx_desc->msdu_start.ipv4_proto);
	cdf_print("msdu_start.l3_offset:0x%x\n",
		 rx_desc->msdu_start.l3_offset);
	cdf_print("msdu_start.l4_offset:0x%x\n",
		 rx_desc->msdu_start.l4_offset);
	cdf_print("msdu_start.flow_id_toeplitz:0x%x\n",
			   rx_desc->msdu_start.flow_id_toeplitz);
	cdf_print
		("---------------------------------------------------------\n");
}
+
/**
 * htt_rx_extract_lro_info() - extract LRO information from the rx
 * descriptor
 * @msdu: network buffer
 * @rx_desc: HTT rx descriptor
 *
 * Extracts the LRO related fields from the HTT rx descriptor
 * and stores them in the network buffer's control block.
 * (The kernel-doc previously named this htt_print_rx_desc_lro --
 * corrected to match the function.)
 *
 * Return: none
 */
static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
	 struct htt_host_rx_desc_base *rx_desc)
{
	NBUF_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
	/* only copy the remaining TCP/IP fields for LRO-eligible frames */
	if (rx_desc->msdu_end.lro_eligible) {
		NBUF_TCP_PURE_ACK(msdu) = rx_desc->msdu_start.tcp_only_ack;
		NBUF_TCP_CHKSUM(msdu) = rx_desc->msdu_end.tcp_udp_chksum;
		NBUF_TCP_SEQ_NUM(msdu) = rx_desc->msdu_end.tcp_seq_number;
		NBUF_TCP_ACK_NUM(msdu) = rx_desc->msdu_end.tcp_ack_number;
		NBUF_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
		NBUF_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
		NBUF_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
		NBUF_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
		NBUF_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
		NBUF_FLOW_ID_TOEPLITZ(msdu) =
			 rx_desc->msdu_start.flow_id_toeplitz;
	}
}
+#else
/* FEATURE_LRO disabled: no-op stubs so callers need no #ifdef guards */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{}
static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
	 struct htt_host_rx_desc_base *rx_desc) {}
+#endif /* FEATURE_LRO */
+
/*
 * htt_print_rx_desc() - debug aid: dump every substruct of the rx
 * descriptor as raw 32-bit words via cdf_print.  The word counts per
 * substruct (e.g. 10 for ppdu_start, 22 for ppdu_end) mirror the HW
 * descriptor layout in wal_rx_desc.h.
 */
static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
{
	cdf_print
		("----------------------RX DESC----------------------------\n");
	cdf_print("attention: %#010x\n",
		  (unsigned int)(*(uint32_t *) &rx_desc->attention));
	cdf_print("frag_info: %#010x\n",
		  (unsigned int)(*(uint32_t *) &rx_desc->frag_info));
	cdf_print("mpdu_start: %#010x %#010x %#010x\n",
		  (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[0]),
		  (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[1]),
		  (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[2]));
	cdf_print("msdu_start: %#010x %#010x %#010x\n",
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[0]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[1]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[2]));
	cdf_print("msdu_end: %#010x %#010x %#010x %#010x %#010x\n",
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[0]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[1]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[2]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[3]),
		  (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[4]));
	cdf_print("mpdu_end: %#010x\n",
		  (unsigned int)(*(uint32_t *) &rx_desc->mpdu_end));
	cdf_print("ppdu_start: " "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n",
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[0]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[1]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[2]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[3]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[4]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[5]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[6]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[7]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[8]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[9]));
	cdf_print("ppdu_end:" "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n"
		  "%#010x,%#010x %#010x %#010x %#010x\n"
		  "%#010x %#010x %#010x %#010x %#010x\n" "%#010x %#010x\n",
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[0]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[1]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[2]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[3]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[4]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[5]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[6]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[7]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[8]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[9]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[10]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[11]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[12]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[13]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[14]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[15]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[16]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[17]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[18]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[19]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[20]),
		  (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[21]));
	cdf_print
		("---------------------------------------------------------\n");
}
+
+#ifndef HTT_ASSERT_LEVEL
+#define HTT_ASSERT_LEVEL 3
+#endif
+
+#define HTT_ASSERT_ALWAYS(condition) cdf_assert_always((condition))
+
+#define HTT_ASSERT0(condition) cdf_assert((condition))
+#if HTT_ASSERT_LEVEL > 0
+#define HTT_ASSERT1(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT1(condition)
+#endif
+
+#if HTT_ASSERT_LEVEL > 1
+#define HTT_ASSERT2(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT2(condition)
+#endif
+
+#if HTT_ASSERT_LEVEL > 2
+#define HTT_ASSERT3(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT3(condition)
+#endif
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * HTT_MAX_SEND_QUEUE_DEPTH -
+ * How many packets HTC should allow to accumulate in a send queue
+ * before calling the EpSendFull callback to see whether to retain
+ * or drop packets.
+ * This is not relevant for LL, where tx descriptors should be immediately
+ * downloaded to the target.
+ * This is not very relevant for HL either, since it is anticipated that
+ * the HL tx download scheduler will not work this far in advance - rather,
+ * it will make its decisions just-in-time, so it can be responsive to
+ * changing conditions.
+ * Hence, this queue depth threshold spec is mostly just a formality.
+ */
+#define HTT_MAX_SEND_QUEUE_DEPTH 64
+
+#define IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
+
+/* FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7  /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
+
+#ifdef BIG_ENDIAN_HOST
+/*
+ * big-endian: bytes within a 4-byte "word" are swapped:
+ * pre-swap  post-swap
+ *  index     index
+ *    0         3
+ *    1         2
+ *    2         1
+ *    3         0
+ *    4         7
+ *    5         6
+ * etc.
+ * To compute the post-swap index from the pre-swap index, compute
+ * the byte offset for the start of the word (index & ~0x3) and add
+ * the swapped byte offset within the word (3 - (index & 0x3)).
+ */
+#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) (((idx) & ~0x3) + (3 - ((idx) & 0x3)))
+#else
+/* little-endian: no adjustment needed */
+#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) idx
+#endif
+
+#define HTT_TX_MUTEX_INIT(_mutex)			\
+	cdf_spinlock_init(_mutex)
+
+#define HTT_TX_MUTEX_ACQUIRE(_mutex)			\
+	cdf_spin_lock_bh(_mutex)
+
+#define HTT_TX_MUTEX_RELEASE(_mutex)			\
+	cdf_spin_unlock_bh(_mutex)
+
+#define HTT_TX_MUTEX_DESTROY(_mutex)			\
+	cdf_spinlock_destroy(_mutex)
+
+#define HTT_TX_DESC_PADDR(_pdev, _tx_desc_vaddr)       \
+	((_pdev)->tx_descs.pool_paddr +  (uint32_t)	  \
+	 ((char *)(_tx_desc_vaddr) -			   \
+	  (char *)((_pdev)->tx_descs.pool_vaddr)))
+
+#ifdef ATH_11AC_TXCOMPACT
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)		\
+	cdf_spinlock_init(&_pdev->txnbufq_mutex)
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)	       \
+	HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)
+
+#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)	do {	\
+	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	\
+	_msdu =  cdf_nbuf_queue_remove(&_pdev->txnbufq);\
+	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);    \
+	} while (0)
+
+#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do {	\
+	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	\
+	cdf_nbuf_queue_add(&_pdev->txnbufq, _msdu);     \
+	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);    \
+	} while (0)
+
+#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do {   \
+	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	   \
+	cdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
+	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);       \
+	} while (0)
+#else
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)
+#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)
+
+#endif
+
+#ifdef ATH_11AC_TXCOMPACT
+#define HTT_TX_SCHED htt_tx_sched
+#else
+#define HTT_TX_SCHED(pdev)      /* no-op */
+#endif
+
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems);
+
+void htt_tx_detach(struct htt_pdev_t *pdev);
+
+int htt_rx_attach(struct htt_pdev_t *pdev);
+
+void htt_rx_detach(struct htt_pdev_t *pdev);
+
+int htt_htc_attach(struct htt_pdev_t *pdev);
+
+void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt);
+
+void htt_h2t_send_complete(void *context, HTC_PACKET *pkt);
+
+A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev);
+
+#if defined(HELIUMPLUS_PADDR64)
+A_STATUS
+htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+extern A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
+extern A_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+
+HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt);
+
+struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev);
+
+void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
+
+void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
+
+#ifdef ATH_11AC_TXCOMPACT
+void
+htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
+
+void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev);
+#endif
+
+void htt_htc_disable_aspm(void);
+
+int
+htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
+			cdf_nbuf_t netbuf);
+
+cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr);
+
+#ifdef IPA_OFFLOAD
+int
+htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+		     unsigned int uc_tx_buf_sz,
+		     unsigned int uc_tx_buf_cnt,
+		     unsigned int uc_tx_partition_base);
+
+int
+htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
+
+int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
+
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
+#else
/* IPA_OFFLOAD disabled: no-op stubs that report success (0) so the
 * attach/detach call sites need no #ifdef guards */
static inline int
htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
		     unsigned int uc_tx_buf_sz,
		     unsigned int uc_tx_buf_cnt,
		     unsigned int uc_tx_partition_base)
{
	return 0;
}

static inline int
htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size)
{
	return 0;
}

static inline int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}

static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
+#endif /* IPA_OFFLOAD */
+#endif /* _HTT_INTERNAL__H_ */

+ 2444 - 0
core/dp/htt/htt_rx.c

@@ -0,0 +1,2444 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_rx.c
+ * @brief Implement receive aspects of HTT.
+ * @details
+ *  This file contains three categories of HTT rx code:
+ *  1.  An abstraction of the rx descriptor, to hide the
+ *      differences between the HL vs. LL rx descriptor.
+ *  2.  Functions for providing access to the (series of)
+ *      rx descriptor(s) and rx frame(s) associated with
+ *      an rx indication message.
+ *  3.  Functions for setting up and using the MAC DMA
+ *      rx ring (applies to LL only).
+ */
+
+#include <cdf_memory.h>         /* cdf_mem_malloc,free, etc. */
+#include <cdf_types.h>          /* cdf_print, bool */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
+#include <cdf_softirq_timer.h>  /* cdf_softirq_timer_free */
+
+#include <htt.h>                /* HTT_HL_RX_DESC_SIZE */
+#include <ol_cfg.h>
+#include <ol_rx.h>
+#include <ol_htt_rx_api.h>
+#include <htt_internal.h>       /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
+#include "regtable.h"
+
+#include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
+#include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */
+
+#ifdef DEBUG_DMA_DONE
+#include <asm/barrier.h>
+#include <wma_api.h>
+#endif
+
+/* AR9888v1 WORKAROUND for EV#112367 */
+/* FIX THIS - remove this WAR when the bug is fixed */
+#define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
+
+/*--- setup / tear-down functions -------------------------------------------*/
+
+#ifndef HTT_RX_RING_SIZE_MIN
+#define HTT_RX_RING_SIZE_MIN 128        /* slightly > than one large A-MPDU */
+#endif
+
+#ifndef HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_SIZE_MAX 2048       /* ~20 ms @ 1 Gbps of 1500B MSDUs */
+#endif
+
+#ifndef HTT_RX_AVG_FRM_BYTES
+#define HTT_RX_AVG_FRM_BYTES 1000
+#endif
+
+#ifndef HTT_RX_HOST_LATENCY_MAX_MS
+#define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */	/* very conservative */
+#endif
+
+#ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 /* ms */	/* conservative */
+#endif
+
+#ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
+#define HTT_RX_RING_REFILL_RETRY_TIME_MS    50
+#endif
+
+/*--- RX In Order Definitions ------------------------------------------------*/
+
+/* Number of buckets in the hash table */
+#define RX_NUM_HASH_BUCKETS 1024        /* This should always be a power of 2 */
+#define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)
+
+/* Number of hash entries allocated per bucket */
+#define RX_ENTRIES_SIZE 10
+
+#define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
+
+#ifdef RX_HASH_DEBUG_LOG
+#define RX_HASH_LOG(x) x
+#else
+#define RX_HASH_LOG(x)          /* no-op */
+#endif
+
/* De-initialization function of the rx buffer hash table. This function will
   free up the hash table, which includes freeing all the pending rx buffers */
void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
{

	uint32_t i;
	struct htt_rx_hash_entry *hash_entry;
	struct htt_list_node *list_iter = NULL;

	if (NULL == pdev->rx_ring.hash_table)
		return;
	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
		/* Free the hash entries in hash bucket i */
		list_iter = pdev->rx_ring.hash_table[i].listhead.next;
		while (list_iter != &pdev->rx_ring.hash_table[i].listhead) {
			/* recover the entry from its embedded list node via
			 * the recorded offset of the node within the entry */
			hash_entry =
				(struct htt_rx_hash_entry *)((char *)list_iter -
							     pdev->rx_ring.
							     listnode_offset);
			if (hash_entry->netbuf) {
				cdf_nbuf_free(hash_entry->netbuf);
				hash_entry->paddr = 0;
			}
			/* advance before the entry may be freed below */
			list_iter = list_iter->next;

			/* entries allocated outside the pre-allocated pool
			 * (fromlist == 0) are freed individually */
			if (!hash_entry->fromlist)
				cdf_mem_free(hash_entry);
		}

		/* free the pre-allocated entry pool of bucket i */
		cdf_mem_free(pdev->rx_ring.hash_table[i].entries);

	}
	cdf_mem_free(pdev->rx_ring.hash_table);
	pdev->rx_ring.hash_table = NULL;
}
+
/*
 * ceil_pwr2() - round @value up to the next power of 2.
 * Values that are already powers of 2 (and 0, which the original
 * IS_PWR2 macro also accepted) are returned unchanged.
 *
 * The power-of-2 test is done with the standard (v & (v - 1)) == 0
 * idiom instead of the previous IS_PWR2 macro, whose ((value) << 1)
 * expression invokes signed-overflow UB for large inputs.
 */
static int ceil_pwr2(int value)
{
	int log2;

	if ((value & (value - 1)) == 0)
		return value;

	/* count the bits in @value; the result's sole set bit is one
	 * position above the input's highest set bit */
	log2 = 0;
	while (value) {
		value >>= 1;
		log2++;
	}
	return 1 << log2;
}
+
/*
 * htt_rx_msdu_first_msdu_flag_ll() - report whether this MSDU is the
 * first MSDU of its MPDU, by extracting the first_msdu bit from word 4
 * of the rx descriptor's msdu_end substruct.  Mask/shift access (rather
 * than a bitfield) keeps the code endian-neutral.
 */
static bool
htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return (bool)
		(((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
		  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
		 RX_MSDU_END_4_FIRST_MSDU_LSB);
}
+
+static int htt_rx_ring_size(struct htt_pdev_t *pdev)
+{
+	int size;
+
+	/*
+	 * It is expected that the host CPU will typically be able to service
+	 * the rx indication from one A-MPDU before the rx indication from
+	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
+	 * However, the rx ring should be sized very conservatively, to
+	 * accomodate the worst reasonable delay before the host CPU services
+	 * a rx indication interrupt.
+	 * The rx ring need not be kept full of empty buffers.  In theory,
+	 * the htt host SW can dynamically track the low-water mark in the
+	 * rx ring, and dynamically adjust the level to which the rx ring
+	 * is filled with empty buffers, to dynamically meet the desired
+	 * low-water mark.
+	 * In contrast, it's difficult to resize the rx ring itself, once
+	 * it's in use.
+	 * Thus, the ring itself should be sized very conservatively, while
+	 * the degree to which the ring is filled with empty buffers should
+	 * be sized moderately conservatively.
+	 */
+	size =
+		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
+		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+	if (size < HTT_RX_RING_SIZE_MIN)
+		size = HTT_RX_RING_SIZE_MIN;
+	else if (size > HTT_RX_RING_SIZE_MAX)
+		size = HTT_RX_RING_SIZE_MAX;
+
+	size = ceil_pwr2(size);
+	return size;
+}
+
+static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
+{
+	int size;
+
+	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
+		8 * HTT_RX_AVG_FRM_BYTES * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+	/*
+	 * Make sure the fill level is at least 1 less than the ring size.
+	 * Leaving 1 element empty allows the SW to easily distinguish
+	 * between a full ring vs. an empty ring.
+	 */
+	if (size >= pdev->rx_ring.size)
+		size = pdev->rx_ring.size - 1;
+
+	return size;
+}
+
+static void htt_rx_ring_refill_retry(void *arg)
+{
+	htt_pdev_handle pdev = (htt_pdev_handle) arg;
+	htt_rx_msdu_buff_replenish(pdev);
+}
+
/**
 * htt_rx_ring_fill_n() - allocate, DMA-map and post @num rx buffers
 * @pdev: HTT pdev handle
 * @num:  number of buffers to post
 *
 * For each buffer: allocate a netbuf sized for the rx descriptor plus
 * payload, clear the descriptor's attention word, map it for DMA, and
 * publish its physical address at the ring's alloc index.  On any
 * failure the loop stops early and a retry timer is armed (alloc
 * failure) or the buffer is unwound (map/hash failure); the alloc index
 * is always written back so the target sees only fully-posted buffers.
 */
void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{
	int idx;
	CDF_STATUS status;
	struct htt_host_rx_desc_base *rx_desc;

	idx = *(pdev->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		uint32_t paddr;
		cdf_nbuf_t rx_netbuf;
		int headroom;

		rx_netbuf =
			cdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
				       0, 4, false);
		if (!rx_netbuf) {
			cdf_softirq_timer_cancel(&pdev->rx_ring.
						 refill_retry_timer);
			/*
			 * Failed to fill it to the desired level -
			 * we'll start a timer and try again next time.
			 * As long as enough buffers are left in the ring for
			 * another A-MPDU rx, no special recovery is needed.
			 */
#ifdef DEBUG_DMA_DONE
			pdev->rx_ring.dbg_refill_cnt++;
#endif
			cdf_softirq_timer_start(
				&pdev->rx_ring.refill_retry_timer,
				HTT_RX_RING_REFILL_RETRY_TIME_MS);
			goto fail;
		}

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = htt_rx_desc(rx_netbuf);
		*(uint32_t *) &rx_desc->attention = 0;

#ifdef DEBUG_DMA_DONE
		/* sentinel values: overwritten by HW DMA when rx completes */
		*(uint32_t *) &rx_desc->msdu_end = 1;

#define MAGIC_PATTERN 0xDEADBEEF
		*(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;

		/* To ensure that attention bit is reset and msdu_end is set
		   before calling dma_map */
		smp_mb();
#endif
		/*
		 * Adjust cdf_nbuf_data to point to the location in the buffer
		 * where the rx descriptor will be filled in.
		 */
		headroom = cdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
		cdf_nbuf_push_head(rx_netbuf, headroom);

#ifdef DEBUG_DMA_DONE
		status =
			cdf_nbuf_map(pdev->osdev, rx_netbuf,
						CDF_DMA_BIDIRECTIONAL);
#else
		status =
			cdf_nbuf_map(pdev->osdev, rx_netbuf,
						CDF_DMA_FROM_DEVICE);
#endif
		if (status != CDF_STATUS_SUCCESS) {
			cdf_nbuf_free(rx_netbuf);
			goto fail;
		}
		paddr = cdf_nbuf_get_frag_paddr_lo(rx_netbuf, 0);
		if (pdev->cfg.is_full_reorder_offload) {
			/* full reorder offload: track the buffer by its
			 * physical address so rx-in-order indications can
			 * be matched back to the netbuf */
			if (cdf_unlikely
				    (htt_rx_hash_list_insert(pdev, paddr,
							     rx_netbuf))) {
				cdf_print("%s: hash insert failed!\n",
					  __func__);
#ifdef DEBUG_DMA_DONE
				cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       CDF_DMA_BIDIRECTIONAL);
#else
				cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       CDF_DMA_FROM_DEVICE);
#endif
				cdf_nbuf_free(rx_netbuf);
				goto fail;
			}
		} else {
			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
		}
#if HTT_PADDR64
		pdev->rx_ring.buf.paddrs_ring[idx] = 0;
		pdev->rx_ring.buf.paddrs_ring[idx] = (uint32_t)paddr;
#else
		pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
#endif /* HTT_PADDR64 */
		pdev->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= pdev->rx_ring.size_mask;
	}

fail:
	/* publish the new producer index even on early exit */
	*(pdev->rx_ring.alloc_idx.vaddr) = idx;
	return;
}
+
+unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
+{
+	return
+		(*pdev->rx_ring.alloc_idx.vaddr -
+		 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
+}
+
+unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
+{
+	return
+		(*pdev->rx_ring.alloc_idx.vaddr -
+		 *pdev->rx_ring.target_idx.vaddr) &
+		pdev->rx_ring.size_mask;
+}
+
/**
 * htt_rx_detach() - tear down the rx ring and release all rx resources
 * @pdev: HTT pdev handle
 *
 * Stops the refill retry timer, frees every still-posted rx netbuf
 * (via the hash table in full-reorder-offload mode, or by walking the
 * netbufs ring otherwise), then releases the DMA-coherent index and
 * paddr-ring allocations.
 */
void htt_rx_detach(struct htt_pdev_t *pdev)
{
	/* stop the retry timer first so no refill races the teardown */
	cdf_softirq_timer_cancel(&pdev->rx_ring.refill_retry_timer);
	cdf_softirq_timer_free(&pdev->rx_ring.refill_retry_timer);

	if (pdev->cfg.is_full_reorder_offload) {
		cdf_os_mem_free_consistent(pdev->osdev,
					   sizeof(uint32_t),
					   pdev->rx_ring.target_idx.vaddr,
					   pdev->rx_ring.target_idx.paddr,
					   cdf_get_dma_mem_context((&pdev->
								    rx_ring.
								    target_idx),
								   memctx));
		/* frees all netbufs still tracked in the paddr hash */
		htt_rx_hash_deinit(pdev);
	} else {
		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;

		/* unmap and free every buffer between the SW read index
		 * and the producer index */
		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
#ifdef DEBUG_DMA_DONE
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       CDF_DMA_BIDIRECTIONAL);
#else
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->rx_ring.buf.
				       netbufs_ring[sw_rd_idx],
				       CDF_DMA_FROM_DEVICE);
#endif
			cdf_nbuf_free(pdev->rx_ring.buf.
				      netbufs_ring[sw_rd_idx]);
			sw_rd_idx++;
			sw_rd_idx &= pdev->rx_ring.size_mask;
		}
		cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
	}

	cdf_os_mem_free_consistent(pdev->osdev,
				   sizeof(uint32_t),
				   pdev->rx_ring.alloc_idx.vaddr,
				   pdev->rx_ring.alloc_idx.paddr,
				   cdf_get_dma_mem_context((&pdev->rx_ring.
							    alloc_idx),
							   memctx));

	cdf_os_mem_free_consistent(pdev->osdev,
				   pdev->rx_ring.size * sizeof(uint32_t),
				   pdev->rx_ring.buf.paddrs_ring,
				   pdev->rx_ring.base_paddr,
				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),
							   memctx));
}
+
+/*--- rx descriptor field access functions ----------------------------------*/
+/*
+ * These functions need to use bit masks and shifts to extract fields
+ * from the rx descriptors, rather than directly using the bitfields.
+ * For example, use
+ *     (desc & FIELD_MASK) >> FIELD_LSB
+ * rather than
+ *     desc.field
+ * This allows the functions to work correctly on either little-endian
+ * machines (no endianness conversion needed) or big-endian machines
+ * (endianness conversion provided automatically by the HW DMA's
+ * byte-swizzling).
+ */
+/* FIX THIS: APPLIES TO LL ONLY */
+
+/**
+ * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
+ *                               for the Low Latency driver
+ * @pdev:                          Handle (pointer) to HTT pdev.
+ * @mpdu_desc:                     Void pointer to the Rx descriptor for MPDU
+ *                                 before the beginning of the payload.
+ *
+ *  This function returns the retry bit of the 802.11 header for the
+ *  provided rx MPDU descriptor.
+ *
+ * Return:        boolean -- true if retry is set, false otherwise
+ */
+bool
+htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc =
+		(struct htt_host_rx_desc_base *) mpdu_desc;
+
+	return
+		(bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
+		RX_MPDU_START_0_RETRY_MASK) >>
+		RX_MPDU_START_0_RETRY_LSB);
+}
+
+uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc =
+		(struct htt_host_rx_desc_base *)mpdu_desc;
+
+	return
+		(uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
+			     RX_MPDU_START_0_SEQ_NUM_MASK) >>
+			    RX_MPDU_START_0_SEQ_NUM_LSB);
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
/**
 * htt_rx_mpdu_desc_pn_ll() - assemble the packet number (PN) of an MPDU
 * @pdev:        HTT pdev handle (unused here)
 * @mpdu_desc:   rx descriptor of the MPDU
 * @pn:          output union receiving the assembled PN
 * @pn_len_bits: PN width: 24, 48 or 128 bits
 *
 * Gathers the PN fields scattered across the mpdu_start and msdu_end
 * portions of the rx descriptor into a single value.  Mask/shift access
 * (rather than bitfields) keeps the extraction endian-neutral.
 * On an unrecognized @pn_len_bits, *pn is left unmodified and an error
 * is printed.
 */
void
htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	switch (pn_len_bits) {
	case 24:
		/* bits 23:0 */
		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
		break;
	case 48:
		/* bits 31:0 */
		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn48 |= ((uint64_t)
			     ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
			      & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		break;
	case 128:
		/* 128-bit (WAPI) PN spans mpdu_start and msdu_end */
		/* bits 31:0 */
		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
				     & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		/* bits 63:48
		 * NOTE(review): word index +2 is read while the mask names
		 * say word 1 (RX_MSDU_END_1_...) -- confirm against the HW
		 * register definitions. */
		pn->pn128[0] |=
			((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
		/* bits 95:64 */
		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
		/* bits 127:96 */
		pn->pn128[1] |=
			((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
		break;
	default:
		cdf_print("Error: invalid length spec (%d bits) for PN\n",
			  pn_len_bits);
	};
}
+
+/**
+ * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
+ *                             for Low Latency driver
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU,
+ *             located before the beginning of the payload.
+ *
+ * Reads the TID that the MAC copied from the 802.11 QoS Control field
+ * into word 2 of the HW mpdu_start descriptor.
+ *
+ * Return: TID value carried in the packet header
+ */
+uint8_t
+htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = mpdu_desc;
+	uint32_t mpdu_start_word2 = *(((uint32_t *) &rx_desc->mpdu_start) + 2);
+
+	return (uint8_t)((mpdu_start_word2 & RX_MPDU_START_2_TID_MASK) >>
+			 RX_MPDU_START_2_TID_LSB);
+}
+
+/**
+ * htt_rx_mpdu_desc_tsf32() - Return the 32-bit TSF timestamp for an MPDU
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU.
+ *
+ * Return: 0 always - timestamp retrieval is not implemented yet (FIX THIS)
+ */
+uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	/* FIX THIS: not implemented; report a zero timestamp for now */
+	return 0;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_mpdu_wifi_hdr_retrieve() - Return the raw 802.11 header bytes
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU.
+ *
+ * Return: pointer to the rx_hdr_status area of the HW rx descriptor,
+ *         which holds the received 802.11 header
+ */
+char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = mpdu_desc;
+
+	return rx_desc->rx_hdr_status;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_msdu_desc_completes_mpdu_ll() - Check the HW "last msdu" flag
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: true if this MSDU is the final one of its MPDU, false otherwise
+ */
+bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = msdu_desc;
+	uint32_t msdu_end_word4 = *(((uint32_t *) &rx_desc->msdu_end) + 4);
+
+	return (bool)((msdu_end_word4 & RX_MSDU_END_4_LAST_MSDU_MASK) >>
+		      RX_MSDU_END_4_LAST_MSDU_LSB);
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_msdu_has_wlan_mcast_flag_ll() - Check whether the mcast flag is valid
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * HW rx desc: the mcast_bcast flag is only valid if first_msdu is set,
+ * so this returns the first_msdu bit rather than the flag itself.
+ *
+ * Return: non-zero if the mcast/bcast flag is valid for this MSDU
+ */
+int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = msdu_desc;
+	uint32_t msdu_end_word4 = *(((uint32_t *) &rx_desc->msdu_end) + 4);
+
+	return (msdu_end_word4 & RX_MSDU_END_4_FIRST_MSDU_MASK) >>
+	       RX_MSDU_END_4_FIRST_MSDU_LSB;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_msdu_is_wlan_mcast_ll() - Check the HW mcast/bcast attention bit
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: non-zero if the HW flagged the frame as multicast/broadcast
+ */
+bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = msdu_desc;
+	uint32_t attention_word0 = *((uint32_t *) &rx_desc->attention);
+
+	return (attention_word0 & RX_ATTENTION_0_MCAST_BCAST_MASK) >>
+	       RX_ATTENTION_0_MCAST_BCAST_LSB;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_msdu_is_frag_ll() - Check the HW fragment attention bit
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: non-zero if the HW flagged the frame as an 802.11 fragment
+ */
+int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = msdu_desc;
+	uint32_t attention_word0 = *((uint32_t *) &rx_desc->attention);
+
+	return (attention_word0 & RX_ATTENTION_0_FRAGMENT_MASK) >>
+	       RX_ATTENTION_0_FRAGMENT_LSB;
+}
+
+/**
+ * htt_rx_msdu_fw_desc_get() - Retrieve the per-MSDU FW rx descriptor byte
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * HL and LL use the same format for the FW rx desc, but keep it in
+ * different locations:
+ *  - LL: the FW rx descriptor has been copied into the same
+ *    htt_host_rx_desc_base struct that holds the HW rx desc;
+ *  - HL: the FW rx descriptor, along with the MSDU payload, is in the
+ *    same buffer as the rx indication message.
+ * The rx_fw_desc_offset configured during startup accounts for this
+ * difference, so a single accessor serves both modes.
+ *
+ * An optimization would be to define the LL and HL msdu_desc pointer
+ * in such a way that they both use the same offset to the FW rx desc.
+ * Then the accessors could be converted to macros, without needing to
+ * expose the htt_pdev_t definition outside HTT.
+ *
+ * Return: the FW rx descriptor byte for this MSDU
+ */
+static inline
+uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
+{
+	const uint8_t *desc_bytes = (const uint8_t *) msdu_desc;
+
+	return desc_bytes[pdev->rx_fw_desc_offset];
+}
+
+/**
+ * htt_rx_msdu_discard() - Check the FW "discard" action bit for an MSDU
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: non-zero if the FW flagged this MSDU for discard
+ */
+int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
+{
+	uint8_t fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
+
+	return fw_desc & FW_RX_DESC_DISCARD_M;
+}
+
+/**
+ * htt_rx_msdu_forward() - Check the FW "forward" action bit for an MSDU
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: non-zero if the FW flagged this MSDU for forwarding
+ */
+int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
+{
+	uint8_t fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
+
+	return fw_desc & FW_RX_DESC_FORWARD_M;
+}
+
+/**
+ * htt_rx_msdu_inspect() - Check the FW "inspect" action bit for an MSDU
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ *
+ * Return: non-zero if the FW flagged this MSDU for inspection
+ */
+int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
+{
+	uint8_t fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
+
+	return fw_desc & FW_RX_DESC_INSPECT_M;
+}
+
+/**
+ * htt_rx_msdu_actions() - Report all FW action bits for an MSDU at once
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msdu_desc: Void pointer to the Rx descriptor for the MSDU.
+ * @discard:   Output - non-zero if the FW flagged the MSDU for discard.
+ * @forward:   Output - non-zero if the FW flagged the MSDU for forwarding.
+ * @inspect:   Output - non-zero if the FW flagged the MSDU for inspection.
+ *
+ * Reads the FW rx descriptor byte once and decodes the three action
+ * flags from it.
+ */
+void
+htt_rx_msdu_actions(htt_pdev_handle pdev,
+		    void *msdu_desc, int *discard, int *forward, int *inspect)
+{
+	uint8_t fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
+
+#ifdef HTT_DEBUG_DATA
+	HTT_PRINT("act:0x%x ", fw_desc);
+#endif
+	*discard = fw_desc & FW_RX_DESC_DISCARD_M;
+	*forward = fw_desc & FW_RX_DESC_FORWARD_M;
+	*inspect = fw_desc & FW_RX_DESC_INSPECT_M;
+}
+
+/**
+ * htt_rx_netbuf_pop() - Remove the next netbuf from the rx ring
+ * @pdev: Handle (pointer) to HTT pdev.
+ *
+ * Takes the netbuf at the current SW read index, advances the index
+ * (with wrap-around via the ring size mask), and decrements the ring
+ * fill count.  The ring must be non-empty (asserted).
+ *
+ * Return: the popped netbuf
+ */
+static inline cdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
+{
+	int rd_idx;
+	cdf_nbuf_t netbuf;
+
+	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
+
+#ifdef DEBUG_DMA_DONE
+	pdev->rx_ring.dbg_ring_idx++;
+	pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
+#endif
+
+	rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
+	netbuf = pdev->rx_ring.buf.netbufs_ring[rd_idx];
+	/* advance the read index, wrapping at the ring size */
+	pdev->rx_ring.sw_rd_idx.msdu_payld =
+		(rd_idx + 1) & pdev->rx_ring.size_mask;
+	pdev->rx_ring.fill_cnt--;
+	return netbuf;
+}
+
+/**
+ * htt_rx_in_order_netbuf_pop() - Look up and remove an rx netbuf by paddr
+ * @pdev:  Handle (pointer) to HTT pdev.
+ * @paddr: Physical address of the rx buffer, as carried in the in-order
+ *         indication message.
+ *
+ * Decrements the ring fill count and retrieves the netbuf matching
+ * @paddr from the rx hash.  The ring must be non-empty (asserted).
+ *
+ * Return: the matching netbuf, or NULL if no hash entry matches @paddr
+ */
+static inline cdf_nbuf_t
+htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, uint32_t paddr)
+{
+	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
+	pdev->rx_ring.fill_cnt--;
+	return htt_rx_hash_list_lookup(pdev, paddr);
+}
+
+/* FIX ME: this function applies only to LL rx descs.
+   An equivalent for HL rx descs is needed. */
+#ifdef CHECKSUM_OFFLOAD
+/**
+ * htt_set_checksum_result_ll() - Record HW TCP/UDP checksum offload result
+ * @pdev:    Handle (pointer) to HTT pdev.
+ * @msdu:    Rx netbuf whose checksum result field is to be filled in.
+ * @rx_desc: HW rx descriptor for this MSDU.
+ *
+ * Maps the (ip_frag, tcp/udp proto, ipv4/ipv6) flags from the HW
+ * msdu_start descriptor to a CDF L4 checksum type via a lookup table
+ * (fragmented IP packets get no offload), then - for TCP/UDP packets -
+ * records whether the HW verification passed by consulting the
+ * tcp_udp_chksum_fail attention bit, and stores the result in @msdu.
+ */
+static inline
+void
+htt_set_checksum_result_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu,
+			   struct htt_host_rx_desc_base *rx_desc)
+{
+#define MAX_IP_VER          2
+#define MAX_PROTO_VAL       4
+	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
+	/* proto index: 0 = other, 1 = TCP, 2 = UDP */
+	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);
+
+	/*
+	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
+	 */
+	/* indexed as [ip_frag][proto][ipv6]; all-NONE row for fragments */
+	static const cdf_nbuf_l4_rx_cksum_type_t
+		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
+		{
+			/* non-fragmented IP packet */
+			/* non TCP/UDP packet */
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+			/* TCP packet */
+			{CDF_NBUF_RX_CKSUM_TCP, CDF_NBUF_RX_CKSUM_TCPIPV6},
+			/* UDP packet */
+			{CDF_NBUF_RX_CKSUM_UDP, CDF_NBUF_RX_CKSUM_UDPIPV6},
+			/* invalid packet type */
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+		},
+		{
+			/* fragmented IP packet */
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+			{CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+		}
+	};
+
+	cdf_nbuf_rx_cksum_t cksum = {
+		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
+		CDF_NBUF_RX_CKSUM_NONE,
+		0
+	};
+
+	/* only TCP/UDP frames carry a meaningful HW verification result */
+	if (cksum.l4_type !=
+	    (cdf_nbuf_l4_rx_cksum_type_t) CDF_NBUF_RX_CKSUM_NONE) {
+		cksum.l4_result =
+			((*(uint32_t *) &rx_desc->attention) &
+			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
+			CDF_NBUF_RX_CKSUM_NONE :
+			CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
+	}
+	cdf_nbuf_set_rx_cksum(msdu, &cksum);
+#undef MAX_IP_VER
+#undef MAX_PROTO_VAL
+}
+#else
+#define htt_set_checksum_result_ll(pdev, msdu, rx_desc) /* no-op */
+#endif
+
+#ifdef DEBUG_DMA_DONE
+/**
+ * htt_rx_print_rx_indication() - Dump an RX_IND message and rx ring state
+ * @rx_ind_msg: The rx indication message received from the target.
+ * @pdev:       Handle (pointer) to HTT pdev.
+ *
+ * Debug-only (DEBUG_DMA_DONE) helper: prints the rx ring bookkeeping
+ * (alloc/read indices, fill level/count, debug counters) followed by the
+ * decoded RX_IND header words and each MPDU range in the message.
+ */
+void htt_rx_print_rx_indication(cdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
+{
+	uint32_t *msg_word;
+	int byte_offset;
+	int mpdu_range, num_mpdu_range;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+
+	cdf_print
+		("------------------HTT RX IND-----------------------------\n");
+	cdf_print("alloc idx paddr %x (*vaddr) %d\n",
+		  pdev->rx_ring.alloc_idx.paddr,
+		  *pdev->rx_ring.alloc_idx.vaddr);
+
+	cdf_print("sw_rd_idx msdu_payld %d msdu_desc %d\n",
+		  pdev->rx_ring.sw_rd_idx.msdu_payld,
+		  pdev->rx_ring.sw_rd_idx.msdu_desc);
+
+	cdf_print("dbg_ring_idx %d\n", pdev->rx_ring.dbg_ring_idx);
+
+	cdf_print("fill_level %d fill_cnt %d\n", pdev->rx_ring.fill_level,
+		  pdev->rx_ring.fill_cnt);
+
+	cdf_print("initial msdu_payld %d curr mpdu range %d curr mpdu cnt %d\n",
+		  pdev->rx_ring.dbg_initial_msdu_payld,
+		  pdev->rx_ring.dbg_mpdu_range, pdev->rx_ring.dbg_mpdu_count);
+
+	/* Print the RX_IND contents */
+
+	cdf_print("peer id %x RV %x FV %x ext_tid %x msg_type %x\n",
+		  HTT_RX_IND_PEER_ID_GET(*msg_word),
+		  HTT_RX_IND_REL_VALID_GET(*msg_word),
+		  HTT_RX_IND_FLUSH_VALID_GET(*msg_word),
+		  HTT_RX_IND_EXT_TID_GET(*msg_word),
+		  HTT_T2H_MSG_TYPE_GET(*msg_word));
+
+	cdf_print("num_mpdu_ranges %x rel_seq_num_end %x rel_seq_num_start %x\n"
+		  " flush_seq_num_end %x flush_seq_num_start %x\n",
+		  HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1)),
+		  HTT_RX_IND_REL_SEQ_NUM_END_GET(*(msg_word + 1)),
+		  HTT_RX_IND_REL_SEQ_NUM_START_GET(*(msg_word + 1)),
+		  HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1)),
+		  HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1)));
+
+	cdf_print("fw_rx_desc_bytes %x\n",
+		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
+						  (msg_word + 2 +
+						   HTT_RX_PPDU_DESC_SIZE32)));
+
+	/* receive MSDU desc for current frame */
+	byte_offset =
+		HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
+					 pdev->rx_ind_msdu_byte_idx);
+
+	cdf_print("msdu byte idx %x msdu desc %x\n", pdev->rx_ind_msdu_byte_idx,
+		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
+						  (msg_word + 2 +
+						   HTT_RX_PPDU_DESC_SIZE32)));
+
+	num_mpdu_range = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
+
+	/* decode and print each (status, num_mpdus) range in the message */
+	for (mpdu_range = 0; mpdu_range < num_mpdu_range; mpdu_range++) {
+		enum htt_rx_status status;
+		int num_mpdus;
+
+		htt_rx_ind_mpdu_range_info(pdev, rx_ind_msg, mpdu_range,
+					   &status, &num_mpdus);
+
+		cdf_print("mpdu_range %x status %x num_mpdus %x\n",
+			  pdev->rx_ind_msdu_byte_idx, status, num_mpdus);
+	}
+	cdf_print
+		("---------------------------------------------------------\n");
+}
+#endif
+
+#ifdef DEBUG_DMA_DONE
+#define MAX_DONE_BIT_CHECK_ITER 5
+#endif
+
+/**
+ * htt_rx_amsdu_pop_ll() - Pop the MSDUs of one (A-)MSDU from the rx ring
+ * @pdev:       Handle (pointer) to HTT pdev.
+ * @rx_ind_msg: Rx indication (or rx frag indication) message from the target.
+ * @head_msdu:  Output - first MSDU netbuf of the popped list.
+ * @tail_msdu:  Output - last MSDU netbuf of the popped list.
+ *
+ * Walks the rx ring, unmapping each netbuf, validating the HW "msdu done"
+ * attention bit, copying the per-MSDU FW rx descriptor byte out of the
+ * indication message, fixing up netbuf lengths (including MSDUs that are
+ * chained across multiple rx buffers), and linking the netbufs into a
+ * list that ends at the MSDU the HW marked "last msdu".
+ *
+ * The rx ring is deliberately not refilled here; see the comment at the
+ * end of the function.
+ *
+ * Return: 1 if any popped MSDU spans multiple rx buffers (msdu chaining),
+ *         0 otherwise
+ */
+int
+htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
+		    cdf_nbuf_t rx_ind_msg,
+		    cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
+{
+	int msdu_len, msdu_chaining = 0;
+	cdf_nbuf_t msdu;
+	struct htt_host_rx_desc_base *rx_desc;
+	uint8_t *rx_ind_data;
+	uint32_t *msg_word, num_msdu_bytes;
+	enum htt_t2h_msg_type msg_type;
+	uint8_t pad_bytes = 0;
+
+	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
+	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
+	msg_word = (uint32_t *) rx_ind_data;
+
+	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+
+	/* the FW rx desc byte count sits at a different offset in frag
+	 * indications vs. regular rx indications */
+	if (cdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
+		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
+			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
+	} else {
+		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
+			*(msg_word
+			  + HTT_RX_IND_HDR_PREFIX_SIZE32
+			  + HTT_RX_PPDU_DESC_SIZE32));
+	}
+	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
+	while (1) {
+		int last_msdu, msdu_len_invalid, msdu_chained;
+		int byte_offset;
+
+		/*
+		 * Set the netbuf length to be the entire buffer length
+		 * initially, so the unmap will unmap the entire buffer.
+		 */
+		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
+#else
+		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+#endif
+
+		/* cache consistency has been taken care of by cdf_nbuf_unmap */
+
+		/*
+		 * Now read the rx descriptor.
+		 * Set the length to the appropriate value.
+		 * Check if this MSDU completes a MPDU.
+		 */
+		rx_desc = htt_rx_desc(msdu);
+#if defined(HELIUMPLUS_PADDR64)
+		if (HTT_WIFI_IP(pdev, 2, 0))
+			pad_bytes = rx_desc->msdu_end.l3_header_padding;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+		/*
+		 * Make the netbuf's data pointer point to the payload rather
+		 * than the descriptor.
+		 */
+
+		cdf_nbuf_pull_head(msdu,
+				   HTT_RX_STD_DESC_RESERVATION + pad_bytes);
+
+		/*
+		 * Sanity check - confirm the HW is finished filling in
+		 * the rx data.
+		 * If the HW and SW are working correctly, then it's guaranteed
+		 * that the HW's MAC DMA is done before this point in the SW.
+		 * To prevent the case that we handle a stale Rx descriptor,
+		 * just assert for now until we have a way to recover.
+		 */
+
+#ifdef DEBUG_DMA_DONE
+		if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
+				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
+
+			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;
+
+			cdf_print("malformed frame\n");
+
+			/* retry: re-invalidate the desc cache lines and
+			 * re-check the done bit a bounded number of times */
+			while (dbg_iter &&
+			       (!((*(uint32_t *) &rx_desc->attention) &
+				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
+				cdf_mdelay(1);
+
+				cdf_invalidate_range((void *)rx_desc,
+						     (void *)((char *)rx_desc +
+						 HTT_RX_STD_DESC_RESERVATION));
+
+				cdf_print("debug iter %d success %d\n",
+					  dbg_iter,
+					  pdev->rx_ring.dbg_sync_success);
+
+				dbg_iter--;
+			}
+
+			if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
+					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
+
+#ifdef HTT_RX_RESTORE
+				cdf_print("RX done bit error detected!\n");
+				cdf_nbuf_set_next(msdu, NULL);
+				*tail_msdu = msdu;
+				pdev->rx_ring.rx_reset = 1;
+				return msdu_chaining;
+#else
+				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
+						    0, GEN_CMD);
+				HTT_ASSERT_ALWAYS(0);
+#endif
+			}
+			pdev->rx_ring.dbg_sync_success++;
+			cdf_print("debug iter %d success %d\n", dbg_iter,
+				  pdev->rx_ring.dbg_sync_success);
+		}
+#else
+		HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
+				  RX_ATTENTION_0_MSDU_DONE_MASK);
+#endif
+		/*
+		 * Copy the FW rx descriptor for this MSDU from the rx
+		 * indication message into the MSDU's netbuf.
+		 * HL uses the same rx indication message definition as LL, and
+		 * simply appends new info (fields from the HW rx desc, and the
+		 * MSDU payload itself).
+		 * So, the offset into the rx indication message only has to
+		 * account for the standard offset of the per-MSDU FW rx
+		 * desc info within the message, and how many bytes of the
+		 * per-MSDU FW rx desc info have already been consumed.
+		 * (And the endianness of the host,
+		 * since for a big-endian host, the rx ind message contents,
+		 * including the per-MSDU rx desc bytes, were byteswapped during
+		 * upload.)
+		 */
+		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
+			if (cdf_unlikely
+				    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
+				byte_offset =
+					HTT_ENDIAN_BYTE_IDX_SWAP
+					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
+			else
+				byte_offset =
+					HTT_ENDIAN_BYTE_IDX_SWAP
+					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
+						pdev->rx_ind_msdu_byte_idx);
+
+			*((uint8_t *) &rx_desc->fw_desc.u.val) =
+				rx_ind_data[byte_offset];
+			/*
+			 * The target is expected to only provide the basic
+			 * per-MSDU rx descriptors.  Just to be sure,
+			 * verify that the target has not attached
+			 * extension data (e.g. LRO flow ID).
+			 */
+			/*
+			 * The assertion below currently doesn't work for
+			 * RX_FRAG_IND messages, since their format differs
+			 * from the RX_IND format (no FW rx PPDU desc in
+			 * the current RX_FRAG_IND message).
+			 * If the RX_FRAG_IND message format is updated to match
+			 * the RX_IND message format, then the following
+			 * assertion can be restored.
+			 */
+			/* cdf_assert((rx_ind_data[byte_offset] &
+			   FW_RX_DESC_EXT_M) == 0); */
+			pdev->rx_ind_msdu_byte_idx += 1;
+			/* or more, if there's ext data */
+		} else {
+			/*
+			 * When an oversized AMSDU happened, FW will lost some
+			 * of MSDU status - in this case, the FW descriptors
+			 * provided will be less than the actual MSDUs
+			 * inside this MPDU.
+			 * Mark the FW descriptors so that it will still
+			 * deliver to upper stack, if no CRC error for the MPDU.
+			 *
+			 * FIX THIS - the FW descriptors are actually for MSDUs
+			 * in the end of this A-MSDU instead of the beginning.
+			 */
+			*((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
+		}
+
+		/*
+		 *  TCP/UDP checksum offload support
+		 */
+		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
+
+		msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
+				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
+		/* number of additional rx buffers this MSDU spills into */
+		msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
+				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
+				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
+		msdu_len =
+			((*((uint32_t *) &rx_desc->msdu_start)) &
+			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
+			RX_MSDU_START_0_MSDU_LENGTH_LSB;
+
+		do {
+			if (!msdu_len_invalid && !msdu_chained) {
+#if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
+				if (msdu_len > 0x3000)
+					break;
+#endif
+				cdf_nbuf_trim_tail(msdu,
+						   HTT_RX_BUF_SIZE -
+						   (RX_STD_DESC_SIZE +
+						    msdu_len));
+			}
+		} while (0);
+
+		/* pop and link the continuation buffers of a chained MSDU */
+		while (msdu_chained--) {
+			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
+			cdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
+			msdu_len -= HTT_RX_BUF_SIZE;
+			cdf_nbuf_set_next(msdu, next);
+			msdu = next;
+			msdu_chaining = 1;
+
+			if (msdu_chained == 0) {
+				/* Trim the last one to the correct size -
+				 * accounting for inconsistent HW lengths
+				 * causing length overflows and underflows
+				 */
+				if (((unsigned)msdu_len) >
+				    ((unsigned)
+				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
+					msdu_len =
+						(HTT_RX_BUF_SIZE -
+						 RX_STD_DESC_SIZE);
+				}
+
+				cdf_nbuf_trim_tail(next,
+						   HTT_RX_BUF_SIZE -
+						   (RX_STD_DESC_SIZE +
+						    msdu_len));
+			}
+		}
+
+		last_msdu =
+			((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
+			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
+			RX_MSDU_END_4_LAST_MSDU_LSB;
+
+		if (last_msdu) {
+			cdf_nbuf_set_next(msdu, NULL);
+			break;
+		} else {
+			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
+			cdf_nbuf_set_next(msdu, next);
+			msdu = next;
+		}
+	}
+	*tail_msdu = msdu;
+
+	/*
+	 * Don't refill the ring yet.
+	 * First, the elements popped here are still in use - it is
+	 * not safe to overwrite them until the matching call to
+	 * mpdu_desc_list_next.
+	 * Second, for efficiency it is preferable to refill the rx ring
+	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
+	 * rather than one MPDU's worth of rx buffers (sth like 3 buffers).
+	 * Consequently, we'll rely on the txrx SW to tell us when it is done
+	 * pulling all the PPDU's rx buffers out of the rx ring, and then
+	 * refill it just once.
+	 */
+	return msdu_chaining;
+}
+
+/**
+ * htt_rx_offload_msdu_pop_ll() - Pop one offload-delivered MSDU from rx ring
+ * @pdev:                Handle (pointer) to HTT pdev.
+ * @offload_deliver_msg: Offload deliver indication message (not read here;
+ *                       the metadata is parsed from the MSDU header itself).
+ * @vdev_id:  Output - vdev id parsed from the MSDU header.
+ * @peer_id:  Output - peer id parsed from the MSDU header.
+ * @tid:      Output - TID parsed from the MSDU header.
+ * @fw_desc:  Output - FW rx descriptor byte parsed from the MSDU header.
+ * @head_buf: Output - popped netbuf (single buffer, so head == tail).
+ * @tail_buf: Output - same netbuf as @head_buf.
+ *
+ * Pops the next netbuf from the rx ring, unmaps it, decodes the two-dword
+ * offload-deliver MSDU header into the output parameters, then strips the
+ * header and trims the buffer to the indicated MSDU length.
+ *
+ * Return: 0 always
+ */
+int
+htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
+			   cdf_nbuf_t offload_deliver_msg,
+			   int *vdev_id,
+			   int *peer_id,
+			   int *tid,
+			   uint8_t *fw_desc,
+			   cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+{
+	cdf_nbuf_t buf;
+	uint32_t *msdu_hdr, msdu_len;
+
+	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
+	/* Fake read mpdu_desc to keep desc ptr in sync */
+	htt_rx_mpdu_desc_list_next(pdev, NULL);
+	cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+	cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
+#else
+	cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
+#endif
+	msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+
+	/* First dword */
+	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
+	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
+
+	/* Second dword */
+	msdu_hdr++;
+	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
+	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
+	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
+
+	/* drop the offload header; the payload is trimmed to msdu_len */
+	cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+	cdf_nbuf_set_pktlen(buf, msdu_len);
+	return 0;
+}
+
+/**
+ * htt_rx_offload_paddr_msdu_pop_ll() - Pop one offload MSDU by physical addr
+ * @pdev:      Handle (pointer) to HTT pdev.
+ * @msg_word:  Pointer to the paddr list within the in-order indication msg.
+ * @msdu_iter: Index of the MSDU entry to process within @msg_word.
+ * @vdev_id:   Output - vdev id parsed from the MSDU header.
+ * @peer_id:   Output - peer id parsed from the MSDU header.
+ * @tid:       Output - TID parsed from the MSDU header.
+ * @fw_desc:   Output - FW rx descriptor byte parsed from the MSDU header.
+ * @head_buf:  Output - popped netbuf (single buffer, so head == tail);
+ *             NULL if the paddr lookup fails.
+ * @tail_buf:  Output - same netbuf as @head_buf.
+ *
+ * Like htt_rx_offload_msdu_pop_ll(), but locates the netbuf via the
+ * physical address carried in the in-order indication message instead of
+ * the ring read index.
+ *
+ * Return: 0 always.
+ * NOTE(review): a failed netbuf pop also returns 0 - callers must check
+ * *head_buf for NULL to distinguish failure.
+ */
+int
+htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
+				 uint32_t *msg_word,
+				 int msdu_iter,
+				 int *vdev_id,
+				 int *peer_id,
+				 int *tid,
+				 uint8_t *fw_desc,
+				 cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+{
+	cdf_nbuf_t buf;
+	uint32_t *msdu_hdr, msdu_len;
+	uint32_t *curr_msdu;
+	uint32_t paddr;
+
+	curr_msdu =
+		msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
+	paddr = HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*curr_msdu);
+	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
+
+	if (cdf_unlikely(NULL == buf)) {
+		cdf_print("%s: netbuf pop failed!\n", __func__);
+		return 0;
+	}
+	cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+	cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
+#else
+	cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
+#endif
+	msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+
+	/* First dword */
+	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
+	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
+
+	/* Second dword */
+	msdu_hdr++;
+	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
+	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
+	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
+
+	/* drop the offload header; the payload is trimmed to msdu_len */
+	cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+	cdf_nbuf_set_pktlen(buf, msdu_len);
+	return 0;
+}
+
+extern void
+dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
+
+#ifdef RX_HASH_DEBUG
+#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
+#else
+#define HTT_RX_CHECK_MSDU_COUNT(msdu_count)     /* no-op */
+#endif
+
+/**
+ * htt_rx_amsdu_rx_in_order_pop_ll() - Pop in-order-indicated MSDUs by paddr
+ * @pdev:       Handle (pointer) to HTT pdev.
+ * @rx_ind_msg: IN_ORD_PADDR_IND message holding the MSDU paddr list.
+ * @head_msdu:  Output - head of the popped MSDU list (NULL on failure).
+ * @tail_msdu:  Output - tail of the popped MSDU list (NULL on failure).
+ *
+ * Looks each MSDU up in the rx hash by the physical address carried in
+ * the indication message, unmaps it, extracts LRO info, trims it to the
+ * indicated length, and records the FW rx descriptor byte.  MSDUs with a
+ * MIC error are handed to ol_rx_mic_error_handler() and unlinked from the
+ * list.  Offload indications are diverted wholesale to
+ * ol_rx_offload_paddr_deliver_ind_handler().
+ *
+ * Return: 1 - success, 0 - failure (netbuf pop failure, or offload ind)
+ */
+int
+htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
+				cdf_nbuf_t rx_ind_msg,
+				cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
+{
+	cdf_nbuf_t msdu, next, prev = NULL;
+	uint8_t *rx_ind_data;
+	uint32_t *msg_word;
+	unsigned int msdu_count = 0;
+	uint8_t offload_ind;
+	struct htt_host_rx_desc_base *rx_desc;
+
+	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
+
+	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
+	msg_word = (uint32_t *) rx_ind_data;
+
+	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
+
+	/* Get the total number of MSDUs */
+	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
+	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
+
+	/* advance past the message header to the per-MSDU entries */
+	msg_word =
+		(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
+	if (offload_ind) {
+		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
+							msg_word);
+		*head_msdu = *tail_msdu = NULL;
+		return 0;
+	}
+
+	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(
+		pdev,
+		HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
+
+	if (cdf_unlikely(NULL == msdu)) {
+		cdf_print("%s: netbuf pop failed!\n", __func__);
+		*tail_msdu = NULL;
+		return 0;
+	}
+
+	while (msdu_count > 0) {
+
+		/*
+		 * Set the netbuf length to be the entire buffer length
+		 * initially, so the unmap will unmap the entire buffer.
+		 */
+		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
+#else
+		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+#endif
+
+		/* cache consistency has been taken care of by cdf_nbuf_unmap */
+		rx_desc = htt_rx_desc(msdu);
+
+		htt_rx_extract_lro_info(msdu, rx_desc);
+
+		/*
+		 * Make the netbuf's data pointer point to the payload rather
+		 * than the descriptor.
+		 */
+		cdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
+/* offset (in dwords) of the length/fw-desc word within an MSDU entry;
+ * depends on whether paddrs are 64-bit */
+#if HTT_PADDR64
+#define NEXT_FIELD_OFFSET_IN32 2
+#else /* ! HTT_PADDR64 */
+#define NEXT_FIELD_OFFSET_IN32 1
+#endif /* HTT_PADDR64 */
+#
+/* NOTE(review): the lone '#' above is a null preprocessor directive -
+ * harmless, but likely a typo */
+		cdf_nbuf_trim_tail(msdu,
+				   HTT_RX_BUF_SIZE -
+				   (RX_STD_DESC_SIZE +
+				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
+					    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
+#if defined(HELIUMPLUS_DEBUG)
+		dump_pkt(msdu, 0, 64);
+#endif
+		*((uint8_t *) &rx_desc->fw_desc.u.val) =
+			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word + NEXT_FIELD_OFFSET_IN32));
+#undef NEXT_FIELD_OFFSET_IN32
+
+		msdu_count--;
+
+		/* MIC-error MSDUs are reported and unlinked from the list */
+		if (cdf_unlikely((*((u_int8_t *) &rx_desc->fw_desc.u.val)) &
+				    FW_RX_DESC_MIC_ERR_M)) {
+			u_int8_t tid =
+				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
+					*(u_int32_t *)rx_ind_data);
+			u_int16_t peer_id =
+				HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
+					*(u_int32_t *)rx_ind_data);
+			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
+						rx_desc, msdu);
+
+			htt_rx_desc_frame_free(pdev, msdu);
+			/* if this is the last msdu */
+			if (!msdu_count) {
+				/* if this is the only msdu */
+				if (!prev) {
+					*head_msdu = *tail_msdu = NULL;
+					return 0;
+				} else {
+					*tail_msdu = prev;
+					cdf_nbuf_set_next(prev, NULL);
+					return 1;
+				}
+			} else { /* if this is not the last msdu */
+				/* get the next msdu */
+				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
+				next = htt_rx_in_order_netbuf_pop(
+					pdev,
+					HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(
+						*msg_word));
+				if (cdf_unlikely(NULL == next)) {
+					cdf_print("%s: netbuf pop failed!\n",
+								 __func__);
+					*tail_msdu = NULL;
+					return 0;
+				}
+
+				/* if this is not the first msdu, update the
+				 * next pointer of the preceding msdu
+				 */
+				if (prev) {
+					cdf_nbuf_set_next(prev, next);
+				} else {
+				/* if this is the first msdu, update the
+				 * head pointer
+				 */
+					*head_msdu = next;
+				}
+				msdu = next;
+				continue;
+			}
+		}
+
+		/* Update checksum result */
+		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
+
+		/* check if this is the last msdu */
+		if (msdu_count) {
+			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
+			next = htt_rx_in_order_netbuf_pop(
+				pdev,
+				HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
+			if (cdf_unlikely(NULL == next)) {
+				cdf_print("%s: netbuf pop failed!\n",
+					  __func__);
+				*tail_msdu = NULL;
+				return 0;
+			}
+			cdf_nbuf_set_next(msdu, next);
+			prev = msdu;
+			msdu = next;
+		} else {
+			*tail_msdu = msdu;
+			cdf_nbuf_set_next(msdu, NULL);
+		}
+	}
+
+	return 1;
+}
+
+/**
+ * htt_rx_cdf_noclone_buf() - Identity stand-in for cdf_nbuf_clone()
+ * @buf: network buffer
+ *
+ * Util fake function that has the same prototype as cdf_nbuf_clone() but
+ * just returns the same nbuf, for callers that do not need a real clone.
+ *
+ * Return: @buf, unchanged
+ */
+cdf_nbuf_t htt_rx_cdf_noclone_buf(cdf_nbuf_t buf)
+{
+	return buf;
+}
+
+/* FIXME: This is a HW definition not provided by HW, where does it go ? */
+/* Decap format of the rx payload, as reported by the HW rx descriptor */
+enum {
+	HW_RX_DECAP_FORMAT_RAW = 0,     /* undecapped 802.11 frame */
+	HW_RX_DECAP_FORMAT_NWIFI,       /* native-wifi decap */
+	HW_RX_DECAP_FORMAT_8023,        /* 802.3 (LLC/SNAP) decap */
+	HW_RX_DECAP_FORMAT_ETH2,        /* Ethernet II decap */
+};
+
+#define HTT_FCS_LEN (4)
+
+/**
+ * htt_rx_parse_ppdu_start_status() - Fill rx_status from the PPDU start desc
+ * @rx_desc: HW rx descriptor whose ppdu_start fields are to be decoded.
+ * @rs:      Output rx status (RSSI and packed PHY rate info).
+ *
+ * Copies the combined RSSI and encodes the PHY rate into rs_ratephy
+ * using the packing documented inline below.
+ */
+static void
+htt_rx_parse_ppdu_start_status(struct htt_host_rx_desc_base *rx_desc,
+			       struct ieee80211_rx_status *rs)
+{
+
+	struct rx_ppdu_start *ppdu_start = &rx_desc->ppdu_start;
+
+	/* RSSI */
+	rs->rs_rssi = ppdu_start->rssi_comb;
+
+	/* PHY rate */
+	/* rs_ratephy coding
+	   [b3 - b0]
+	   0 -> OFDM
+	   1 -> CCK
+	   2 -> HT
+	   3 -> VHT
+	   OFDM / CCK
+	   [b7  - b4 ] => LSIG rate
+	   [b23 - b8 ] => service field
+	   (b'12 static/dynamic,
+	   b'14..b'13 BW for VHT)
+	   [b31 - b24 ] => Reserved
+	   HT / VHT
+	   [b15 - b4 ] => SIG A_2 12 LSBs
+	   [b31 - b16] => SIG A_1 16 LSBs
+
+	 */
+	/* preamble_type 0x4 = legacy (OFDM/CCK); else HT (2) or VHT (3) */
+	if (ppdu_start->preamble_type == 0x4) {
+		rs->rs_ratephy = ppdu_start->l_sig_rate_select;
+		rs->rs_ratephy |= ppdu_start->l_sig_rate << 4;
+		rs->rs_ratephy |= ppdu_start->service << 8;
+	} else {
+		rs->rs_ratephy = (ppdu_start->preamble_type & 0x4) ? 3 : 2;
+#ifdef HELIUMPLUS
+		rs->rs_ratephy |=
+			(ppdu_start->ht_sig_vht_sig_ah_sig_a_2 & 0xFFF) << 4;
+		rs->rs_ratephy |=
+			(ppdu_start->ht_sig_vht_sig_ah_sig_a_1 & 0xFFFF) << 16;
+#else
+		rs->rs_ratephy |= (ppdu_start->ht_sig_vht_sig_a_2 & 0xFFF) << 4;
+		rs->rs_ratephy |=
+			(ppdu_start->ht_sig_vht_sig_a_1 & 0xFFFF) << 16;
+#endif
+	}
+
+	return;
+}
+
+/* This function is used by montior mode code to restitch an MSDU list
+ * corresponding to an MPDU back into an MPDU by linking up the skbs.
+ */
+cdf_nbuf_t
+htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
+				cdf_nbuf_t head_msdu,
+				struct ieee80211_rx_status *rx_status,
+				unsigned clone_not_reqd)
+{
+
+	cdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
+	cdf_nbuf_t (*clone_nbuf_fn)(cdf_nbuf_t buf);
+	unsigned decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
+		 mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
+		 is_amsdu, is_first_frag, amsdu_pad, msdu_len;
+	struct htt_host_rx_desc_base *rx_desc;
+	char *hdr_desc;
+	unsigned char *dest;
+	struct ieee80211_frame *wh;
+	struct ieee80211_qoscntl *qos;
+
+	/* If this packet does not go up the normal stack path we dont need to
+	 * waste cycles cloning the packets
+	 */
+	clone_nbuf_fn =
+		clone_not_reqd ? htt_rx_cdf_noclone_buf : cdf_nbuf_clone;
+
+	/* The nbuf has been pulled just beyond the status and points to the
+	 * payload
+	 */
+	msdu_orig = head_msdu;
+	rx_desc = htt_rx_desc(msdu_orig);
+
+	/* Fill out the rx_status from the PPDU start and end fields */
+	if (rx_desc->attention.first_mpdu) {
+		htt_rx_parse_ppdu_start_status(rx_desc, rx_status);
+
+		/* The timestamp is no longer valid - It will be valid only for
+		 * the last MPDU
+		 */
+		rx_status->rs_tstamp.tsf = ~0;
+	}
+
+	decap_format =
+		GET_FIELD(&rx_desc->msdu_start, RX_MSDU_START_2_DECAP_FORMAT);
+
+	head_frag_list_cloned = NULL;
+
+	/* Easy case - The MSDU status indicates that this is a non-decapped
+	 * packet in RAW mode.
+	 * return
+	 */
+	if (decap_format == HW_RX_DECAP_FORMAT_RAW) {
+		/* Note that this path might suffer from headroom unavailabilty,
+		 * but the RX status is usually enough
+		 */
+		mpdu_buf = clone_nbuf_fn(head_msdu);
+
+		prev_buf = mpdu_buf;
+
+		frag_list_sum_len = 0;
+		is_first_frag = 1;
+		msdu_len = cdf_nbuf_len(mpdu_buf);
+
+		/* Drop the zero-length msdu */
+		if (!msdu_len)
+			goto mpdu_stitch_fail;
+
+		msdu_orig = cdf_nbuf_next(head_msdu);
+
+		while (msdu_orig) {
+
+			/* TODO: intra AMSDU padding - do we need it ??? */
+			msdu = clone_nbuf_fn(msdu_orig);
+			if (!msdu)
+				goto mpdu_stitch_fail;
+
+			if (is_first_frag) {
+				is_first_frag = 0;
+				head_frag_list_cloned = msdu;
+			}
+
+			msdu_len = cdf_nbuf_len(msdu);
+			/* Drop the zero-length msdu */
+			if (!msdu_len)
+				goto mpdu_stitch_fail;
+
+			frag_list_sum_len += msdu_len;
+
+			/* Maintain the linking of the cloned MSDUS */
+			cdf_nbuf_set_next_ext(prev_buf, msdu);
+
+			/* Move to the next */
+			prev_buf = msdu;
+			msdu_orig = cdf_nbuf_next(msdu_orig);
+		}
+
+		/* The last msdu length need be larger than HTT_FCS_LEN */
+		if (msdu_len < HTT_FCS_LEN)
+			goto mpdu_stitch_fail;
+
+		cdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);
+
+		/* If there were more fragments to this RAW frame */
+		if (head_frag_list_cloned) {
+			cdf_nbuf_append_ext_list(mpdu_buf,
+						 head_frag_list_cloned,
+						 frag_list_sum_len);
+		}
+
+		goto mpdu_stitch_done;
+	}
+
+	/* Decap mode:
+	 * Calculate the amount of header in decapped packet to knock off based
+	 * on the decap type and the corresponding number of raw bytes to copy
+	 * status header
+	 */
+
+	hdr_desc = &rx_desc->rx_hdr_status[0];
+
+	/* Base size */
+	wifi_hdr_len = sizeof(struct ieee80211_frame);
+	wh = (struct ieee80211_frame *)hdr_desc;
+
+	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+	if (dir == IEEE80211_FC1_DIR_DSTODS)
+		wifi_hdr_len += 6;
+
+	is_amsdu = 0;
+	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+		qos = (struct ieee80211_qoscntl *)
+		      (hdr_desc + wifi_hdr_len);
+		wifi_hdr_len += 2;
+
+		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
+	}
+
+	/* TODO: Any security headers associated with MPDU */
+	sec_hdr_len = 0;
+
+	/* MSDU related stuff LLC - AMSDU subframe header etc */
+	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
+
+	mpdu_buf_len = wifi_hdr_len + sec_hdr_len + msdu_llc_len;
+
+	/* "Decap" header to remove from MSDU buffer */
+	decap_hdr_pull_bytes = 14;
+
+	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
+	 * status of the now decapped first msdu. Leave enough headroom for
+	 * accomodating any radio-tap /prism like PHY header
+	 */
+#define HTT_MAX_MONITOR_HEADER (512)
+	mpdu_buf = cdf_nbuf_alloc(pdev->osdev,
+				  HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
+				  HTT_MAX_MONITOR_HEADER, 4, false);
+
+	if (!mpdu_buf)
+		goto mpdu_stitch_fail;
+
+	/* Copy the MPDU related header and enc headers into the first buffer
+	 * - Note that there can be a 2 byte pad between heaader and enc header
+	 */
+
+	prev_buf = mpdu_buf;
+	dest = cdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
+	if (!dest)
+		goto mpdu_stitch_fail;
+	cdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
+	hdr_desc += wifi_hdr_len;
+
+	/* NOTE - This padding is present only in the RAW header status - not
+	 * when the MSDU data payload is in RAW format.
+	 */
+	/* Skip the "IV pad" */
+	if (wifi_hdr_len & 0x3)
+		hdr_desc += 2;
+
+	/* The first LLC len is copied into the MPDU buffer */
+	frag_list_sum_len = 0;
+	frag_list_sum_len -= msdu_llc_len;
+
+	msdu_orig = head_msdu;
+	is_first_frag = 1;
+	amsdu_pad = 0;
+
+	while (msdu_orig) {
+
+		/* TODO: intra AMSDU padding - do we need it ??? */
+
+		msdu = clone_nbuf_fn(msdu_orig);
+		if (!msdu)
+			goto mpdu_stitch_fail;
+
+		if (is_first_frag) {
+			is_first_frag = 0;
+			head_frag_list_cloned = msdu;
+		} else {
+
+			/* Maintain the linking of the cloned MSDUS */
+			cdf_nbuf_set_next_ext(prev_buf, msdu);
+
+			/* Reload the hdr ptr only on non-first MSDUs */
+			rx_desc = htt_rx_desc(msdu_orig);
+			hdr_desc = &rx_desc->rx_hdr_status[0];
+
+		}
+
+		/* Copy this buffers MSDU related status into the prev buffer */
+		dest = cdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
+		dest += amsdu_pad;
+		cdf_mem_copy(dest, hdr_desc, msdu_llc_len);
+
+		/* Push the MSDU buffer beyond the decap header */
+		cdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
+		frag_list_sum_len +=
+			msdu_llc_len + cdf_nbuf_len(msdu) + amsdu_pad;
+
+		/* Set up intra-AMSDU pad to be added to start of next buffer -
+		 * AMSDU pad is 4 byte pad on AMSDU subframe */
+		amsdu_pad = (msdu_llc_len + cdf_nbuf_len(msdu)) & 0x3;
+		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
+
+		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
+		 * probably iterate all the frags cloning them along the way and
+		 * and also updating the prev_buf pointer
+		 */
+
+		/* Move to the next */
+		prev_buf = msdu;
+		msdu_orig = cdf_nbuf_next(msdu_orig);
+
+	}
+
+	/* TODO: Convert this to suitable cdf routines */
+	cdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
+				 frag_list_sum_len);
+
+mpdu_stitch_done:
+	/* Check if this buffer contains the PPDU end status for TSF */
+	if (rx_desc->attention.last_mpdu)
+#ifdef HELIUMPLUS
+		rx_status->rs_tstamp.tsf =
+			rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32;
+#else
+		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
+#endif
+	/* All the nbufs have been linked into the ext list and
+	   then unlink the nbuf list */
+	if (clone_not_reqd) {
+		msdu = head_msdu;
+		while (msdu) {
+			msdu_orig = msdu;
+			msdu = cdf_nbuf_next(msdu);
+			cdf_nbuf_set_next(msdu_orig, NULL);
+		}
+	}
+
+	return mpdu_buf;
+
+mpdu_stitch_fail:
+	/* Free these alloced buffers and the orig buffers in non-clone case */
+	if (!clone_not_reqd) {
+		/* Free the head buffer */
+		if (mpdu_buf)
+			cdf_nbuf_free(mpdu_buf);
+
+		/* Free the partial list */
+		while (head_frag_list_cloned) {
+			msdu = head_frag_list_cloned;
+			head_frag_list_cloned =
+				cdf_nbuf_next_ext(head_frag_list_cloned);
+			cdf_nbuf_free(msdu);
+		}
+	} else {
+		/* Free the alloced head buffer */
+		if (decap_format != HW_RX_DECAP_FORMAT_RAW)
+			if (mpdu_buf)
+				cdf_nbuf_free(mpdu_buf);
+
+		/* Free the orig buffers */
+		msdu = head_msdu;
+		while (msdu) {
+			msdu_orig = msdu;
+			msdu = cdf_nbuf_next(msdu);
+			cdf_nbuf_free(msdu_orig);
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * htt_rx_mpdu_desc_rssi_dbm() - get the RSSI (in dBm) for an rx MPDU
+ * @pdev: HTT pdev handle (unused)
+ * @mpdu_desc: opaque rx MPDU descriptor (unused)
+ *
+ * Return: HTT_RSSI_INVALID unconditionally; see the comment in the body.
+ */
+int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	/*
+	 * Currently the RSSI is provided only as a field in the
+	 * HTT_T2H_RX_IND message, rather than in each rx descriptor.
+	 */
+	return HTT_RSSI_INVALID;
+}
+
+/*
+ * htt_rx_amsdu_pop -
+ * global function pointer that is programmed during attach to point
+ * to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
+ */
+int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
+			cdf_nbuf_t rx_ind_msg,
+			cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+/*
+ * htt_rx_frag_pop -
+ * global function pointer that is programmed during attach to point
+ * to either htt_rx_amsdu_pop_ll
+ */
+int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
+		       cdf_nbuf_t rx_ind_msg,
+		       cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+/* pop an offload-delivery MSDU; bound to its _ll impl in htt_rx_attach() */
+int
+(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
+			   cdf_nbuf_t offload_deliver_msg,
+			   int *vdev_id,
+			   int *peer_id,
+			   int *tid,
+			   uint8_t *fw_desc,
+			   cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+
+/* advance to the next MPDU descriptor; bound in htt_rx_attach() */
+void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
+				    cdf_nbuf_t rx_ind_msg);
+
+/* MPDU "retry" flag accessor; bound in htt_rx_attach() */
+bool (*htt_rx_mpdu_desc_retry)(
+    htt_pdev_handle pdev, void *mpdu_desc);
+
+/* MPDU sequence number accessor; bound in htt_rx_attach() */
+uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/* MPDU packet number (PN) extractor; bound in htt_rx_attach() */
+void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
+			    void *mpdu_desc,
+			    union htt_rx_pn_t *pn, int pn_len_bits);
+
+/* MPDU TID accessor; bound in htt_rx_attach() */
+uint8_t (*htt_rx_mpdu_desc_tid)(
+    htt_pdev_handle pdev, void *mpdu_desc);
+
+/* does this MSDU complete its MPDU; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
+
+/* is this the first MSDU of its MPDU; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
+
+/* is the wlan-multicast flag valid for this MSDU; bound in htt_rx_attach() */
+int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
+
+/* is this MSDU wlan multicast; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
+
+/* is this MSDU a fragment; bound in htt_rx_attach() */
+int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
+
+/* fetch the rx descriptor for an MSDU netbuf; bound in htt_rx_attach() */
+void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+
+/* is the MPDU encrypted; bound in htt_rx_attach() */
+bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/* extract the security key id for an MPDU; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
+				void *mpdu_desc, uint8_t *key_id);
+
+/* htt_rx_mpdu_desc_list_next_ll() - return the rx descriptor of the netbuf
+ * at the sw "msdu_desc" read index, then advance that index by snapping it
+ * to the "msdu_payld" read index (the payload side leads the desc side).
+ */
+void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
+	cdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
+	/* catch the desc read index up with the payload read index */
+	pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
+	return (void *)htt_rx_desc(netbuf);
+}
+
+/* is per-MSDU channel info available; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_chan_info_present)(
+	htt_pdev_handle pdev,
+	void *mpdu_desc);
+
+/* retrieve per-MSDU channel/phy-mode info; bound in htt_rx_attach() */
+bool (*htt_rx_msdu_center_freq)(
+	htt_pdev_handle pdev,
+	struct ol_txrx_peer_t *peer,
+	void *mpdu_desc,
+	uint16_t *primary_chan_center_freq_mhz,
+	uint16_t *contig_chan1_center_freq_mhz,
+	uint16_t *contig_chan2_center_freq_mhz,
+	uint8_t *phy_mode);
+
+/* In-order delivery: the caller already holds the netbuf, and the HW rx
+ * descriptor is embedded in the buffer itself, so just locate it.
+ */
+void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
+					   cdf_nbuf_t netbuf)
+{
+	void *rx_desc = (void *)htt_rx_desc(netbuf);
+
+	return rx_desc;
+}
+
+/* LL variant: an MSDU's rx descriptor is co-located with its payload */
+void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+{
+	void *desc = htt_rx_desc(msdu);
+
+	return desc;
+}
+
+/* LL variant: read the "encrypted" bit out of the first 32-bit word of
+ * the HW mpdu_start structure in the rx descriptor.
+ */
+bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	struct htt_host_rx_desc_base *rx_desc = mpdu_desc;
+	uint32_t mpdu_start_0 = *((uint32_t *) &rx_desc->mpdu_start);
+	uint32_t encrypted;
+
+	encrypted = (mpdu_start_0 & RX_MPDU_START_0_ENCRYPTED_MASK) >>
+		    RX_MPDU_START_0_ENCRYPTED_LSB;
+
+	return encrypted ? true : false;
+}
+
+/* LL rx descriptors carry no per-MSDU channel info: always "not present" */
+bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	return false;
+}
+
+/* LL rx descriptors provide no channel info: zero every output the caller
+ * supplied (each pointer may be NULL) and report "no info".
+ */
+bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
+	struct ol_txrx_peer_t *peer,
+	void *mpdu_desc,
+	uint16_t *primary_chan_center_freq_mhz,
+	uint16_t *contig_chan1_center_freq_mhz,
+	uint16_t *contig_chan2_center_freq_mhz,
+	uint8_t *phy_mode)
+{
+	if (phy_mode)
+		*phy_mode = 0;
+	if (contig_chan2_center_freq_mhz)
+		*contig_chan2_center_freq_mhz = 0;
+	if (contig_chan1_center_freq_mhz)
+		*contig_chan1_center_freq_mhz = 0;
+	if (primary_chan_center_freq_mhz)
+		*primary_chan_center_freq_mhz = 0;
+
+	return false;
+}
+
+/**
+ * htt_rx_msdu_desc_key_id_ll() - extract the security key id for an MPDU
+ * @pdev: HTT pdev handle
+ * @mpdu_desc: HW rx descriptor of the MPDU's first MSDU
+ * @key_id: output for the extracted key id bits
+ *
+ * Only meaningful on the first MSDU of an MPDU.
+ *
+ * Return: true if *key_id was filled in, false if not applicable
+ */
+bool
+htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
+			   uint8_t *key_id)
+{
+	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
+						mpdu_desc;
+
+	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
+		return false;
+
+	/* key id lives in the second 32-bit word of msdu_end.
+	 * NOTE(review): masking with (MASK >> LSB) only equals the usual
+	 * ((word & MASK) >> LSB) when the field's LSB is 0 — confirm against
+	 * the RX_MSDU_END_1 register definition before relying on this.
+	 */
+	*key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
+		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
+		    RX_MSDU_END_1_KEY_ID_OCT_LSB));
+
+	return true;
+}
+
+/* Free an rx MSDU buffer; its rx descriptor shares the same buffer
+ * (see htt_rx_msdu_desc_free below), so one free releases both.
+ */
+void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
+{
+	cdf_nbuf_free(msdu);
+}
+
+/* Intentionally a no-op — see the comment in the body */
+void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
+{
+	/*
+	 * The rx descriptor is in the same buffer as the rx MSDU payload,
+	 * and does not need to be freed separately.
+	 */
+}
+
+/**
+ * htt_rx_msdu_buff_replenish() - top the rx ring back up to its fill level
+ * @pdev: HTT pdev whose rx ring to replenish
+ *
+ * refill_ref_cnt idles at 1 (see htt_rx_attach), so the dec-and-test
+ * succeeds only for the single caller that brings it to 0 — that caller
+ * performs the refill; the trailing inc restores the baseline.
+ */
+void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
+{
+	if (cdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt)) {
+		int num_to_fill;
+		/* how far the ring is below its configured fill level */
+		num_to_fill = pdev->rx_ring.fill_level -
+			pdev->rx_ring.fill_cnt;
+
+		htt_rx_ring_fill_n(pdev,
+				   num_to_fill /* okay if <= 0 */);
+	}
+	cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
+}
+
+/* Pack a HW rate code: bits [7:6] = preamble type, [5:4] = NSS,
+ * [3:0] = rate index
+ */
+#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream)     \
+	(((_pream) << 6) | ((_nss) << 4) | (_rate))
+
+/* Preamble-type values for the _pream field above */
+enum AR600P_HW_RATECODE_PREAM_TYPE {
+	AR600P_HW_RATECODE_PREAM_OFDM,
+	AR600P_HW_RATECODE_PREAM_CCK,
+	AR600P_HW_RATECODE_PREAM_HT,
+	AR600P_HW_RATECODE_PREAM_VHT,
+};
+
+/*--- RX In Order Hash Code --------------------------------------------------*/
+
+/* htt_list_init() - turn @head into an empty circular doubly-linked list */
+static inline void htt_list_init(struct htt_list_node *head)
+{
+	head->next = head;
+	head->prev = head;
+}
+
+/* htt_list_add_tail() - insert @node just before @head, i.e. at the tail */
+static inline void htt_list_add_tail(struct htt_list_node *head,
+				     struct htt_list_node *node)
+{
+	struct htt_list_node *tail = head->prev;
+
+	node->prev = tail;
+	node->next = head;
+	tail->next = node;
+	head->prev = node;
+}
+
+/* htt_list_remove() - unlink @node from whatever list it is currently on */
+static inline void htt_list_remove(struct htt_list_node *node)
+{
+	struct htt_list_node *before = node->prev;
+	struct htt_list_node *after = node->next;
+
+	before->next = after;
+	after->prev = before;
+}
+
+/* Helper macro to iterate through the linked list */
+#define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;		\
+					   (iter) != (head);		\
+					   (iter) = (iter)->next)	\
+
+/* NOTE: the loop increment dereferences iter after the body runs, so a
+ * body that unlinks or frees iter must break out of the loop afterwards
+ * (as htt_rx_hash_list_lookup() does).
+ */
+#ifdef RX_HASH_DEBUG
+/* Hash cookie related macros */
+#define HTT_RX_HASH_COOKIE 0xDEED
+
+#define HTT_RX_HASH_COOKIE_SET(hash_element) \
+	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
+
+#define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
+	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
+
+/* Hash count related macros */
+#define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
+	((hash_bucket).count++)
+
+#define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
+	((hash_bucket).count--)
+
+#define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket).count = 0)
+
+#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
+	RX_HASH_LOG(cdf_print(" count %d\n", (hash_bucket).count))
+#else                           /* RX_HASH_DEBUG */
+/* Without RX_HASH_DEBUG the cookie/count instrumentation compiles away */
+/* Hash cookie related macros */
+#define HTT_RX_HASH_COOKIE_SET(hash_element)    /* no-op */
+#define HTT_RX_HASH_COOKIE_CHECK(hash_element)  /* no-op */
+/* Hash count related macros */
+#define HTT_RX_HASH_COUNT_INCR(hash_bucket)     /* no-op */
+#define HTT_RX_HASH_COUNT_DECR(hash_bucket)     /* no-op */
+#define HTT_RX_HASH_COUNT_PRINT(hash_bucket)    /* no-op */
+#define HTT_RX_HASH_COUNT_RESET(hash_bucket)    /* no-op */
+#endif /* RX_HASH_DEBUG */
+
+/* Inserts the given "physical address - network buffer" pair into the
+   hash table for the given pdev. This function will do the following:
+   1. Determine which bucket to insert the pair into
+   2. First try to allocate the hash entry for this pair from the pre-allocated
+      entries list
+   3. If there are no more entries in the pre-allocated entries list, allocate
+      the hash entry from the hash memory pool
+   Note: this function is not thread-safe
+   Returns 0 - success, 1 - failure */
+int
+htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
+			cdf_nbuf_t netbuf)
+{
+	int i;
+	struct htt_rx_hash_entry *hash_element = NULL;
+
+	/* hash the physical address to select a bucket */
+	i = RX_HASH_FUNCTION(paddr);
+
+	/* Check if there are any entries in the pre-allocated free list */
+	if (pdev->rx_ring.hash_table[i].freepool.next !=
+	    &pdev->rx_ring.hash_table[i].freepool) {
+
+		/* container-of: step back from the embedded listnode to
+		 * the enclosing htt_rx_hash_entry */
+		hash_element =
+			(struct htt_rx_hash_entry *)(
+				(char *)
+				pdev->rx_ring.hash_table[i].freepool.next -
+				pdev->rx_ring.listnode_offset);
+		if (cdf_unlikely(NULL == hash_element)) {
+			HTT_ASSERT_ALWAYS(0);
+			return 1;
+		}
+
+		htt_list_remove(pdev->rx_ring.hash_table[i].freepool.next);
+	} else {
+		/* free list exhausted - fall back to a heap allocation */
+		hash_element = cdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
+		if (cdf_unlikely(NULL == hash_element)) {
+			HTT_ASSERT_ALWAYS(0);
+			return 1;
+		}
+		/* mark as heap-allocated so lookup frees it rather than
+		 * returning it to the bucket freepool */
+		hash_element->fromlist = 0;
+	}
+
+	hash_element->netbuf = netbuf;
+	hash_element->paddr = paddr;
+	HTT_RX_HASH_COOKIE_SET(hash_element);
+
+	htt_list_add_tail(&pdev->rx_ring.hash_table[i].listhead,
+			  &hash_element->listnode);
+
+	RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x netbuf %p bucket %d\n",
+			      __func__, paddr, netbuf, (int)i));
+
+	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
+	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
+
+	return 0;
+}
+
+/* Given a physical address this function will find the corresponding network
+   buffer from the hash table.  This is a destructive lookup: on a hit the
+   entry is unlinked from its bucket and recycled (returned to the bucket's
+   freepool, or freed if it was heap-allocated).  A miss trips
+   HTT_ASSERT_ALWAYS and returns NULL.
+   Note: this function is not thread-safe */
+cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
+{
+	uint32_t i;
+	struct htt_list_node *list_iter = NULL;
+	cdf_nbuf_t netbuf = NULL;
+	struct htt_rx_hash_entry *hash_entry;
+
+	/* same bucket selection as htt_rx_hash_list_insert() */
+	i = RX_HASH_FUNCTION(paddr);
+
+	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i].listhead) {
+		/* container-of: recover the entry from its listnode */
+		hash_entry = (struct htt_rx_hash_entry *)
+			     ((char *)list_iter -
+			      pdev->rx_ring.listnode_offset);
+
+		HTT_RX_HASH_COOKIE_CHECK(hash_entry);
+
+		if (hash_entry->paddr == paddr) {
+			/* Found the entry corresponding to paddr */
+			netbuf = hash_entry->netbuf;
+			htt_list_remove(&hash_entry->listnode);
+			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
+			/* if the rx entry is from the pre-allocated list,
+			   return it */
+			if (hash_entry->fromlist)
+				htt_list_add_tail(&pdev->rx_ring.hash_table[i].
+						  freepool,
+						  &hash_entry->listnode);
+			else
+				cdf_mem_free(hash_entry);
+
+			/* break is required: list_iter was just unlinked */
+			break;
+		}
+	}
+
+	RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x, netbuf %p, bucket %d\n",
+			      __func__, paddr, netbuf, (int)i));
+	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
+
+	if (netbuf == NULL) {
+		cdf_print("rx hash: %s: no entry found for 0x%x!!!\n",
+			  __func__, paddr);
+		HTT_ASSERT_ALWAYS(0);
+	}
+
+	return netbuf;
+}
+
+/* Initialization function of the rx buffer hash table. This function will
+   allocate a hash table of a certain pre-determined size and initialize all
+   the elements.  On partial failure every bucket allocated so far is
+   released again, so the caller sees either a fully-built table or none.
+   Returns 0 - success, 1 - failure */
+int htt_rx_hash_init(struct htt_pdev_t *pdev)
+{
+	int i, j;
+
+	/* RX_HASH_FUNCTION presumably relies on a power-of-2 bucket count */
+	HTT_ASSERT2(IS_PWR2(RX_NUM_HASH_BUCKETS));
+
+	pdev->rx_ring.hash_table =
+		cdf_mem_malloc(RX_NUM_HASH_BUCKETS *
+			       sizeof(struct htt_rx_hash_bucket));
+
+	if (NULL == pdev->rx_ring.hash_table) {
+		cdf_print("rx hash table allocation failed!\n");
+		return 1;
+	}
+
+	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
+
+		/* initialize the hash table buckets */
+		htt_list_init(&pdev->rx_ring.hash_table[i].listhead);
+
+		/* initialize the hash table free pool per bucket */
+		htt_list_init(&pdev->rx_ring.hash_table[i].freepool);
+
+		/* pre-allocate a pool of entries for this bucket */
+		pdev->rx_ring.hash_table[i].entries =
+			cdf_mem_malloc(RX_ENTRIES_SIZE *
+				       sizeof(struct htt_rx_hash_entry));
+
+		if (NULL == pdev->rx_ring.hash_table[i].entries) {
+			cdf_print("rx hash bucket %d entries alloc failed\n",
+				(int)i);
+			/* unwind: free the entry pools of buckets 0..i-1 */
+			while (i) {
+				i--;
+				cdf_mem_free(pdev->rx_ring.hash_table[i].
+					     entries);
+			}
+			cdf_mem_free(pdev->rx_ring.hash_table);
+			pdev->rx_ring.hash_table = NULL;
+			return 1;
+		}
+
+		/* initialize the free list with pre-allocated entries */
+		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
+			pdev->rx_ring.hash_table[i].entries[j].fromlist = 1;
+			htt_list_add_tail(&pdev->rx_ring.hash_table[i].freepool,
+					  &pdev->rx_ring.hash_table[i].
+					  entries[j].listnode);
+		}
+	}
+
+	/* offset used for the container-of arithmetic in insert/lookup */
+	pdev->rx_ring.listnode_offset =
+		cdf_offsetof(struct htt_rx_hash_entry, listnode);
+
+	return 0;
+}
+
+/* htt_rx_hash_dump_table() - debug print of every entry in every bucket */
+void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
+{
+	uint32_t bucket;
+	struct htt_list_node *iter = NULL;
+	struct htt_rx_hash_entry *entry;
+
+	for (bucket = 0; bucket < RX_NUM_HASH_BUCKETS; bucket++) {
+		HTT_LIST_ITER_FWD(iter,
+				  &pdev->rx_ring.hash_table[bucket].listhead) {
+			/* container-of: recover the entry from its listnode */
+			entry = (struct htt_rx_hash_entry *)
+				((char *)iter -
+				 pdev->rx_ring.listnode_offset);
+			cdf_print("hash_table[%d]: netbuf %p paddr 0x%x\n",
+				  bucket, entry->netbuf, entry->paddr);
+		}
+	}
+}
+
+/*--- RX In Order Hash Code --------------------------------------------------*/
+
+/* move the function to the end of file
+ * to omit ll/hl pre-declaration
+ */
+/**
+ * htt_rx_attach() - attach the HTT rx module to a physical device
+ * @pdev: HTT pdev instance to attach
+ *
+ * Sizes and allocates the rx ring state: the paddr ring plus either a
+ * netbuf shadow ring (classic LL) or a hash table + target index (full
+ * reorder offload), the alloc index, refill accounting and retry timer.
+ * Pre-fills the ring and binds the global LL rx function pointers.
+ *
+ * Return: 0 on success, 1 on failure
+ */
+int htt_rx_attach(struct htt_pdev_t *pdev)
+{
+	cdf_dma_addr_t paddr;
+#if HTT_PADDR64
+	uint32_t ring_elem_size = sizeof(uint64_t);
+#else
+	uint32_t ring_elem_size = sizeof(uint32_t);
+#endif /* HTT_PADDR64 */
+	pdev->rx_ring.size = htt_rx_ring_size(pdev);
+	HTT_ASSERT2(IS_PWR2(pdev->rx_ring.size));
+	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
+
+	/*
+	* Set the initial value for the level to which the rx ring
+	* should be filled, based on the max throughput and the worst
+	* likely latency for the host to fill the rx ring.
+	* In theory, this fill level can be dynamically adjusted from
+	* the initial value set here to reflect the actual host latency
+	* rather than a conservative assumption.
+	*/
+	pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
+
+	if (pdev->cfg.is_full_reorder_offload) {
+		if (htt_rx_hash_init(pdev))
+			goto fail1;
+
+		/* allocate the target index */
+		pdev->rx_ring.target_idx.vaddr =
+			 cdf_os_mem_alloc_consistent(pdev->osdev,
+				 sizeof(uint32_t),
+				 &paddr,
+				 cdf_get_dma_mem_context(
+					(&pdev->rx_ring.target_idx),
+					 memctx));
+
+		if (!pdev->rx_ring.target_idx.vaddr) {
+			/* BUG FIX: the hash table allocated just above was
+			 * previously leaked on this error path */
+			htt_rx_hash_deinit(pdev);
+			goto fail1;
+		}
+
+		pdev->rx_ring.target_idx.paddr = paddr;
+		*pdev->rx_ring.target_idx.vaddr = 0;
+	} else {
+		pdev->rx_ring.buf.netbufs_ring =
+			cdf_mem_malloc(pdev->rx_ring.size * sizeof(cdf_nbuf_t));
+		if (!pdev->rx_ring.buf.netbufs_ring)
+			goto fail1;
+
+		pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
+		pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
+	}
+
+	pdev->rx_ring.buf.paddrs_ring =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			 pdev->rx_ring.size * ring_elem_size,
+			 &paddr,
+			 cdf_get_dma_mem_context(
+				(&pdev->rx_ring.buf),
+				 memctx));
+	if (!pdev->rx_ring.buf.paddrs_ring)
+		goto fail2;
+
+	pdev->rx_ring.base_paddr = paddr;
+	pdev->rx_ring.alloc_idx.vaddr =
+		 cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			 sizeof(uint32_t),
+			 &paddr,
+			 cdf_get_dma_mem_context(
+				(&pdev->rx_ring.alloc_idx),
+				 memctx));
+
+	if (!pdev->rx_ring.alloc_idx.vaddr)
+		goto fail3;
+
+	pdev->rx_ring.alloc_idx.paddr = paddr;
+	*pdev->rx_ring.alloc_idx.vaddr = 0;
+
+	/*
+	* Initialize the Rx refill reference counter to be one so that
+	* only one thread is allowed to refill the Rx ring.
+	*/
+	cdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
+	cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
+
+	/* Initialize the Rx refill retry timer */
+	cdf_softirq_timer_init(pdev->osdev,
+		 &pdev->rx_ring.refill_retry_timer,
+		 htt_rx_ring_refill_retry, (void *)pdev,
+		 CDF_TIMER_TYPE_SW);
+
+	pdev->rx_ring.fill_cnt = 0;
+#ifdef DEBUG_DMA_DONE
+	pdev->rx_ring.dbg_ring_idx = 0;
+	pdev->rx_ring.dbg_refill_cnt = 0;
+	pdev->rx_ring.dbg_sync_success = 0;
+#endif
+#ifdef HTT_RX_RESTORE
+	pdev->rx_ring.rx_reset = 0;
+	pdev->rx_ring.htt_rx_restore = 0;
+#endif
+	htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
+
+	/* bind the global rx function pointers to the LL implementations */
+	if (pdev->cfg.is_full_reorder_offload) {
+		cdf_print("HTT: full reorder offload enabled\n");
+		htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
+		htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
+		htt_rx_mpdu_desc_list_next =
+			 htt_rx_in_ord_mpdu_desc_list_next_ll;
+	} else {
+		htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
+		htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
+		htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
+	}
+
+	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
+	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
+	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
+	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
+	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
+	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
+	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
+	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
+	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
+	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
+	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
+	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
+	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
+	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
+	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
+
+	return 0;               /* success */
+
+fail3:
+	/* BUG FIX: free with the same size the ring was allocated with;
+	 * sizeof(uint32_t) under-reported the size when HTT_PADDR64 makes
+	 * the ring elements 64 bits wide.
+	 */
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   pdev->rx_ring.size * ring_elem_size,
+				   pdev->rx_ring.buf.paddrs_ring,
+				   pdev->rx_ring.base_paddr,
+				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),
+							   memctx));
+
+fail2:
+	if (pdev->cfg.is_full_reorder_offload) {
+		cdf_os_mem_free_consistent(pdev->osdev,
+					   sizeof(uint32_t),
+					   pdev->rx_ring.target_idx.vaddr,
+					   pdev->rx_ring.target_idx.paddr,
+					   cdf_get_dma_mem_context((&pdev->
+								    rx_ring.
+								    target_idx),
+								   memctx));
+		htt_rx_hash_deinit(pdev);
+	} else {
+		cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
+	}
+
+fail1:
+	return 1;               /* failure */
+}
+
+#ifdef IPA_OFFLOAD
+/**
+ * htt_rx_ipa_uc_attach() - allocate IPA uC WLAN rx resources
+ * @pdev: HTT pdev instance
+ * @rx_ind_ring_elements: element count for both RX indication rings
+ *
+ * Allocates the RX and RX2 indication rings plus a 4-byte "process done"
+ * index word for each, all DMA-coherent, and zeroes them.  On any
+ * allocation failure every earlier allocation is released again (the
+ * original code leaked the RX resources when an RX2 allocation failed).
+ *
+ * Return: 0 on success, -ENOBUFS on allocation failure
+ */
+int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int rx_ind_ring_elements)
+{
+	/* Allocate RX indication ring */
+	/* RX IND ring element
+	 *   4bytes: pointer
+	 *   2bytes: VDEV ID
+	 *   2bytes: length */
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			rx_ind_ring_elements *
+			sizeof(struct ipa_uc_rx_ring_elem_t),
+			&pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ind_ring_base),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+		cdf_print("%s: RX IND RING alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* RX indication ring size, by bytes */
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
+		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
+
+	/* Allocate RX process done index */
+	pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
+		goto free_rx_ind_ring;
+	}
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
+
+	/* Allocate RX2 indication ring; same element layout as the RX ring */
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			rx_ind_ring_elements *
+			sizeof(struct ipa_uc_rx_ring_elem_t),
+			&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ind_ring_base),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+		/* BUG FIX: message previously said "RX IND RING" */
+		cdf_print("%s: RX2 IND RING alloc fail", __func__);
+		goto free_rx_proc_done_idx;
+	}
+
+	/* RX2 indication ring size, by bytes */
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
+		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
+
+	/* Allocate RX2 process done index.
+	 * NOTE(review): the memctx below references rx_ipa_prc_done_idx
+	 * rather than rx2_ipa_prc_done_idx; this matches the free in
+	 * htt_rx_ipa_uc_detach() but looks like a copy-paste of the RX
+	 * code — confirm and fix both sides together if so.
+	 */
+	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+		/* BUG FIX: message previously said "RX PROC DONE IND" */
+		cdf_print("%s: RX2 PROC DONE IND alloc fail", __func__);
+		goto free_rx2_ind_ring;
+	}
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
+	return 0;
+
+	/* Error unwind: release the allocations in reverse order so a
+	 * partial attach does not leak DMA-coherent memory.
+	 */
+free_rx2_ind_ring:
+	cdf_os_mem_free_consistent(
+		pdev->osdev,
+		pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+		pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+		cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+					 rx2_ind_ring_base),
+					memctx));
+free_rx_proc_done_idx:
+	cdf_os_mem_free_consistent(
+		pdev->osdev,
+		4,
+		pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr,
+		pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+		cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+					 rx_ipa_prc_done_idx),
+					memctx));
+free_rx_ind_ring:
+	cdf_os_mem_free_consistent(
+		pdev->osdev,
+		pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+		pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+		cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+					 rx_ind_ring_base),
+					memctx));
+	return -ENOBUFS;
+}
+
+/**
+ * htt_rx_ipa_uc_detach() - free IPA uC WLAN rx resources
+ * @pdev: HTT pdev instance
+ *
+ * Releases whichever of the RX/RX2 indication rings and "process done"
+ * index words are currently allocated (each guarded by a vaddr check).
+ *
+ * Return: 0 always
+ */
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ind_ring_base),
+						memctx));
+	}
+
+	if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_rx_rsc.
+			rx_ipa_prc_done_idx.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	}
+	if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ind_ring_base),
+						memctx));
+	}
+
+	if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+		/* BUG FIX: this free previously passed
+		 * rx_ipa_prc_done_idx.vaddr together with
+		 * rx2_ipa_prc_done_idx.paddr — a vaddr/paddr mismatch that
+		 * double-frees the RX index word and leaks the RX2 one.
+		 * NOTE(review): the memctx still references
+		 * rx_ipa_prc_done_idx, matching the allocation in
+		 * htt_rx_ipa_uc_attach(); fix both sides together if that
+		 * is confirmed to be a copy-paste error.
+		 */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_rx_rsc.
+			rx2_ipa_prc_done_idx.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	}
+	return 0;
+}
+#endif /* IPA_OFFLOAD */

+ 935 - 0
core/dp/htt/htt_t2h.c

@@ -0,0 +1,935 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_t2h.c
+ * @brief Provide functions to process target->host HTT messages.
+ * @details
+ *  This file contains functions related to target->host HTT messages.
+ *  There are two categories of functions:
+ *  1.  A function that receives a HTT message from HTC, and dispatches it
+ *      based on the HTT message type.
+ *  2.  functions that provide the info elements from specific HTT messages.
+ */
+
+#include <htc_api.h>            /* HTC_PACKET */
+#include <htt.h>                /* HTT_T2H_MSG_TYPE, etc. */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+
+#include <ol_htt_rx_api.h>
+#include <ol_htt_tx_api.h>
+#include <ol_txrx_htt_api.h>    /* htt_tx_status */
+
+#include <htt_internal.h>       /* HTT_TX_SCHED, etc. */
+#include <pktlog_ac_fmt.h>
+#include <wdi_event.h>
+#include <ol_htt_tx_api.h>
+#include <ol_txrx_types.h>
+/*--- target->host HTT message dispatch function ----------------------------*/
+
+#ifndef DEBUG_CREDIT
+#define DEBUG_CREDIT 0
+#endif
+
+/**
+ * htt_t2h_mac_addr_deswizzle() - undo per-dword byte swizzling of a MAC addr
+ * @tgt_mac_addr: MAC address bytes as laid out in the uploaded message
+ * @buffer:       caller-provided scratch buffer, used only on big-endian hosts
+ *
+ * Return: pointer to the MAC address in usable byte order - @buffer on a
+ *         big-endian host, or @tgt_mac_addr itself otherwise
+ */
+static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
+					   uint8_t *buffer)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * The host endianness is opposite of the target endianness.
+	 * To make uint32_t elements come out correctly, the target->host
+	 * upload has swizzled the bytes in each uint32_t element of the
+	 * message.
+	 * For byte-array message fields like the MAC address, this
+	 * upload swizzling puts the bytes in the wrong order, and needs
+	 * to be undone.
+	 */
+	buffer[0] = tgt_mac_addr[3];
+	buffer[1] = tgt_mac_addr[2];
+	buffer[2] = tgt_mac_addr[1];
+	buffer[3] = tgt_mac_addr[0];
+	/* MAC bytes 4-5 sit in the low half of the second swizzled dword */
+	buffer[4] = tgt_mac_addr[7];
+	buffer[5] = tgt_mac_addr[6];
+	return buffer;
+#else
+	/*
+	 * The host endianness matches the target endianness -
+	 * we can use the mac addr directly from the message buffer.
+	 */
+	return tgt_mac_addr;
+#endif
+}
+
+/**
+ * htt_rx_frag_set_last_msdu() - force the last_msdu flag on a rx fragment
+ * @pdev: the HTT instance the rx fragment indication arrived on
+ * @msg:  netbuf holding the HTT RX_FRAG_IND message
+ *
+ * Also records the MPDU-range offset and resets the MSDU byte index for
+ * this fragment indication, and copies the FW rx descriptor byte from the
+ * message into the rx descriptor of the current rx ring buffer.
+ */
+static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, cdf_nbuf_t msg)
+{
+	uint32_t *msg_word;
+	unsigned num_msdu_bytes;
+	cdf_nbuf_t msdu;
+	struct htt_host_rx_desc_base *rx_desc;
+	int start_idx;
+	uint8_t *p_fw_msdu_rx_desc = 0;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
+		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
+	/*
+	 * 1 word for the message header,
+	 * 1 word to specify the number of MSDU bytes,
+	 * 1 word for every 4 MSDU bytes (round up),
+	 * 1 word for the MPDU range header
+	 */
+	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
+	pdev->rx_ind_msdu_byte_idx = 0;
+
+	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
+			     HTT_ENDIAN_BYTE_IDX_SWAP
+				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
+
+	/*
+	 * Fix for EV126710, in which BSOD occurs due to last_msdu bit
+	 * not set while the next pointer is deliberately set to NULL
+	 * before calling ol_rx_pn_check_base()
+	 *
+	 * For fragment frames, the HW may not have set the last_msdu bit
+	 * in the rx descriptor, but the SW expects this flag to be set,
+	 * since each fragment is in a separate MPDU. Thus, set the flag here,
+	 * just in case the HW didn't.
+	 */
+	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
+	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
+	cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+	/* unmap before the CPU writes to the rx descriptor; re-map after */
+	cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+	rx_desc = htt_rx_desc(msdu);
+	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
+	rx_desc->msdu_end.last_msdu = 1;
+	cdf_nbuf_map(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+}
+
+/**
+ * htt_t2h_lp_msg_handler() - handle low-priority target->host HTT messages
+ * @context:     the HTT instance (a struct htt_pdev_t *)
+ * @htt_t2h_msg: netbuf holding the T2H message
+ *
+ * Dispatches on the HTT message type for messages kept off the rx/tx fast
+ * path: version config, rx flush, offload deliver, rx fragment, addba/delba,
+ * peer map/unmap, security indication, mgmt tx completion, stats conf,
+ * pktlog, tx credit updates, WDI IPA op responses and flow pool map/unmap.
+ * The message buffer is always freed here before returning.
+ */
+void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
+{
+	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+	uint32_t *msg_word;
+	enum htt_t2h_msg_type msg_type;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+	switch (msg_type) {
+	case HTT_T2H_MSG_TYPE_VERSION_CONF:
+	{
+		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
+		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
+		cdf_print
+			("target uses HTT version %d.%d; host uses %d.%d\n",
+			pdev->tgt_ver.major, pdev->tgt_ver.minor,
+			HTT_CURRENT_VERSION_MAJOR,
+			HTT_CURRENT_VERSION_MINOR);
+		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
+			cdf_print
+			      ("*** Incompatible host/target HTT versions!\n");
+		/* abort if the target is incompatible with the host */
+		cdf_assert(pdev->tgt_ver.major ==
+			   HTT_CURRENT_VERSION_MAJOR);
+		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
+			cdf_print("*** Warning: host/target HTT versions are ");
+			cdf_print(" different, though compatible!\n");
+		}
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FLUSH:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+		int seq_num_start, seq_num_end;
+		enum htt_rx_flush_action action;
+
+		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
+		seq_num_start =
+			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
+		seq_num_end =
+			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
+		action =
+			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
+			1 ? htt_rx_flush_release : htt_rx_flush_discard;
+		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
+				    seq_num_start, seq_num_end, action);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
+	{
+		int msdu_cnt;
+		msdu_cnt =
+			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
+		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
+						  htt_t2h_msg,
+						  msdu_cnt);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+
+		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
+		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
+
+		ol_rx_frag_indication_handler(pdev->txrx_pdev,
+					      htt_t2h_msg,
+					      peer_id, tid);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_ADDBA:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+		uint8_t win_sz;
+		uint16_t start_seq_num;
+
+		/*
+		 * FOR NOW, the host doesn't need to know the initial
+		 * sequence number for rx aggregation.
+		 * Thus, any value will do - specify 0.
+		 */
+		start_seq_num = 0;
+		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
+		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
+		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
+				    win_sz, start_seq_num,
+				    0 /* success */);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_DELBA:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+
+		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_DELBA_TID_GET(*msg_word);
+		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_PEER_MAP:
+	{
+		uint8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
+		uint8_t *peer_mac_addr;
+		uint16_t peer_id;
+		uint8_t vdev_id;
+
+		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
+		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
+		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
+			(uint8_t *) (msg_word + 1),
+			&mac_addr_deswizzle_buf[0]);
+
+		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
+				       vdev_id, peer_mac_addr,
+				       1 /*can tx */);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+	{
+		uint16_t peer_id;
+		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
+
+		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_SEC_IND:
+	{
+		uint16_t peer_id;
+		enum htt_sec_type sec_type;
+		int is_unicast;
+
+		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
+		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
+		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
+		msg_word++;   /* point to the first part of the Michael key */
+		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
+				      sec_type, is_unicast, msg_word,
+				      msg_word + 2);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
+	{
+		struct htt_mgmt_tx_compl_ind *compl_msg;
+
+		compl_msg =
+			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
+
+		ol_tx_single_completion_handler(pdev->txrx_pdev,
+						compl_msg->status,
+						compl_msg->desc_id);
+		HTT_TX_SCHED(pdev);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_STATS_CONF:
+	{
+		uint64_t cookie;
+		uint8_t *stats_info_list;
+
+		/* the 64-bit cookie is split across two 32-bit words */
+		cookie = *(msg_word + 1);
+		cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
+
+		stats_info_list = (uint8_t *) (msg_word + 3);
+		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
+					 stats_info_list);
+		break;
+	}
+#ifndef REMOVE_PKT_LOG
+	case HTT_T2H_MSG_TYPE_PKTLOG:
+	{
+		uint32_t *pl_hdr;
+		uint32_t log_type;
+		pl_hdr = (msg_word + 1);
+		log_type =
+			(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
+			ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
+		if ((log_type == PKTLOG_TYPE_TX_CTRL)
+		    || (log_type == PKTLOG_TYPE_TX_STAT)
+		    || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
+		    || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
+		    || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
+			wdi_event_handler(WDI_EVENT_TX_STATUS,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RC_FIND)
+			wdi_event_handler(WDI_EVENT_RATE_FIND,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RC_UPDATE)
+			wdi_event_handler(WDI_EVENT_RATE_UPDATE,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RX_STAT)
+			wdi_event_handler(WDI_EVENT_RX_DESC,
+					  pdev->txrx_pdev, pl_hdr);
+
+		break;
+	}
+#endif
+	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+	{
+		uint32_t htt_credit_delta_abs;
+		int32_t htt_credit_delta;
+		int sign;
+
+		/* credit delta is sent as sign bit + absolute value */
+		htt_credit_delta_abs =
+			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
+		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
+		htt_credit_delta = sign * htt_credit_delta_abs;
+		ol_tx_credit_completion_handler(pdev->txrx_pdev,
+						htt_credit_delta);
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
+	{
+		uint8_t op_code;
+		uint16_t len;
+		uint8_t *op_msg_buffer;
+		uint8_t *msg_start_ptr;
+
+		msg_start_ptr = (uint8_t *) msg_word;
+		op_code =
+			HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
+		msg_word++;
+		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
+
+		op_msg_buffer =
+			cdf_mem_malloc(sizeof
+				       (struct htt_wdi_ipa_op_response_t) +
+				       len);
+		if (!op_msg_buffer) {
+			cdf_print("OPCODE messsage buffer alloc fail");
+			break;
+		}
+		cdf_mem_copy(op_msg_buffer,
+			     msg_start_ptr,
+			     sizeof(struct htt_wdi_ipa_op_response_t) +
+			     len);
+		ol_txrx_ipa_uc_op_response(pdev->txrx_pdev,
+					   op_msg_buffer);
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
+	{
+		uint8_t num_flows;
+		struct htt_flow_pool_map_payload_t *pool_map_payoad;
+
+		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
+
+		msg_word++;
+		/* one fixed-size payload record per flow */
+		while (num_flows) {
+			pool_map_payoad = (struct htt_flow_pool_map_payload_t *)
+								msg_word;
+			ol_tx_flow_pool_map_handler(pool_map_payoad->flow_id,
+					pool_map_payoad->flow_type,
+					pool_map_payoad->flow_pool_id,
+					pool_map_payoad->flow_pool_size);
+
+			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
+						 HTT_FLOW_POOL_MAP_HEADER_SZ);
+			num_flows--;
+		}
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
+	{
+		struct htt_flow_pool_unmap_t *pool_numap_payload;
+
+		pool_numap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
+		ol_tx_flow_pool_unmap_handler(pool_numap_payload->flow_id,
+					pool_numap_payload->flow_type,
+					pool_numap_payload->flow_pool_id);
+		break;
+	}
+
+	default:
+		break;
+	};
+	/* Free the indication buffer */
+	cdf_nbuf_free(htt_t2h_msg);
+}
+
+/**
+ * htt_t2h_msg_handler() - generic target->host HTT msg/event handler
+ * @context: the HTT instance (a struct htt_pdev_t *)
+ * @pkt:     HTC packet whose pPktContext is the T2H message netbuf
+ *
+ * Low-priority messages are handled in a separate handler called from
+ * this function, so that the most likely success paths (rx indication
+ * and tx completion) keep a small code footprint.  The message netbuf
+ * is freed here, except when it is handed to the low-priority handler,
+ * which frees it itself.
+ */
+void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
+{
+	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+	cdf_nbuf_t htt_t2h_msg = (cdf_nbuf_t) pkt->pPktContext;
+	uint32_t *msg_word;
+	enum htt_t2h_msg_type msg_type;
+
+	/* check for successful message reception */
+	if (pkt->Status != A_OK) {
+		if (pkt->Status != A_ECANCELED)
+			pdev->stats.htc_err_cnt++;
+		cdf_nbuf_free(htt_t2h_msg);
+		return;
+	}
+#ifdef HTT_RX_RESTORE
+	if (cdf_unlikely(pdev->rx_ring.rx_reset)) {
+		cdf_print("rx restore ..\n");
+		cdf_nbuf_free(htt_t2h_msg);
+		return;
+	}
+#endif
+
+	/* confirm alignment */
+	HTT_ASSERT3((((unsigned long)cdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
+
+	msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+
+#if defined(HELIUMPLUS_DEBUG)
+	cdf_print("%s %d: msg_word 0x%x msg_type %d\n",
+		  __func__, __LINE__, *msg_word, msg_type);
+#endif
+
+	switch (msg_type) {
+	case HTT_T2H_MSG_TYPE_RX_IND:
+	{
+		unsigned num_mpdu_ranges;
+		unsigned num_msdu_bytes;
+		uint16_t peer_id;
+		uint8_t tid;
+
+		if (cdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
+			cdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported ");
+			cdf_print("with full reorder offload\n");
+			break;
+		}
+		peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
+
+		num_msdu_bytes =
+			HTT_RX_IND_FW_RX_DESC_BYTES_GET(
+				*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
+		/*
+		 * 1 word for the message header,
+		 * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
+		 * 1 word to specify the number of MSDU bytes,
+		 * 1 word for every 4 MSDU bytes (round up),
+		 * 1 word for the MPDU range header
+		 */
+		pdev->rx_mpdu_range_offset_words =
+			(HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
+		num_mpdu_ranges =
+			HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
+		pdev->rx_ind_msdu_byte_idx = 0;
+
+		ol_rx_indication_handler(pdev->txrx_pdev,
+					 htt_t2h_msg, peer_id,
+					 tid, num_mpdu_ranges);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+	{
+		int num_msdus;
+		enum htt_tx_status status;
+
+		/* status - no enum translation needed */
+		status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
+		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
+		if (num_msdus & 0x1) {
+			struct htt_tx_compl_ind_base *compl =
+				(void *)msg_word;
+
+			/*
+			 * Host CPU endianness can be different from FW CPU.
+			 * This can result in even and odd MSDU IDs being
+			 * switched. If this happens, copy the switched final
+			 * odd MSDU ID from location payload[size], to
+			 * location payload[size-1], where the message
+			 * handler function expects to find it
+			 */
+			if (compl->payload[num_msdus] !=
+			    HTT_TX_COMPL_INV_MSDU_ID) {
+				compl->payload[num_msdus - 1] =
+					compl->payload[num_msdus];
+			}
+		}
+		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
+					 status, msg_word + 1);
+		HTT_TX_SCHED(pdev);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_PN_IND:
+	{
+		uint16_t peer_id;
+		uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
+		int seq_num_start, seq_num_end;
+
+		/*First dword */
+		peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
+
+		msg_word++;
+		/*Second dword */
+		seq_num_start =
+			HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
+		seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
+		pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
+
+		msg_word++;
+		/*Third dword */
+		if (pn_ie_cnt)
+			pn_ie = (uint8_t *) msg_word;
+
+		ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
+				     seq_num_start, seq_num_end,
+				     pn_ie_cnt, pn_ie);
+
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+	{
+		int num_msdus;
+
+		num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
+		if (num_msdus & 0x1) {
+			struct htt_tx_compl_ind_base *compl =
+				(void *)msg_word;
+
+			/*
+			 * Host CPU endianness can be different from FW CPU.
+			 * This can result in even and odd MSDU IDs being
+			 * switched. If this happens, copy the switched final
+			 * odd MSDU ID from location payload[size], to
+			 * location payload[size-1], where the message handler
+			 * function expects to find it
+			 */
+			if (compl->payload[num_msdus] !=
+			    HTT_TX_COMPL_INV_MSDU_ID) {
+				compl->payload[num_msdus - 1] =
+					compl->payload[num_msdus];
+			}
+		}
+		ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
+				      msg_word + 1);
+		HTT_TX_SCHED(pdev);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+		uint8_t offload_ind, frag_ind;
+
+		if (cdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
+			cdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not ");
+			cdf_print("supported when full reorder offload is ");
+			cdf_print("disabled in the configuration.\n");
+			break;
+		}
+
+		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
+		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
+		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
+
+#if defined(HELIUMPLUS_DEBUG)
+		cdf_print("%s %d: peerid %d tid %d offloadind %d fragind %d\n",
+			  __func__, __LINE__, peer_id, tid, offload_ind,
+			  frag_ind);
+#endif
+		if (cdf_unlikely(frag_ind)) {
+			ol_rx_frag_indication_handler(pdev->txrx_pdev,
+						      htt_t2h_msg,
+						      peer_id, tid);
+			break;
+		}
+
+		ol_rx_in_order_indication_handler(pdev->txrx_pdev,
+						  htt_t2h_msg, peer_id,
+						  tid, offload_ind);
+		break;
+	}
+
+	default:
+		/* the low-priority handler frees the netbuf itself */
+		htt_t2h_lp_msg_handler(context, htt_t2h_msg);
+		return;
+
+	};
+
+	/* Free the indication buffer */
+	cdf_nbuf_free(htt_t2h_msg);
+}
+
+/*--- target->host HTT message Info Element access methods ------------------*/
+
+/*--- tx completion message ---*/
+
+/**
+ * htt_tx_compl_desc_id() - read the num'th MSDU ID from a tx completion
+ * @iterator: pointer into the completion payload (packed 16-bit IDs)
+ * @num:      index of the ID to read
+ *
+ * The target packs MSDU IDs two per 32-bit word, so the payload can be
+ * read as a flat array of 16-bit descriptor IDs.  On a host whose
+ * endianness differs from the target's, each pair of IDs comes out
+ * swapped ([1, 0, 3, 2, ...] instead of [0, 1, 2, 3, ...]).  That is
+ * harmless except for the final ID of an odd-length list, and the
+ * TX_COMPL_IND handling in htt_t2h_msg_handler has already duplicated
+ * that final ID, so direct indexing is always safe.
+ *
+ * Return: the 16-bit MSDU descriptor ID at index @num
+ */
+uint16_t htt_tx_compl_desc_id(void *iterator, int num)
+{
+	uint16_t *msdu_ids = (uint16_t *) iterator;
+
+	return msdu_ids[num];
+}
+
+/*--- rx indication message ---*/
+
+/* Report whether the rx indication message's flush IE is marked valid. */
+int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	uint32_t *first_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+
+	return HTT_RX_IND_FLUSH_VALID_GET(*first_word);
+}
+
+/**
+ * htt_rx_ind_flush_seq_num_range() - get the flush seq-num range from a
+ *	rx indication message
+ * @pdev:          the HTT instance the rx data was received on
+ * @rx_ind_msg:    the netbuf containing the rx indication message
+ * @seq_num_start: (output) start of the sequence number range to flush
+ * @seq_num_end:   (output) end of the sequence number range to flush
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
+			       cdf_nbuf_t rx_ind_msg,
+			       unsigned *seq_num_start, unsigned *seq_num_end)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+	msg_word++;
+	*seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
+	*seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
+}
+
+/* Report whether the rx indication message's release IE is marked valid. */
+int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	uint32_t *first_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+
+	return HTT_RX_IND_REL_VALID_GET(*first_word);
+}
+
+/**
+ * htt_rx_ind_release_seq_num_range() - get the release seq-num range from a
+ *	rx indication message
+ * @pdev:          the HTT instance the rx data was received on
+ * @rx_ind_msg:    the netbuf containing the rx indication message
+ * @seq_num_start: (output) start of the sequence number range to release
+ * @seq_num_end:   (output) end of the sequence number range to release
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
+				 cdf_nbuf_t rx_ind_msg,
+				 unsigned *seq_num_start, unsigned *seq_num_end)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+	msg_word++;
+	*seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
+	*seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_mpdu_range_info() - extract status and count for one MPDU range
+ * @pdev:           the HTT instance the rx data was received on
+ * @rx_ind_msg:     the netbuf containing the rx indication message
+ * @mpdu_range_num: which MPDU range within the message to read
+ * @status:         (output) the rx status of the MPDUs in this range
+ * @mpdu_count:     (output) the number of MPDUs in this range
+ *
+ * Uses pdev->rx_mpdu_range_offset_words, recorded when the rx indication
+ * was first parsed, to locate the MPDU range words in the message.
+ */
+void
+htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
+			   cdf_nbuf_t rx_ind_msg,
+			   int mpdu_range_num,
+			   enum htt_rx_status *status, int *mpdu_count)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+	msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
+	*status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
+	*mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
+ *
+ * @pdev:       the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return the RSSI from an rx indication message, in dBm units.
+ *
+ * Return: RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	int8_t rssi;
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *)
+		   (cdf_nbuf_data(rx_ind_msg) +
+		    HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+	/* check if the RX_IND message contains valid rx PPDU start info */
+	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
+		return HTT_RSSI_INVALID;
+
+	rssi = HTT_RX_IND_RSSI_CMB_GET(*msg_word);
+	/* HTT_TGT_RSSI_INVALID is the target-side "no RSSI" sentinel */
+	return (HTT_TGT_RSSI_INVALID == rssi) ?
+	       HTT_RSSI_INVALID : rssi;
+}
+
+/**
+ * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
+ *              indication message.
+ * @pdev:       the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @chain:      the index of the chain (0-3)
+ *
+ * Return the RSSI for a chain from an rx indication message, in dBm units.
+ *
+ * Return: RSSI, or HTT_INVALID_RSSI
+ */
+int16_t
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+		      int8_t chain)
+{
+	int8_t rssi;
+	uint32_t *msg_word;
+
+	/* only chains 0-3 are valid */
+	if (chain < 0 || chain > 3)
+		return HTT_RSSI_INVALID;
+
+	msg_word = (uint32_t *)
+		(cdf_nbuf_data(rx_ind_msg) +
+		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+	/* check if the RX_IND message contains valid rx PPDU start info */
+	if (!HTT_RX_IND_START_VALID_GET(*msg_word))
+		return HTT_RSSI_INVALID;
+
+	/* per-chain RSSI words follow the first PPDU desc word */
+	msg_word += 1 + chain;
+
+	rssi = HTT_RX_IND_RSSI_PRI20_GET(*msg_word);
+	return (HTT_TGT_RSSI_INVALID == rssi) ?
+		HTT_RSSI_INVALID :
+		rssi;
+}
+
+/**
+ * htt_rx_ind_legacy_rate() - Return the data rate
+ * @pdev:        the HTT instance the rx data was received on
+ * @rx_ind_msg:  the netbuf containing the rx indication message
+ * @legacy_rate: (output) the data rate
+ *      The legacy_rate parameter's value depends on the
+ *      legacy_rate_sel value.
+ *      If legacy_rate_sel is 0:
+ *              0x8: OFDM 48 Mbps
+ *              0x9: OFDM 24 Mbps
+ *              0xA: OFDM 12 Mbps
+ *              0xB: OFDM 6 Mbps
+ *              0xC: OFDM 54 Mbps
+ *              0xD: OFDM 36 Mbps
+ *              0xE: OFDM 18 Mbps
+ *              0xF: OFDM 9 Mbps
+ *      If legacy_rate_sel is 1:
+ *              0x8: CCK 11 Mbps long preamble
+ *              0x9: CCK 5.5 Mbps long preamble
+ *              0xA: CCK 2 Mbps long preamble
+ *              0xB: CCK 1 Mbps long preamble
+ *              0xC: CCK 11 Mbps short preamble
+ *              0xD: CCK 5.5 Mbps short preamble
+ *              0xE: CCK 2 Mbps short preamble
+ *      -1 on error.
+ * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
+ *      -1 on error.
+ *
+ * Return the data rate provided in a rx indication message.
+ */
+void
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *)
+		(cdf_nbuf_data(rx_ind_msg) +
+		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+	/* check if the RX_IND message contains valid rx PPDU start info */
+	if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
+		/* -1 outputs flag "no valid PPDU start info" to the caller */
+		*legacy_rate = -1;
+		*legacy_rate_sel = -1;
+		return;
+	}
+
+	*legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*msg_word);
+	*legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_timestamp() - Return the timestamp
+ * @pdev:                  the HTT instance the rx data was received on
+ * @rx_ind_msg:            the netbuf containing the rx indication message
+ * @timestamp_microsec:    (output) the timestamp to microsecond resolution.
+ *                         -1 on error.
+ * @timestamp_submicrosec: the submicrosecond portion of the
+ *                         timestamp. -1 on error.
+ *
+ * Return the timestamp provided in a rx indication message.
+ */
+void
+htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+		     uint32_t *timestamp_microsec,
+		     uint8_t *timestamp_submicrosec)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *)
+		(cdf_nbuf_data(rx_ind_msg) +
+		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+	/* check if the RX_IND message contains valid rx PPDU start info */
+	if (!HTT_RX_IND_END_VALID_GET(*msg_word)) {
+		*timestamp_microsec = -1;
+		*timestamp_submicrosec = -1;
+		return;
+	}
+
+	/* the microsecond timestamp is 6 words into the rx PPDU desc */
+	*timestamp_microsec = *(msg_word + 6);
+	*timestamp_submicrosec =
+		HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*msg_word);
+}
+
+/* sentinel returned when the message carries no valid PPDU end info */
+#define INVALID_TSF -1
+/**
+ * htt_rx_ind_tsf32() - Return the TSF timestamp
+ * @pdev:       the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return the TSF timestamp provided in a rx indication message.
+ *
+ * Return: TSF timestamp
+ */
+uint32_t
+htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *)
+		(cdf_nbuf_data(rx_ind_msg) +
+		 HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+	/* check if the RX_IND message contains valid rx PPDU start info */
+	if (!HTT_RX_IND_END_VALID_GET(*msg_word))
+		return INVALID_TSF;
+
+	/* the 32-bit TSF is 5 words into the rx PPDU desc */
+	return *(msg_word + 5);
+}
+
+/**
+ * htt_rx_ind_ext_tid() - extract the extended traffic ID from a rx
+ *	indication message
+ * @pdev:       the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return: the extended TID carried in the first word of the message
+ */
+uint8_t
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+	uint32_t *first_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+
+	return HTT_RX_IND_EXT_TID_GET(*first_word);
+}
+
+/*--- stats confirmation message ---*/
+
+/**
+ * htt_t2h_dbg_stats_hdr_parse() - parse one TLV header in a stats conf msg
+ * @stats_info_list: pointer to the TLV to parse
+ * @type:            (output) the stats type of this TLV
+ * @status:          (output) the stats status of this TLV
+ * @length:          (output) total TLV length in bytes, header plus data
+ * @stats_data:      (output) pointer to the TLV payload, just past the header
+ */
+void
+htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
+			    enum htt_dbg_stats_type *type,
+			    enum htt_dbg_stats_status *status,
+			    int *length, uint8_t **stats_data)
+{
+	uint32_t *msg_word = (uint32_t *) stats_info_list;
+	*type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
+	*status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
+	*length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE +     /* header length */
+		HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word); /* data len */
+	*stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
+}
+
+/**
+ * htt_rx_frag_ind_flush_seq_num_range() - get the flush seq-num range from
+ *	a rx fragment indication message
+ * @pdev:            the HTT instance the message was received on
+ * @rx_frag_ind_msg: the netbuf containing the rx fragment indication
+ * @seq_num_start:   (output) start of the sequence number range to flush
+ * @seq_num_end:     (output) end of the sequence number range to flush
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
+				    cdf_nbuf_t rx_frag_ind_msg,
+				    int *seq_num_start, int *seq_num_end)
+{
+	uint32_t *msg_word;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(rx_frag_ind_msg);
+	msg_word++;
+	*seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
+	*seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
+}

+ 864 - 0
core/dp/htt/htt_tx.c

@@ -0,0 +1,864 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_tx.c
+ * @brief Implement transmit aspects of HTT.
+ * @details
+ *  This file contains three categories of HTT tx code:
+ *  1.  An abstraction of the tx descriptor, to hide the
+ *      differences between the HL vs. LL tx descriptor.
+ *  2.  Functions for allocating and freeing HTT tx descriptors.
+ *  3.  The function that accepts a tx frame from txrx and sends the
+ *      tx frame to HTC.
+ */
+#include <osdep.h>              /* uint32_t, offsetof, etc. */
+#include <cdf_types.h>          /* cdf_dma_addr_t */
+#include <cdf_memory.h>         /* cdf_os_mem_alloc_consistent et al */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
+#include <cdf_time.h>           /* cdf_mdelay */
+
+#include <htt.h>                /* htt_tx_msdu_desc_t */
+#include <htc.h>                /* HTC_HDR_LENGTH */
+#include <htc_api.h>            /* htc_flush_surprise_remove */
+#include <ol_cfg.h>             /* ol_cfg_netbuf_frags_max, etc. */
+#include <ol_htt_tx_api.h>      /* HTT_TX_DESC_VADDR_OFFSET */
+#include <ol_txrx_htt_api.h>    /* ol_tx_msdu_id_storage */
+#include <htt_internal.h>
+
+/* IPA Micro controler TX data packet HTT Header Preset */
+/* 31 | 30  29 | 28 | 27 | 26  22  | 21   16 | 15  13   | 12  8      | 7 0
+   *----------------------------------------------------------------------------
+ * R  | CS  OL | R  | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
+ * 0  | 0      | 0  |    | 0x1F    | 0       | 2        | 0          | 0x01
+ ***----------------------------------------------------------------------------
+ * pkt ID                                    | pkt length
+ ***----------------------------------------------------------------------------
+ *                                frag_desc_ptr
+ ***----------------------------------------------------------------------------
+ *                                   peer_id
+ ***----------------------------------------------------------------------------
+ */
+#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
+
+/*--- setup / tear-down functions -------------------------------------------*/
+
+#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
+uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
+#endif
+
+/**
+ * htt_tx_attach() - allocate and initialize the HTT tx descriptor pool(s)
+ * @pdev:            the HTT instance to attach tx resources to
+ * @desc_pool_elems: number of tx descriptors to allocate
+ *
+ * Allocates the tx descriptor pool (and, for HELIUMPLUS_PADDR64 builds,
+ * a parallel MSDU extension / fragment descriptor pool) from consistent
+ * DMA memory, then links the tx descriptors into a singly linked
+ * freelist threaded through the descriptors themselves.
+ *
+ * Return: 0 on success, -ENOBUFS on allocation failure
+ */
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
+{
+	int i, pool_size;
+	uint32_t **p;
+	cdf_dma_addr_t pool_paddr;
+
+#if defined(HELIUMPLUS_PADDR64)
+	pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
+
+	if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
+		/*
+		* sizeof MSDU_EXT/Fragmentation descriptor.
+		*/
+		pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
+	} else {
+		/*
+		 * Add the fragmentation descriptor elements.
+		 * Add the most that the OS may deliver, plus one more
+		 * in case the txrx code adds a prefix fragment (for
+		 * TSO or audio interworking SNAP header)
+		 */
+		pdev->frag_descs.size =
+			(ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
+			+ 4;
+	}
+#else /* ! defined(HELIUMPLUS_PADDR64) */
+	/*
+	 * Start with the size of the base struct
+	 * that actually gets downloaded.
+	 *
+	 * Add the fragmentation descriptor elements.
+	 * Add the most that the OS may deliver, plus one more
+	 * in case the txrx code adds a prefix fragment (for
+	 * TSO or audio interworking SNAP header)
+	 */
+	pdev->tx_descs.size =
+		sizeof(struct htt_host_tx_desc_t)
+		+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
+		   /* 2x uint32_t */
+		+ 4; /* uint32_t fragmentation list terminator */
+
+	/* each pool element must at least hold a freelist next-pointer */
+	if (pdev->tx_descs.size < sizeof(uint32_t *))
+		pdev->tx_descs.size = sizeof(uint32_t *);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	/*
+	 * Make sure tx_descs.size is a multiple of 4-bytes.
+	 * It should be, but round up just to be sure.
+	 */
+	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
+
+	pdev->tx_descs.pool_elems = desc_pool_elems;
+	pdev->tx_descs.alloc_cnt = 0;
+
+	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
+
+	pdev->tx_descs.pool_vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev, pool_size,
+			&pool_paddr,
+			cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
+
+	pdev->tx_descs.pool_paddr = pool_paddr;
+
+	if (!pdev->tx_descs.pool_vaddr)
+		return -ENOBUFS;       /* failure */
+
+	cdf_print("%s:htt_desc_start:0x%p htt_desc_end:0x%p\n", __func__,
+		  pdev->tx_descs.pool_vaddr,
+		  (uint32_t *) (pdev->tx_descs.pool_vaddr + pool_size));
+
+#if defined(HELIUMPLUS_PADDR64)
+	pdev->frag_descs.pool_elems = desc_pool_elems;
+	/*
+	 * Allocate space for MSDU extension descriptor
+	 * H/W expects this in contiguous memory
+	 */
+	pool_size = pdev->frag_descs.pool_elems * pdev->frag_descs.size;
+
+	pdev->frag_descs.pool_vaddr = cdf_os_mem_alloc_consistent(
+		pdev->osdev, pool_size, &pool_paddr,
+		cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
+
+	/*
+	 * NOTE(review): on this failure path the tx_descs pool allocated
+	 * above is not released here -- confirm the caller invokes
+	 * htt_tx_detach() when attach fails, otherwise this leaks.
+	 */
+	if (!pdev->frag_descs.pool_vaddr)
+		return -ENOBUFS; /* failure */
+
+	pdev->frag_descs.pool_paddr = pool_paddr;
+
+	cdf_print("%s:MSDU Ext.Table Start:0x%p MSDU Ext.Table End:0x%p\n",
+		  __func__, pdev->frag_descs.pool_vaddr,
+		  (u_int32_t *) (pdev->frag_descs.pool_vaddr + pool_size));
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
+	g_dbg_htt_desc_end_addr = (uint32_t *)
+				  (pdev->tx_descs.pool_vaddr + pool_size);
+	g_dbg_htt_desc_start_addr = (uint32_t *) pdev->tx_descs.pool_vaddr;
+#endif
+
+	/* link tx descriptors into a freelist */
+	pdev->tx_descs.freelist = (uint32_t *) pdev->tx_descs.pool_vaddr;
+	p = (uint32_t **) pdev->tx_descs.freelist;
+	/* the first word of each element points at the next element */
+	for (i = 0; i < desc_pool_elems - 1; i++) {
+		*p = (uint32_t *) (((char *)p) + pdev->tx_descs.size);
+		p = (uint32_t **) *p;
+	}
+	*p = NULL;
+
+	return 0;               /* success */
+}
+
+/**
+ * htt_tx_detach() - release the HTT tx descriptor pool(s)
+ * @pdev: the HTT instance whose tx resources are released
+ *
+ * Frees the consistent-memory pools allocated by htt_tx_attach().
+ * Safe to call with a NULL @pdev (no-op).
+ */
+void htt_tx_detach(struct htt_pdev_t *pdev)
+{
+	if (pdev) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			/* pool_size */
+			pdev->tx_descs.pool_elems * pdev->tx_descs.size,
+			pdev->tx_descs.pool_vaddr,
+			pdev->tx_descs.pool_paddr,
+			cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
+#if defined(HELIUMPLUS_PADDR64)
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			/* pool_size */
+			pdev->frag_descs.pool_elems *
+			pdev->frag_descs.size,
+			pdev->frag_descs.pool_vaddr,
+			pdev->frag_descs.pool_paddr,
+			cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	}
+}
+
+/*--- descriptor allocation functions ---------------------------------------*/
+
+/**
+ * htt_tx_desc_alloc() - pop a tx descriptor from the freelist
+ * @pdev:     the HTT instance the descriptor pool belongs to
+ * @paddr_lo: [out] physical address of the descriptor, including the
+ *            HTC-header headroom
+ *
+ * Also pre-programs the descriptor's fragmentation-descriptor pointer
+ * field, since both pools live in consistent memory at fixed offsets.
+ *
+ * Return: virtual address of the HTT tx descriptor (past the HTC
+ *         headroom), or NULL when the pool is exhausted
+ */
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo)
+{
+	struct htt_host_tx_desc_t *htt_host_tx_desc;    /* includes HTC hdr */
+	struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include  HTC hdr */
+	uint16_t index;
+	uint32_t *fragmentation_descr_field_ptr;
+
+	htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
+	if (!htt_host_tx_desc)
+		return NULL;    /* pool is exhausted */
+
+	htt_tx_desc = &htt_host_tx_desc->align32.tx_desc;
+
+	/* freelist is known to be non-NULL here (checked above) */
+	if (pdev->tx_descs.freelist) {
+		pdev->tx_descs.freelist =
+			*((uint32_t **) pdev->tx_descs.freelist);
+		pdev->tx_descs.alloc_cnt++;
+	}
+	/*
+	 * For LL, set up the fragmentation descriptor address.
+	 * Currently, this HTT tx desc allocation is performed once up front.
+	 * If this is changed to have the allocation done during tx, then it
+	 * would be helpful to have separate htt_tx_desc_alloc functions for
+	 * HL vs. LL, to remove the below conditional branch.
+	 */
+	fragmentation_descr_field_ptr = (uint32_t *)
+		((uint32_t *) htt_tx_desc) +
+		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+
+	/* index of this descriptor within the pool */
+	index = ((char *)htt_host_tx_desc -
+		 (char *)(((struct htt_host_tx_desc_t *)
+			   pdev->tx_descs.pool_vaddr))) /
+		pdev->tx_descs.size;
+	/*
+	 * The fragmentation descriptor is allocated from consistent
+	 * memory. Therefore, we can use the address directly rather
+	 * than having to map it from a virtual/CPU address to a
+	 * physical/bus address.
+	 */
+#if defined(HELIUMPLUS_PADDR64)
+#if HTT_PADDR64
+	/* this is: frags_desc_ptr.lo */
+	*fragmentation_descr_field_ptr = (uint32_t)
+		(pdev->frag_descs.pool_paddr +
+		 (pdev->frag_descs.size * index));
+	fragmentation_descr_field_ptr++;
+	/* frags_desc_ptr.hi */
+	*fragmentation_descr_field_ptr = 0;
+#else /* ! HTT_PADDR64 */
+	*fragmentation_descr_field_ptr = (uint32_t)
+		(pdev->frag_descs.pool_paddr +
+		 (pdev->frag_descs.size * index));
+	cdf_print("%s %d: i %d frag_paddr 0x%x\n",
+		  __func__, __LINE__, index,
+		  (*fragmentation_descr_field_ptr));
+#endif /* HTT_PADDR64 */
+#else /* !HELIUMPLUS_PADDR64 */
+	/* non-Helium: the frag list lives directly after the tx desc */
+	*fragmentation_descr_field_ptr =
+		HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+#endif /* HELIUMPLUS_PADDR64 */
+
+	/*
+	 * Include the headroom for the HTC frame header when specifying the
+	 * physical address for the HTT tx descriptor.
+	 */
+	*paddr_lo = (uint32_t) HTT_TX_DESC_PADDR(pdev, htt_host_tx_desc);
+	/*
+	 * The allocated tx descriptor space includes headroom for a
+	 * HTC frame header.  Hide this headroom, so that we don't have
+	 * to jump past the headroom each time we program a field within
+	 * the tx desc, but only once when we download the tx desc (and
+	 * the headroom) to the target via HTC.
+	 * Skip past the headroom and return the address of the HTT tx desc.
+	 */
+	return (void *)htt_tx_desc;
+}
+
+/**
+ * htt_tx_desc_free() - return a tx descriptor to the freelist
+ * @pdev:    the HTT instance the descriptor pool belongs to
+ * @tx_desc: the HTT tx descriptor (as returned by htt_tx_desc_alloc,
+ *           i.e. past the HTC headroom)
+ */
+void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
+{
+	char *htt_host_tx_desc = tx_desc;
+	/* rewind over the HTC frame header space */
+	htt_host_tx_desc -=
+		offsetof(struct htt_host_tx_desc_t, align32.tx_desc);
+	/* push the element back onto the head of the freelist */
+	*((uint32_t **) htt_host_tx_desc) = pdev->tx_descs.freelist;
+	pdev->tx_descs.freelist = (uint32_t *) htt_host_tx_desc;
+	pdev->tx_descs.alloc_cnt--;
+}
+
+/*--- descriptor field access methods ---------------------------------------*/
+
+/**
+ * htt_tx_desc_frags_table_set() - program the frags-descriptor pointer
+ * field of a tx descriptor
+ * @pdev:               the HTT instance the descriptor belongs to
+ * @htt_tx_desc:        the HTT tx descriptor to modify
+ * @paddr:              physical address to program when not resetting
+ * @frag_desc_paddr_lo: physical address to restore on reset
+ *                      (HELIUMPLUS_PADDR64 builds only)
+ * @reset:              nonzero to restore the default frags pointer,
+ *                      zero to set it to @paddr
+ */
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+				 void *htt_tx_desc,
+				 uint32_t paddr,
+				 uint32_t frag_desc_paddr_lo,
+				 int reset)
+{
+	uint32_t *fragmentation_descr_field_ptr;
+
+	fragmentation_descr_field_ptr = (uint32_t *)
+		((uint32_t *) htt_tx_desc) +
+		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+	if (reset) {
+#if defined(HELIUMPLUS_PADDR64)
+		*fragmentation_descr_field_ptr = frag_desc_paddr_lo;
+#else
+		/* default: the frag list immediately follows the tx desc */
+		*fragmentation_descr_field_ptr =
+			HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+#endif
+	} else {
+		*fragmentation_descr_field_ptr = paddr;
+	}
+}
+
+#if defined(HELIUMPLUS_PADDR64)
+/**
+ * htt_tx_frag_alloc() - look up a fragment descriptor by pool index
+ * @pdev:          the HTT instance the fragment pool belongs to
+ * @index:         index of the fragment descriptor within the pool
+ * @frag_paddr_lo: [out] physical address of the fragment descriptor
+ *
+ * Return: virtual address of the fragment descriptor, or NULL if
+ *         @index is out of range
+ */
+void *
+htt_tx_frag_alloc(htt_pdev_handle pdev,
+		  u_int16_t index,
+		  u_int32_t *frag_paddr_lo)
+{
+	/** Index should never be 0, since its used by the hardware
+	    to terminate the link. */
+	/*
+	 * NOTE(review): the bound check uses tx_descs.pool_elems while
+	 * the pool being indexed is frag_descs; htt_tx_attach() sets
+	 * both to the same element count, but frag_descs.pool_elems
+	 * would be the safer bound -- confirm and align.
+	 */
+	if (index >= pdev->tx_descs.pool_elems)
+		return NULL;
+
+	*frag_paddr_lo = (uint32_t)
+		(pdev->frag_descs.pool_paddr + (pdev->frag_descs.size * index));
+
+	return ((char *) pdev->frag_descs.pool_vaddr) +
+		(pdev->frag_descs.size * index);
+}
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+/* PUT THESE AS INLINE IN ol_htt_tx_api.h */
+
+/* Intentionally empty: no per-descriptor "postponed" marking is done
+ * in this implementation. */
+void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
+{
+}
+
+/**
+ * htt_tx_pending_discard() - discard all tx packets pending in HTC
+ * @pdev: the HTT instance whose HTC handle is flushed
+ */
+void htt_tx_pending_discard(htt_pdev_handle pdev)
+{
+	htc_flush_surprise_remove(pdev->htc_pdev);
+}
+
+/* Intentionally empty: no per-descriptor "more in batch" marking is
+ * done in this implementation. */
+void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
+{
+}
+
+/*--- tx send function ------------------------------------------------------*/
+
+#ifdef ATH_11AC_TXCOMPACT
+
+/**
+ * htt_tx_sched() - drain tx frames queued within HTT
+ * @pdev: the HTT instance whose pending-tx queue is drained
+ *
+ * Scheduling the Queued packets in HTT which could not be sent out
+ * because of No CE desc.  Frames are popped and handed to HTC until
+ * the queue empties or HTC rejects a frame, in which case the frame
+ * is re-queued at the head and draining stops.
+ */
+void htt_tx_sched(htt_pdev_handle pdev)
+{
+	cdf_nbuf_t msdu;
+	int download_len = pdev->download_len;
+	int packet_len;
+
+	HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
+	while (msdu != NULL) {
+		int not_accepted;
+		/* packet length includes HTT tx desc frag added above */
+		packet_len = cdf_nbuf_len(msdu);
+		if (packet_len < download_len) {
+			/*
+			 * This case of packet length being less than the
+			 * nominal download length can happen for a couple
+			 * of reasons:
+			 * In HL, the nominal download length is a large
+			 * artificial value.
+			 * In LL, the frame may not have the optional header
+			 * fields accounted for in the nominal download size
+			 * (LLC/SNAP header, IPv4 or IPv6 header).
+			 */
+			download_len = packet_len;
+		}
+
+		not_accepted =
+			htc_send_data_pkt(pdev->htc_pdev, msdu,
+					  pdev->htc_endpoint,
+					  download_len);
+		if (not_accepted) {
+			/* HTC is congested again; retry this frame later */
+			HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
+			return;
+		}
+		HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
+	}
+}
+
+/**
+ * htt_tx_send_std() - send a standard tx frame to the target via HTC
+ * @pdev:    the HTT instance the frame is sent on
+ * @msdu:    the tx frame (HTT tx desc already attached as a prefix frag)
+ * @msdu_id: tx frame ID (unused in this TXCOMPACT variant)
+ *
+ * If frames are already queued inside HTT, the frame joins the queue
+ * and the queue is drained; otherwise it is handed to HTC directly and
+ * queued only if HTC rejects it.
+ *
+ * Return: 0 (the frame is always either sent or queued)
+ */
+int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+
+	int download_len = pdev->download_len;
+
+	int packet_len;
+
+	/* packet length includes HTT tx desc frag added above */
+	packet_len = cdf_nbuf_len(msdu);
+	if (packet_len < download_len) {
+		/*
+		 * This case of packet length being less than the nominal
+		 * download length can happen for a couple of reasons:
+		 * In HL, the nominal download length is a large artificial
+		 * value.
+		 * In LL, the frame may not have the optional header fields
+		 * accounted for in the nominal download size (LLC/SNAP header,
+		 * IPv4 or IPv6 header).
+		 */
+		download_len = packet_len;
+	}
+
+	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+	/*
+	 * NOTE(review): sizeof(cdf_nbuf_data(msdu)) evaluates to the size
+	 * of a pointer, not the packet length -- confirm the intended
+	 * trace record size.
+	 */
+	DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
+				(uint8_t *)(cdf_nbuf_data(msdu)),
+				sizeof(cdf_nbuf_data(msdu))));
+	if (cdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
+		/* preserve ordering: queue behind already-pending frames */
+		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
+		htt_tx_sched(pdev);
+		return 0;
+	}
+
+	cdf_nbuf_trace_update(msdu, "HT:T:");
+	if (htc_send_data_pkt
+		    (pdev->htc_pdev, msdu, pdev->htc_endpoint, download_len)) {
+		/* HTC rejected the frame; park it for a later retry */
+		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
+	}
+
+	return 0;               /* success */
+
+}
+
+/**
+ * htt_tx_send_batch() - send a batch of tx frames (HL systems only)
+ * @pdev:      the HTT instance the frames would be sent on
+ * @head_msdu: head of the linked list of tx frames
+ * @num_msdus: number of frames in the list
+ *
+ * Batch download is not supported in the ATH_11AC_TXCOMPACT (LL)
+ * build; this stub logs, asserts, and rejects the whole batch.
+ *
+ * Return: @head_msdu (the entire, unsent batch)
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+{
+	/* fixed typo in log message: "curently" -> "currently" */
+	cdf_print("*** %s currently only applies for HL systems\n", __func__);
+	cdf_assert(0);
+	return head_msdu;
+}
+
+/**
+ * htt_tx_send_nonstd() - send a non-standard tx frame to the target
+ * @pdev:     the HTT instance the frame is sent on
+ * @msdu:     the tx frame
+ * @msdu_id:  tx frame ID (forwarded to htt_tx_send_std)
+ * @pkt_type: L2 packet type (currently only used conceptually; see
+ *            the comment below)
+ *
+ * Return: 0 (delegates to htt_tx_send_std)
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle pdev,
+		   cdf_nbuf_t msdu,
+		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
+{
+	int download_len;
+
+	/*
+	 * The pkt_type could be checked to see what L2 header type is present,
+	 * and then the L2 header could be examined to determine its length.
+	 * But for simplicity, just use the maximum possible header size,
+	 * rather than computing the actual header size.
+	 */
+	download_len = sizeof(struct htt_host_tx_desc_t)
+		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+		+ HTT_TX_HDR_SIZE_802_1Q
+		+ HTT_TX_HDR_SIZE_LLC_SNAP
+		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+	/* worst-case length must still fit the configured download size */
+	cdf_assert(download_len <= pdev->download_len);
+	return htt_tx_send_std(pdev, msdu, msdu_id);
+}
+
+#else                           /*ATH_11AC_TXCOMPACT */
+
+#ifdef QCA_TX_HTT2_SUPPORT
+/**
+ * htt_tx_htt2_get_ep_id() - choose the HTC endpoint for a tx frame
+ * @pdev: the HTT instance
+ * @msdu: the candidate tx frame
+ *
+ * Small frames flagged for parallel download are routed to the
+ * dedicated TX HTT2 endpoint when one is configured; all other frames
+ * use the regular HTT endpoint.
+ *
+ * Return: the HTC endpoint ID to send @msdu on
+ */
+static inline HTC_ENDPOINT_ID
+htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+{
+	/*
+	 * TX HTT2 service mainly for small sized frame and check if
+	 * this candidate frame allow or not.
+	 */
+	if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
+	    cdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
+	    (cdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
+		return pdev->htc_tx_htt2_endpoint;
+	else
+		return pdev->htc_endpoint;
+}
+#else
+/* without HTT2 support, all tx frames use the regular HTT endpoint */
+#define htt_tx_htt2_get_ep_id(pdev, msdu)     (pdev->htc_endpoint)
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+/**
+ * htt_tx_send_base() - hand one tx frame to HTC
+ * @pdev:         the HTT instance the frame is sent on
+ * @msdu:         the tx frame (HTT tx desc already attached as prefix frag)
+ * @msdu_id:      tx frame ID, stored in the HTC packet for completion
+ * @download_len: nominal number of bytes to download to the target
+ * @more_data:    nonzero if more frames follow in the same batch
+ *
+ * Return: 0 on success, -ENOBUFS if no HTC packet wrapper could be
+ *         allocated
+ */
+static inline int
+htt_tx_send_base(htt_pdev_handle pdev,
+		 cdf_nbuf_t msdu,
+		 uint16_t msdu_id, int download_len, uint8_t more_data)
+{
+	struct htt_host_tx_desc_t *htt_host_tx_desc;
+	struct htt_htc_pkt *pkt;
+	int packet_len;
+	HTC_ENDPOINT_ID ep_id;
+
+	/*
+	 * The HTT tx descriptor was attached as the prefix fragment to the
+	 * msdu netbuf during the call to htt_tx_desc_init.
+	 * Retrieve it so we can provide its HTC header space to HTC.
+	 */
+	htt_host_tx_desc = (struct htt_host_tx_desc_t *)
+			   cdf_nbuf_get_frag_vaddr(msdu, 0);
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -ENOBUFS;       /* failure */
+
+	pkt->msdu_id = msdu_id;
+	pkt->pdev_ctxt = pdev->txrx_pdev;
+
+	/* packet length includes HTT tx desc frag added above */
+	packet_len = cdf_nbuf_len(msdu);
+	if (packet_len < download_len) {
+		/*
+		 * This case of packet length being less than the nominal
+		 * download length can happen for a couple reasons:
+		 * In HL, the nominal download length is a large artificial
+		 * value.
+		 * In LL, the frame may not have the optional header fields
+		 * accounted for in the nominal download size (LLC/SNAP header,
+		 * IPv4 or IPv6 header).
+		 */
+		download_len = packet_len;
+	}
+
+	ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       pdev->tx_send_complete_part2,
+			       (unsigned char *)htt_host_tx_desc,
+			       download_len - HTC_HDR_LENGTH,
+			       ep_id,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
+
+	cdf_nbuf_trace_update(msdu, "HT:T:");
+	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+	/*
+	 * NOTE(review): sizeof(cdf_nbuf_data(msdu)) evaluates to the size
+	 * of a pointer, not the packet length -- confirm the intended
+	 * trace record size.
+	 */
+	DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
+				(uint8_t *)(cdf_nbuf_data(msdu)),
+				sizeof(cdf_nbuf_data(msdu))));
+	htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
+
+	return 0;               /* success */
+}
+
+/**
+ * htt_tx_send_batch() - send a batch of tx frames, one at a time
+ * @pdev:      the HTT instance the frames are sent on
+ * @head_msdu: head of the linked list of tx frames
+ * @num_msdus: number of frames in the list
+ *
+ * Return: linked list of frames that HTC rejected (NULL if all were
+ *         accepted)
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+{
+	cdf_nbuf_t rejected = NULL;
+	uint16_t *msdu_id_storage;
+	uint16_t msdu_id;
+	cdf_nbuf_t msdu;
+	/*
+	 * FOR NOW, iterate through the batch, sending the frames singly.
+	 * Eventually HTC and HIF should be able to accept a batch of
+	 * data frames rather than singles.
+	 */
+	msdu = head_msdu;
+	while (num_msdus--) {
+		cdf_nbuf_t next_msdu = cdf_nbuf_next(msdu);
+		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
+		msdu_id = *msdu_id_storage;
+
+		/*
+		 * htt_tx_send_base() returns 0 on success and nonzero
+		 * (-ENOBUFS) on failure.
+		 */
+		if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
+				     num_msdus)) {
+			/* prepend the failed frame to the rejected list */
+			cdf_nbuf_set_next(msdu, rejected);
+			rejected = msdu;
+		}
+		msdu = next_msdu;
+	}
+	return rejected;
+}
+
+/**
+ * htt_tx_send_nonstd() - send a non-standard tx frame to the target
+ * @pdev:     the HTT instance the frame is sent on
+ * @msdu:     the tx frame
+ * @msdu_id:  tx frame ID, forwarded to htt_tx_send_base
+ * @pkt_type: L2 packet type (currently only used conceptually; see
+ *            the comment below)
+ *
+ * NOTE(review): unlike the ATH_11AC_TXCOMPACT variant, this path does
+ * not assert download_len <= pdev->download_len -- confirm whether the
+ * check was deliberately omitted here.
+ *
+ * Return: 0 on success, -ENOBUFS on failure (from htt_tx_send_base)
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle pdev,
+		   cdf_nbuf_t msdu,
+		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
+{
+	int download_len;
+
+	/*
+	 * The pkt_type could be checked to see what L2 header type is present,
+	 * and then the L2 header could be examined to determine its length.
+	 * But for simplicity, just use the maximum possible header size,
+	 * rather than computing the actual header size.
+	 */
+	download_len = sizeof(struct htt_host_tx_desc_t)
+		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX      /* worst case */
+		+ HTT_TX_HDR_SIZE_802_1Q
+		+ HTT_TX_HDR_SIZE_LLC_SNAP
+		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+	return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
+}
+
+/**
+ * htt_tx_send_std() - send a standard tx frame to the target via HTC
+ * @pdev:    the HTT instance the frame is sent on
+ * @msdu:    the tx frame
+ * @msdu_id: tx frame ID, forwarded to htt_tx_send_base
+ *
+ * Return: 0 on success, -ENOBUFS on failure (from htt_tx_send_base)
+ */
+int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+	return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
+}
+
+#endif /*ATH_11AC_TXCOMPACT */
+#ifdef HTT_DBG
+/**
+ * htt_tx_desc_display() - dump the fields of an HTT tx descriptor
+ * @tx_desc: the HTT tx descriptor to print
+ *
+ * Debug-only helper (HTT_DBG builds).  Field decoding assumes
+ * little-endian byte order.
+ */
+void htt_tx_desc_display(void *tx_desc)
+{
+	struct htt_tx_msdu_desc_t *htt_tx_desc;
+
+	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;
+
+	/* only works for little-endian */
+	cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
+	cdf_print("  msg type = %d\n", htt_tx_desc->msg_type);
+	cdf_print("  pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
+	cdf_print("  pkt type = %d\n", htt_tx_desc->pkt_type);
+	cdf_print("  vdev ID = %d\n", htt_tx_desc->vdev_id);
+	cdf_print("  ext TID = %d\n", htt_tx_desc->ext_tid);
+	cdf_print("  postponed = %d\n", htt_tx_desc->postponed);
+#if HTT_PADDR64
+	cdf_print("  reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
+	cdf_print("  cksum_offload = %d\n", htt_tx_desc->cksum_offload);
+	cdf_print("  tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
+#else /* !HTT_PADDR64 */
+	cdf_print("  batch more = %d\n", htt_tx_desc->more_in_batch);
+#endif /* HTT_PADDR64 */
+	cdf_print("  length = %d\n", htt_tx_desc->len);
+	cdf_print("  id = %d\n", htt_tx_desc->id);
+#if HTT_PADDR64
+	cdf_print("  frag desc addr.lo = %#x\n",
+		  htt_tx_desc->frags_desc_ptr.lo);
+	cdf_print("  frag desc addr.hi = %#x\n",
+		  htt_tx_desc->frags_desc_ptr.hi);
+	cdf_print("  peerid = %d\n", htt_tx_desc->peerid);
+	cdf_print("  chanfreq = %d\n", htt_tx_desc->chanfreq);
+#else /* ! HTT_PADDR64 */
+	cdf_print("  frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
+#endif /* HTT_PADDR64 */
+}
+#endif
+
+#ifdef IPA_OFFLOAD
+/**
+ * htt_tx_ipa_uc_attach() - allocate IPA micro-controller tx resources
+ * @pdev:                 the HTT instance
+ * @uc_tx_buf_sz:         size of each pre-allocated tx buffer
+ * @uc_tx_buf_cnt:        number of tx buffers / completion-ring slots
+ * @uc_tx_partition_base: base value for the packet IDs stamped into
+ *                        each buffer's pre-built HTT header
+ *
+ * Allocates the CE write-index word, the tx completion ring, and a
+ * pool of pre-initialized, DMA-mapped tx buffers whose physical
+ * addresses are published in the completion ring.  Buffer allocation
+ * is best-effort: on a mid-loop allocation failure the partial pool is
+ * kept and its actual size recorded in alloc_tx_buf_cnt, so that
+ * htt_tx_ipa_uc_detach() can unmap/free exactly the buffers owned here.
+ * (Previously this path returned without setting alloc_tx_buf_cnt,
+ * leaking every already-allocated buffer.)
+ *
+ * Return: 0 on success (possibly with a partial buffer pool),
+ *         -ENOBUFS if one of the fixed allocations fails
+ */
+int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int uc_tx_buf_sz,
+			 unsigned int uc_tx_buf_cnt,
+			 unsigned int uc_tx_partition_base)
+{
+	unsigned int tx_buffer_count;
+	cdf_nbuf_t buffer_vaddr;
+	uint32_t buffer_paddr;
+	uint32_t *header_ptr;
+	uint32_t *ring_vaddr;
+	int return_code = 0;
+	unsigned int tx_comp_ring_size;
+
+	/* Allocate CE Write Index WORD */
+	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
+				memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* Allocate TX COMP Ring */
+	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
+	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			tx_comp_ring_size,
+			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
+						 tx_comp_base),
+						memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+		cdf_print("%s: TX COMP ring alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_ce_idx;
+	}
+
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
+
+	/* Allocate TX BUF vAddress Storage */
+	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
+		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
+					      sizeof(cdf_nbuf_t));
+	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
+		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_comp_base;
+	}
+	/* zeroed so detach can skip never-allocated (NULL) slots */
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
+		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+
+	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
+	/* Allocate TX buffers as many as possible */
+	for (tx_buffer_count = 0;
+	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
+		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+					      uc_tx_buf_sz, 0, 4, false);
+		if (!buffer_vaddr) {
+			cdf_print("%s: TX BUF alloc fail, loop index: %d",
+				  __func__, tx_buffer_count);
+			/*
+			 * Best-effort: keep the buffers allocated so far.
+			 * Breaking (instead of returning) ensures
+			 * alloc_tx_buf_cnt is set below, so detach can
+			 * reclaim the partial pool.
+			 */
+			break;
+		}
+
+		/* Init buffer */
+		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
+
+		/* HTT control header */
+		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
+		header_ptr++;
+
+		/* PKT ID */
+		*header_ptr |= ((uint16_t) uc_tx_partition_base +
+				tx_buffer_count) << 16;
+
+		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
+		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		header_ptr++;
+
+		/* Frag Desc Pointer */
+		/* 64bits descriptor, Low 32bits */
+		*header_ptr = (uint32_t) (buffer_paddr + 20);
+		header_ptr++;
+
+		/* 64bits descriptor, high 32bits */
+		*header_ptr = 0;
+		header_ptr++;
+
+		/* chanreq, peerid */
+		*header_ptr = 0xFFFFFFFF;
+
+		/* FRAG Header */
+		/* 6 words TSO header */
+		header_ptr += 6;
+		*header_ptr = buffer_paddr + 64;
+
+		/* publish the buffer's physical address in the comp ring */
+		*ring_vaddr = buffer_paddr;
+		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
+			buffer_vaddr;
+		/*
+		 * Memory barrier to ensure actual value updated
+		 * NOTE(review): no explicit barrier is issued here --
+		 * confirm whether one is required for the uC to observe
+		 * the ring write.
+		 */
+
+		ring_vaddr += 2;
+	}
+
+	/* record how many buffers were actually allocated and mapped */
+	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
+
+	return 0;
+
+free_tx_comp_base:
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
+								ctrl_pdev) * 4,
+				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+				   cdf_get_dma_mem_context((&pdev->
+							    ipa_uc_tx_rsc.
+							    tx_comp_base),
+							   memctx));
+free_tx_ce_idx:
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   4,
+				   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+				   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+				   cdf_get_dma_mem_context((&pdev->
+							    ipa_uc_tx_rsc.
+							    tx_ce_idx),
+							   memctx));
+	return return_code;
+}
+
+/**
+ * htt_tx_ipa_uc_detach() - release IPA micro-controller tx resources
+ * @pdev: the HTT instance
+ *
+ * Frees the CE write-index word, the tx completion ring, every tx
+ * buffer recorded in alloc_tx_buf_cnt (unmapping each with the same
+ * CDF_DMA_BIDIRECTIONAL direction it was mapped with at attach time --
+ * previously this mismatched by unmapping CDF_DMA_FROM_DEVICE), and
+ * the buffer-pointer storage array.
+ *
+ * Return: 0
+ */
+int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	uint16_t idx;
+
+	if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+			pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
+				memctx));
+	}
+
+	if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
+			pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+			pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
+						 tx_comp_base),
+						memctx));
+	}
+
+	/* Free each single buffer */
+	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
+		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
+			/* unmap direction must match the attach-time map */
+			cdf_nbuf_unmap(pdev->osdev,
+				       pdev->ipa_uc_tx_rsc.
+				       tx_buf_pool_vaddr_strg[idx],
+				       CDF_DMA_BIDIRECTIONAL);
+			cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
+				      tx_buf_pool_vaddr_strg[idx]);
+		}
+	}
+
+	/* Free storage */
+	cdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
+
+	return 0;
+}
+#endif /* IPA_OFFLOAD */
+
+#if defined(FEATURE_TSO)
+/**
+ * htt_tx_desc_fill_tso_info() - fill an MSDU extension descriptor with
+ * TSO segment info
+ * @pdev:     the HTT instance (unused here)
+ * @desc:     the MSDU extension descriptor to fill
+ * @tso_info: per-packet TSO state; the current segment's flags and
+ *            fragment list are copied into @desc
+ */
+void
+htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
+	 struct cdf_tso_info_t *tso_info)
+{
+	u_int32_t *word;
+	int i;
+	struct cdf_tso_seg_elem_t *tso_seg = tso_info->curr_seg;
+	struct msdu_ext_desc_t *msdu_ext_desc = (struct msdu_ext_desc_t *)desc;
+
+	word = (u_int32_t *)(desc);
+
+	/* Initialize the TSO flags per MSDU */
+	((struct msdu_ext_desc_t *)msdu_ext_desc)->tso_flags =
+		 tso_seg->seg.tso_flags;
+
+	/* First 24 bytes (6*4) contain the TSO flags */
+	word += 6;
+
+	for (i = 0; i < tso_seg->seg.num_frags; i++) {
+		/* [31:0] first 32 bits of the buffer pointer  */
+		*word = tso_seg->seg.tso_frags[i].paddr_low_32;
+		word++;
+		/* [15:0] the upper 16 bits of the first buffer pointer */
+		/* [31:16] length of the first buffer */
+		*word = (tso_seg->seg.tso_frags[i].length << 16);
+		word++;
+	}
+
+	/*
+	 * Zero the entry after the last fragment, presumably as a list
+	 * terminator for the hardware -- NOTE(review): confirm against
+	 * the MSDU_EXT descriptor definition.
+	 */
+	if (tso_seg->seg.num_frags < FRAG_NUM_MAX) {
+		*word = 0;
+		word++;
+		*word = 0;
+	}
+}
+#endif /* FEATURE_TSO */

+ 373 - 0
core/dp/htt/htt_types.h

@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HTT_TYPES__H_
+#define _HTT_TYPES__H_
+
+#include <osdep.h>              /* uint16_t, dma_addr_t */
+#include <cdf_types.h>          /* cdf_device_t */
+#include <cdf_lock.h>           /* cdf_spinlock_t */
+#include <cdf_softirq_timer.h>  /* cdf_softirq_timer_t */
+#include <cdf_atomic.h>         /* cdf_atomic_inc */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <htc_api.h>            /* HTC_PACKET */
+
+#include <ol_ctrl_api.h>        /* ol_pdev_handle */
+#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
+
+/* Enables the dbg_* DMA debug counters inside struct htt_pdev_t's rx_ring */
+#define DEBUG_DMA_DONE
+
+/* Lock type used for the HTT tx mutexes (txnbufq_mutex, htt_tx_mutex) */
+#define HTT_TX_MUTEX_TYPE cdf_spinlock_t
+
+#ifdef QCA_TX_HTT2_SUPPORT
+#ifndef HTC_TX_HTT2_MAX_SIZE
+/* Must stay in sync with the target-side definition. */
+#define HTC_TX_HTT2_MAX_SIZE    (120)
+#endif
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+
+/* HTT wrapper around an HTC packet */
+struct htt_htc_pkt {
+	void *pdev_ctxt;        /* HTT pdev context this packet belongs to */
+	dma_addr_t nbuf_paddr;  /* DMA address of the network buffer */
+	HTC_PACKET htc_pkt;     /* embedded HTC packet */
+	uint16_t msdu_id;       /* id of the MSDU carried by this packet */
+};
+
+/*
+ * Free-list container for htt_htc_pkt: while an element is unused, the
+ * same storage holds the 'next' free-list link (see htt_htc_pkt_freelist
+ * in struct htt_pdev_t).
+ */
+struct htt_htc_pkt_union {
+	union {
+		struct htt_htc_pkt pkt;         /* in-use view */
+		struct htt_htc_pkt_union *next; /* free-list view */
+	} u;
+};
+
+/*
+ * HTT host descriptor:
+ * Include the htt_tx_msdu_desc that gets downloaded to the target,
+ * but also include the HTC_FRAME_HDR and alignment padding that
+ * precede the htt_tx_msdu_desc.
+ * htc_send_data_pkt expects this header space at the front of the
+ * initial fragment (i.e. tx descriptor) that is downloaded.
+ */
+struct htt_host_tx_desc_t {
+	uint8_t htc_header[HTC_HEADER_LEN];     /* space for the HTC header */
+	/* force the tx_desc field to begin on a 4-byte boundary */
+	union {
+		uint32_t dummy_force_align;     /* alignment only, never used */
+		struct htt_tx_msdu_desc_t tx_desc; /* desc downloaded to target */
+	} align32;
+};
+
+/* One element of the management-frame tx descriptor pool */
+struct htt_tx_mgmt_desc_buf {
+	cdf_nbuf_t msg_buf;     /* HTT message buffer */
+	A_BOOL is_inuse;        /* true while this pool element is allocated */
+	cdf_nbuf_t mgmt_frm;    /* the management frame being transmitted */
+};
+
+/* Management-frame tx descriptor pool context */
+struct htt_tx_mgmt_desc_ctxt {
+	struct htt_tx_mgmt_desc_buf *pool;      /* descriptor pool array */
+	A_UINT32 pending_cnt;                   /* # of descriptors in use */
+};
+
+/* Minimal doubly-linked list node (used by the rx hash table below) */
+struct htt_list_node {
+	struct htt_list_node *prev;
+	struct htt_list_node *next;
+};
+
+/*
+ * Rx hash table entry: associates an rx buffer's physical address with
+ * its network buffer.
+ */
+struct htt_rx_hash_entry {
+	A_UINT32 paddr;         /* physical address of the rx buffer (key) */
+	cdf_nbuf_t netbuf;      /* the corresponding network buffer */
+	A_UINT8 fromlist;       /* nonzero if taken from the bucket freepool
+				 * (assumption from name - confirm) */
+	struct htt_list_node listnode;  /* linkage within the bucket */
+#ifdef RX_HASH_DEBUG
+	A_UINT32 cookie;        /* debug cookie */
+#endif
+};
+
+/* One bucket of the rx paddr -> netbuf hash table */
+struct htt_rx_hash_bucket {
+	struct htt_list_node listhead;          /* list of active entries */
+	struct htt_rx_hash_entry *entries;      /* preallocated entry storage */
+	struct htt_list_node freepool;          /* list of unused entries */
+#ifdef RX_HASH_DEBUG
+	A_UINT32 count;                         /* # of entries in the bucket */
+#endif
+};
+
+/* Memory shared between the IPA micro controller, the WLAN host driver,
+ * and the firmware. */
+struct uc_shared_mem_t {
+	uint32_t *vaddr;                /* CPU virtual address */
+	cdf_dma_addr_t paddr;           /* DMA (bus/physical) address */
+	cdf_dma_mem_context(memctx);    /* DMA allocation context */
+};
+
+/* WLAN TX resources for the micro controller (IPA uC) datapath offload */
+struct htt_ipa_uc_tx_resource_t {
+	struct uc_shared_mem_t tx_ce_idx;       /* shared copy-engine index */
+	struct uc_shared_mem_t tx_comp_base;    /* tx completion ring base */
+
+	uint32_t tx_comp_idx_paddr;     /* paddr of the tx completion index */
+	cdf_nbuf_t *tx_buf_pool_vaddr_strg; /* storage array of pre-allocated
+					     * tx network buffers */
+	uint32_t alloc_tx_buf_cnt;      /* # of tx buffers actually allocated */
+};
+
+/* WLAN RX resources for the micro controller (IPA uC) datapath offload */
+struct htt_ipa_uc_rx_resource_t {
+	cdf_dma_addr_t rx_rdy_idx_paddr;        /* paddr of rx-ready index */
+	struct uc_shared_mem_t rx_ind_ring_base; /* rx indication ring base */
+	struct uc_shared_mem_t rx_ipa_prc_done_idx; /* IPA processed index */
+	uint32_t rx_ind_ring_size;              /* rx indication ring size */
+
+	/* 2nd RX ring */
+	cdf_dma_addr_t rx2_rdy_idx_paddr;
+	struct uc_shared_mem_t rx2_ind_ring_base;
+	struct uc_shared_mem_t rx2_ipa_prc_done_idx;
+	uint32_t rx2_ind_ring_size;
+};
+
+/* Layout of one element of the IPA uC rx indication ring */
+struct ipa_uc_rx_ring_elem_t {
+	uint32_t rx_packet_paddr;       /* physical address of the rx packet */
+	uint16_t vdev_id;               /* vdev the packet belongs to */
+	uint16_t rx_packet_leng;        /* packet length (field name kept
+					 * as-is: shared layout) */
+};
+
+#if defined(HELIUMPLUS_PADDR64)
+/*
+ * MSDU extension descriptor: 24 bytes (6 words) of TSO flags followed by
+ * six fragment {pointer, length} pairs.  Filled by
+ * htt_tx_desc_fill_tso_info(); the layout is presumably consumed by the
+ * target - do not reorder fields.
+ */
+struct msdu_ext_desc_t {
+#if defined(FEATURE_TSO)
+	struct cdf_tso_flags_t tso_flags;
+#else
+	u_int32_t tso_flag0;
+	u_int32_t tso_flag1;
+	u_int32_t tso_flag2;
+	u_int32_t tso_flag3;
+	u_int32_t tso_flag4;
+	u_int32_t tso_flag5;
+#endif
+	u_int32_t frag_ptr0;
+	u_int32_t frag_len0;
+	u_int32_t frag_ptr1;
+	u_int32_t frag_len1;
+	u_int32_t frag_ptr2;
+	u_int32_t frag_len2;
+	u_int32_t frag_ptr3;
+	u_int32_t frag_len3;
+	u_int32_t frag_ptr4;
+	u_int32_t frag_len4;
+	u_int32_t frag_ptr5;
+	u_int32_t frag_len5;
+};
+#endif  /* defined(HELIUMPLUS_PADDR64) */
+
+/*
+ * htt_pdev_t - per-device context for the HTT (host <-> target) layer:
+ * HTC endpoint state, cached configuration, the rx buffer ring, tx
+ * descriptor pools, and IPA micro-controller offload resources.
+ */
+struct htt_pdev_t {
+	ol_pdev_handle ctrl_pdev;       /* control-plane pdev handle */
+	ol_txrx_pdev_handle txrx_pdev;  /* data-plane (txrx) pdev handle */
+	HTC_HANDLE htc_pdev;            /* HTC layer handle */
+	cdf_device_t osdev;             /* OS device (DMA alloc/map) */
+
+	HTC_ENDPOINT_ID htc_endpoint;   /* HTC endpoint for HTT messages */
+
+#ifdef QCA_TX_HTT2_SUPPORT
+	HTC_ENDPOINT_ID htc_tx_htt2_endpoint;
+	uint16_t htc_tx_htt2_max_size;
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+#ifdef ATH_11AC_TXCOMPACT
+	HTT_TX_MUTEX_TYPE txnbufq_mutex;        /* protects txnbufq */
+	cdf_nbuf_queue_t txnbufq;
+	struct htt_htc_pkt_union *htt_htc_pkt_misclist;
+#endif
+
+	/* recycled htt_htc_pkt containers */
+	struct htt_htc_pkt_union *htt_htc_pkt_freelist;
+	/* cached configuration flags */
+	struct {
+		int is_full_reorder_offload;
+		int default_tx_comp_req;
+		int ce_classify_enabled;
+	} cfg;
+	/* HTT version reported by the target */
+	struct {
+		uint8_t major;
+		uint8_t minor;
+	} tgt_ver;
+#if defined(HELIUMPLUS_PADDR64)
+	/* wifi IP version; accessed via the HTT_WIFI_IP macros */
+	struct {
+		u_int8_t major;
+		u_int8_t minor;
+	} wifi_ip_ver;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	struct {
+		struct {
+			/*
+			 * Ring of network buffer objects -
+			 * This ring is used exclusively by the host SW.
+			 * This ring mirrors the dev_addrs_ring that is shared
+			 * between the host SW and the MAC HW.
+			 * The host SW uses this netbufs ring to locate the nw
+			 * buffer objects whose data buffers the HW has filled.
+			 */
+			cdf_nbuf_t *netbufs_ring;
+			/*
+			 * Ring of buffer addresses -
+			 * This ring holds the "physical" device address of the
+			 * rx buffers the host SW provides for MAC HW to fill.
+			 */
+#if HTT_PADDR64
+			uint64_t *paddrs_ring;
+#else   /* ! HTT_PADDR64 */
+			uint32_t *paddrs_ring;
+#endif
+			cdf_dma_mem_context(memctx);
+		} buf;
+		/*
+		 * Base address of ring, as a "physical" device address rather
+		 * than a CPU address.
+		 */
+		uint32_t base_paddr;
+		int size;       /* how many elems in the ring (power of 2) */
+		unsigned size_mask;     /* size - 1 */
+
+		int fill_level; /* how many rx buffers to keep in the ring */
+		int fill_cnt;   /* # of rx buffers (full+empty) in the ring */
+
+		/*
+		 * target_idx -
+		 * Without reorder offload:
+		 * not used
+		 * With reorder offload:
+		 * points to the location in the rx ring from which rx buffers
+		 * are available to copy into the MAC DMA ring
+		 */
+		struct {
+			uint32_t *vaddr;
+			uint32_t paddr;
+			cdf_dma_mem_context(memctx);
+		} target_idx;
+
+		/*
+		 * alloc_idx/host_idx -
+		 * Without reorder offload:
+		 * where HTT SW has deposited empty buffers
+		 * This is allocated in consistent mem, so that the FW can read
+		 * this variable, and program the HW's FW_IDX reg with the value
+		 * of this shadow register
+		 * With reorder offload:
+		 * points to the end of the available free rx buffers
+		 */
+		struct {
+			uint32_t *vaddr;
+			uint32_t paddr;
+			cdf_dma_mem_context(memctx);
+		} alloc_idx;
+
+		/* sw_rd_idx -
+		 * where HTT SW has processed bufs filled by rx MAC DMA */
+		struct {
+			unsigned msdu_desc;
+			unsigned msdu_payld;
+		} sw_rd_idx;
+
+		/*
+		 * refill_retry_timer - timer triggered when the ring is not
+		 * refilled to the level expected
+		 */
+		cdf_softirq_timer_t refill_retry_timer;
+
+		/*
+		 * refill_ref_cnt - ref cnt for Rx buffer replenishment - this
+		 * variable is used to guarantee that only one thread tries
+		 * to replenish Rx ring.
+		 */
+		cdf_atomic_t refill_ref_cnt;
+#ifdef DEBUG_DMA_DONE
+		uint32_t dbg_initial_msdu_payld;
+		uint32_t dbg_mpdu_range;
+		uint32_t dbg_mpdu_count;
+		uint32_t dbg_ring_idx;
+		uint32_t dbg_refill_cnt;
+		uint32_t dbg_sync_success;
+#endif
+#ifdef HTT_RX_RESTORE
+		int rx_reset;
+		uint8_t htt_rx_restore;
+#endif
+		/* rx paddr -> netbuf hash table (full reorder offload) */
+		struct htt_rx_hash_bucket *hash_table;
+		uint32_t listnode_offset;
+	} rx_ring;
+	long rx_fw_desc_offset;
+	int rx_mpdu_range_offset_words;
+	int rx_ind_msdu_byte_idx;
+
+	/* pool of HTT tx descriptors */
+	struct {
+		int size;       /* of each HTT tx desc */
+		int pool_elems;
+		int alloc_cnt;
+		char *pool_vaddr;
+		uint32_t pool_paddr;
+		uint32_t *freelist;
+		cdf_dma_mem_context(memctx);
+	} tx_descs;
+#if defined(HELIUMPLUS_PADDR64)
+	/* pool of fragment / MSDU extension descriptors */
+	struct {
+		int size; /* of each Fragment/MSDU-Ext descriptor */
+		int pool_elems;
+		char *pool_vaddr;
+		uint32_t pool_paddr;
+		cdf_dma_mem_context(memctx);
+	} frag_descs;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+	int download_len;
+	void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
+				       cdf_nbuf_t msdu, uint16_t msdu_id);
+
+	HTT_TX_MUTEX_TYPE htt_tx_mutex;
+
+	/* error statistics */
+	struct {
+		int htc_err_cnt;
+	} stats;
+
+	struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
+	struct targetdef_s *targetdef;
+	struct ce_reg_def *target_ce_def;
+
+	/* IPA micro controller offload resources (see structs above) */
+	struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
+	struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
+};
+
+/* Return the HTC endpoint id recorded in an HTT pdev handle */
+#define HTT_EPID_GET(_htt_pdev_hdl)  \
+	(((struct htt_pdev_t *)(_htt_pdev_hdl))->htc_endpoint)
+
+#if defined(HELIUMPLUS_PADDR64)
+/* True iff the recorded wifi IP version is exactly major x, minor y */
+#define HTT_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major == (x)) &&	\
+				 ((pdev)->wifi_ip_ver.minor == (y)))
+
+/*
+ * Record the wifi IP version in the pdev.
+ * Fix: the original joined the two assignments with '&&', so a major
+ * version of 0 short-circuited the expression and left 'minor' unset.
+ * The comma operator guarantees both assignments always execute.
+ */
+#define HTT_SET_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major = (x)), \
+				     ((pdev)->wifi_ip_ver.minor = (y)))
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+#endif /* _HTT_TYPES__H_ */

+ 533 - 0
core/dp/htt/rx_desc.h

@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+/*
+ * REMIND: Copy one of rx_desc related structures here for export,
+ *         hopes they are always the same between Peregrine and Rome in future
+ */
+/*
+ * Rx descriptor "attention" word: per-frame error/classification flags.
+ * Bit positions are part of the HW descriptor layout - do not reorder.
+ * msdu_done (bit 31) indicates the descriptor is complete.
+ */
+struct rx_attention {
+	volatile
+	uint32_t first_mpdu:1, /* [0] */
+		last_mpdu:1, /* [1] */
+		mcast_bcast:1, /* [2] */
+		peer_idx_invalid:1, /* [3] */
+		peer_idx_timeout:1, /* [4] */
+		power_mgmt:1, /* [5] */
+		non_qos:1, /* [6] */
+		null_data:1, /* [7] */
+		mgmt_type:1, /* [8] */
+		ctrl_type:1, /* [9] */
+		more_data:1, /* [10] */
+		eosp:1, /* [11] */
+		u_apsd_trigger:1, /* [12] */
+		fragment:1, /* [13] */
+		order:1, /* [14] */
+		classification:1, /* [15] */
+		overflow_err:1, /* [16] */
+		msdu_length_err:1, /* [17] */
+		tcp_udp_chksum_fail:1, /* [18] */
+		ip_chksum_fail:1, /* [19] */
+		sa_idx_invalid:1, /* [20] */
+		da_idx_invalid:1, /* [21] */
+		sa_idx_timeout:1, /* [22] */
+		da_idx_timeout:1, /* [23] */
+		encrypt_required:1, /* [24] */
+		directed:1, /* [25] */
+		buffer_fragment:1, /* [26] */
+		mpdu_length_err:1, /* [27] */
+		tkip_mic_err:1, /* [28] */
+		decrypt_err:1, /* [29] */
+		fcs_err:1, /* [30] */
+		msdu_done:1; /* [31] */
+};
+
+/* Rx descriptor fragment info: per-ring "more" counts for rings 0-7 */
+struct rx_frag_info {
+	volatile
+	uint32_t ring0_more_count:8,   /* [7:0] */
+		ring1_more_count:8, /* [15:8] */
+		ring2_more_count:8, /* [23:16] */
+		ring3_more_count:8; /* [31:24] */
+	volatile
+	uint32_t ring4_more_count:8, /* [7:0] */
+		ring5_more_count:8, /* [15:8] */
+		ring6_more_count:8, /* [23:16] */
+		ring7_more_count:8; /* [31:24] */
+};
+
+/*
+ * Rx descriptor "msdu_start" section: per-MSDU length, L3/L4 offsets,
+ * protocol flags and flow hash.  Layout differs between Helium-plus and
+ * earlier targets; bit positions are HW-defined - do not reorder.
+ */
+struct rx_msdu_start {
+	volatile
+	uint32_t msdu_length:14, /* [13:0] */
+#if defined(HELIUMPLUS)
+		l3_offset:7, /* [20:14] */
+		ipsec_ah:1, /* [21] */
+		reserved_0a:2, /* [23:22] */
+		l4_offset:7, /* [30:24] */
+		ipsec_esp:1; /* [31] */
+#else
+		ip_offset:6, /* [19:14] */
+		ring_mask:4, /* [23:20] */
+		tcp_udp_offset:7, /* [30:24] */
+		reserved_0c:1; /* [31] */
+#endif /* defined(HELIUMPLUS) */
+#if defined(HELIUMPLUS)
+	volatile uint32_t flow_id_toeplitz:32; /* [31:0] */
+#else
+	volatile uint32_t flow_id_crc:32; /* [31:0] */
+#endif /* defined(HELIUMPLUS) */
+	volatile
+	uint32_t msdu_number:8, /* [7:0] */
+		decap_format:2, /* [9:8] */
+		ipv4_proto:1, /* [10] */
+		ipv6_proto:1, /* [11] */
+		tcp_proto:1, /* [12] */
+		udp_proto:1, /* [13] */
+		ip_frag:1, /* [14] */
+		tcp_only_ack:1, /* [15] */
+		sa_idx:11, /* [26:16] */
+		reserved_2b:5; /* [31:27] */
+#if defined(HELIUMPLUS_PADDR64)
+	volatile
+	uint32_t da_idx:11, /* [10:0] */
+		da_is_bcast_mcast:1, /* [11] */
+		reserved_3a:4, /* [15:12] */
+		ip4_protocol_ip6_next_header:8, /* [23:16] */
+		ring_mask:8; /* [31:24] */
+	volatile uint32_t toeplitz_hash_2_or_4:32; /* [31:0] */
+#endif /* defined(HELIUMPLUS_PADDR64) */
+};
+
+/*
+ * Rx descriptor "msdu_end" section: checksums, WAPI PN, first/last MSDU
+ * flags and (Helium-plus) flow/LRO info.  HW-defined layout - do not
+ * reorder fields.
+ */
+struct rx_msdu_end {
+	volatile
+	uint32_t ip_hdr_chksum:16, /* [15:0] */
+		tcp_udp_chksum:16; /* [31:16] */
+	volatile
+	uint32_t key_id_octet:8, /* [7:0] */
+#if defined(HELIUMPLUS)
+		classification_rule:6, /* [13:8] */
+		classify_not_done_truncate:1, /* [14] */
+		classify_not_done_cce_dis:1, /* [15] */
+#else
+		classification_filter:8, /* [15:8] */
+#endif /* defined(HELIUMPLUS) */
+	ext_wapi_pn_63_48:16; /* [31:16] */
+	volatile uint32_t ext_wapi_pn_95_64:32; /* [31:0] */
+	volatile uint32_t ext_wapi_pn_127_96:32; /* [31:0] */
+	volatile
+	uint32_t reported_mpdu_length:14, /* [13:0] */
+		first_msdu:1, /* [14] */
+		last_msdu:1, /* [15] */
+#if defined(HELIUMPLUS)
+		sa_idx_timeout:1, /* [16] */
+		da_idx_timeout:1, /* [17] */
+		msdu_limit_error:1, /* [18] */
+		classify_ring_mask:8, /* [26:19] */
+#endif /* defined(HELIUMPLUS) */
+		reserved_3a:3, /* [29:27] */
+		pre_delim_err:1, /* [30] */
+		reserved_3b:1; /* [31] */
+#if defined(HELIUMPLUS_PADDR64)
+	volatile uint32_t ipv6_options_crc:32;
+	volatile uint32_t tcp_seq_number:32;
+	volatile uint32_t tcp_ack_number:32;
+	volatile
+	uint32_t tcp_flag:9, /* [8:0] */
+		lro_eligible:1, /* [9] */
+		l3_header_padding:3, /* [12:10] */
+		reserved_8a:3, /* [15:13] */
+		window_size:16; /* [31:16] */
+	volatile
+	uint32_t da_offset:6, /* [5:0] */
+		sa_offset:6, /* [11:6] */
+		da_offset_valid:1, /* [12] */
+		sa_offset_valid:1, /* [13] */
+		type_offset:7, /* [20:14] */
+		reserved_9a:11; /* [31:21] */
+	volatile uint32_t rule_indication_31_0:32;
+	volatile uint32_t rule_indication_63_32:32;
+	volatile uint32_t rule_indication_95_64:32;
+	volatile uint32_t rule_indication_127_96:32;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+};
+
+/* Rx descriptor "mpdu_end" section: per-MPDU error status flags */
+struct rx_mpdu_end {
+	volatile
+	uint32_t reserved_0:13, /* [12:0] */
+		overflow_err:1, /* [13] */
+		last_mpdu:1, /* [14] */
+		post_delim_err:1, /* [15] */
+		post_delim_cnt:12, /* [27:16] */
+		mpdu_length_err:1, /* [28] */
+		tkip_mic_err:1, /* [29] */
+		decrypt_err:1, /* [30] */
+		fcs_err:1; /* [31] */
+};
+
+
+#if defined(HELIUMPLUS)
+
+/* Rx "mpdu_start" (Helium-plus layout): peer index, frame-control bits,
+ * sequence number and PN */
+struct rx_mpdu_start {
+	volatile
+	uint32_t peer_idx:11, /* [10:0] */
+		fr_ds:1, /* [11] */
+		to_ds:1, /* [12] */
+		encrypted:1, /* [13] */
+		retry:1, /* [14] */
+		reserved:1, /* [15] */
+		seq_num:12, /* [27:16] */
+		encrypt_type:4; /* [31:28] */
+	volatile uint32_t pn_31_0:32; /* [31:0] */
+	volatile
+	uint32_t pn_47_32:16, /* [15:0] */
+		toeplitz_hash:2, /* [17:16] */
+		reserved_2:10, /* [27:18] */
+		tid:4; /* [31:28] */
+};
+
+
+/* Rx "ppdu_start" (Helium-plus layout): per-chain RSSI and PHY
+ * signal-field (L-SIG/HT-SIG/VHT-SIG) information */
+struct rx_ppdu_start {
+	volatile
+	uint32_t rssi_pri_chain0:8, /* [7:0] */
+		rssi_sec20_chain0:8, /* [15:8] */
+		rssi_sec40_chain0:8, /* [23:16] */
+		rssi_sec80_chain0:8; /* [31:24] */
+	volatile
+	uint32_t rssi_pri_chain1:8, /* [7:0] */
+		rssi_sec20_chain1:8, /* [15:8] */
+		rssi_sec40_chain1:8, /* [23:16] */
+		rssi_sec80_chain1:8; /* [31:24] */
+	volatile
+	uint32_t rssi_pri_chain2:8, /* [7:0] */
+		rssi_sec20_chain2:8, /* [15:8] */
+		rssi_sec40_chain2:8, /* [23:16] */
+		rssi_sec80_chain2:8; /* [31:24] */
+	volatile
+	uint32_t rssi_pri_chain3:8, /* [7:0] */
+		rssi_sec20_chain3:8, /* [15:8] */
+		rssi_sec40_chain3:8, /* [23:16] */
+		rssi_sec80_chain3:8; /* [31:24] */
+	volatile
+	uint32_t rssi_comb:8, /* [7:0] */
+		bandwidth:3, /* [10:8] */
+		reserved_4a:5, /* [15:11] */
+		rssi_comb_ht:8, /* [23:16] */
+		reserved_4b:8; /* [31:24] */
+	volatile
+	uint32_t l_sig_rate:4, /*[3:0] */
+		l_sig_rate_select:1, /* [4] */
+		l_sig_length:12, /* [16:5] */
+		l_sig_parity:1, /* [17] */
+		l_sig_tail:6, /* [23:18] */
+		preamble_type:8; /* [31:24] */
+	volatile
+	uint32_t ht_sig_vht_sig_ah_sig_a_1:24, /* [23:0] */
+		captured_implicit_sounding:1, /* [24] */
+		reserved_6:7; /* [31:25] */
+	volatile
+	uint32_t ht_sig_vht_sig_ah_sig_a_2:24, /* [23:0] */
+		reserved_7:8; /* [31:24] */
+	volatile uint32_t vht_sig_b:32; /* [31:0] */
+	volatile
+	uint32_t service:16, /* [15:0] */
+		reserved_9:16; /* [31:16] */
+};
+/* RTT / positioning measurement results embedded in rx_pkt_end;
+ * valid only when rx_location_info_valid (bit 31 of the last word) is set */
+struct rx_location_info {
+	volatile
+	uint32_t rtt_fac_legacy:14, /* [13:0] */
+		rtt_fac_legacy_status:1, /* [14] */
+		rtt_fac_vht:14, /* [28:15] */
+		rtt_fac_vht_status:1, /* [29] */
+		rtt_cfr_status:1, /* [30] */
+		rtt_cir_status:1; /* [31] */
+	volatile
+	uint32_t rtt_fac_sifs:10, /* [9:0] */
+		rtt_fac_sifs_status:2, /* [11:10] */
+		rtt_channel_dump_size:11, /* [22:12] */
+		rtt_mac_phy_phase:2, /* [24:23] */
+		rtt_hw_ifft_mode:1, /* [25] */
+		rtt_btcf_status:1, /* [26] */
+		rtt_preamble_type:2, /* [28:27] */
+		rtt_pkt_bw:2, /* [30:29] */
+		rtt_gi_type:1; /* [31] */
+	volatile
+	uint32_t rtt_mcs_rate:4, /* [3:0] */
+		rtt_strongest_chain:2, /* [5:4] */
+		rtt_phase_jump:7, /* [12:6] */
+		rtt_rx_chain_mask:4, /* [16:13] */
+		rtt_tx_data_start_x_phase:1, /* [17] */
+		reserved_2:13, /* [30:18] */
+		rx_location_info_valid:1; /* [31] */
+};
+
+/* Rx "pkt_end" section: rx success/error status, PHY timestamps and
+ * embedded location (RTT) info */
+struct rx_pkt_end {
+	volatile
+	uint32_t rx_success:1, /* [0] */
+		reserved_0a:2, /* [2:1] */
+		error_tx_interrupt_rx:1, /* [3] */
+		error_ofdm_power_drop:1, /* [4] */
+		error_ofdm_restart:1, /* [5] */
+		error_cck_power_drop:1, /* [6] */
+		error_cck_restart:1, /* [7] */
+		reserved_0b:24; /* [31:8] */
+	volatile uint32_t phy_timestamp_1_lower_32:32; /* [31:0] */
+	volatile uint32_t phy_timestamp_1_upper_32:32; /* [31:0] */
+	volatile uint32_t phy_timestamp_2_lower_32:32; /* [31:0] */
+	volatile uint32_t phy_timestamp_2_upper_32:32; /* [31:0] */
+	struct rx_location_info rx_location_info;
+};
+
+/* Rx "phy_ppdu_end" section: detailed PHY-level error flags for the PPDU */
+struct rx_phy_ppdu_end {
+	volatile
+	uint32_t reserved_0a:2, /* [1:0] */
+		error_radar:1, /* [2] */
+		error_rx_abort:1, /* [3] */
+		error_rx_nap:1, /* [4] */
+		error_ofdm_timing:1, /* [5] */
+		error_ofdm_signal_parity:1, /* [6] */
+		error_ofdm_rate_illegal:1, /* [7] */
+		error_ofdm_length_illegal:1, /* [8] */
+		error_ppdu_ofdm_restart:1, /* [9] */
+		error_ofdm_service:1, /* [10] */
+		error_ppdu_ofdm_power_drop:1, /* [11] */
+		error_cck_blocker:1, /* [12] */
+		error_cck_timing:1, /* [13] */
+		error_cck_header_crc:1, /* [14] */
+		error_cck_rate_illegal:1, /* [15] */
+		error_cck_length_illegal:1, /* [16] */
+		error_ppdu_cck_restart:1, /* [17] */
+		error_cck_service:1, /* [18] */
+		error_ppdu_cck_power_drop:1, /* [19] */
+		error_ht_crc_err:1, /* [20] */
+		error_ht_length_illegal:1, /* [21] */
+		error_ht_rate_illegal:1, /* [22] */
+		error_ht_zlf:1, /* [23] */
+		error_false_radar_ext:1, /* [24] */
+		error_green_field:1, /* [25] */
+		error_spectral_scan:1, /* [26] */
+		error_rx_bw_gt_dyn_bw:1, /* [27] */
+		error_leg_ht_mismatch:1, /* [28] */
+		error_vht_crc_error:1, /* [29] */
+		error_vht_siga_unsupported:1, /* [30] */
+		error_vht_lsig_len_invalid:1; /* [31] */
+	volatile
+	uint32_t error_vht_ndp_or_zlf:1, /* [0] */
+		error_vht_nsym_lt_zero:1, /* [1] */
+		error_vht_rx_extra_symbol_mismatch:1, /* [2] */
+		error_vht_rx_skip_group_id0:1, /* [3] */
+		error_vht_rx_skip_group_id1to62:1, /* [4] */
+		error_vht_rx_skip_group_id63:1, /* [5] */
+		error_ofdm_ldpc_decoder_disabled:1, /* [6] */
+		error_defer_nap:1, /* [7] */
+		error_fdomain_timeout:1, /* [8] */
+		error_lsig_rel_check:1, /* [9] */
+		error_bt_collision:1, /* [10] */
+		error_unsupported_mu_feedback:1, /* [11] */
+		error_ppdu_tx_interrupt_rx:1, /* [12] */
+		error_rx_unsupported_cbf:1, /* [13] */
+		reserved_1:18; /* [31:14] */
+};
+
+/* Rx timing offset reported by the PHY for the PPDU */
+struct rx_timing_offset {
+	volatile
+	uint32_t timing_offset:12, /* [11:0] */
+		reserved:20; /* [31:12] */
+};
+
+/* Rx "ppdu_end" (Helium-plus layout): EVM pilots, timestamps, embedded
+ * pkt/phy end sections, coex info and MPDU FCS counts.  ppdu_done
+ * (bit 31 of the last word) marks descriptor completion. */
+struct rx_ppdu_end {
+	volatile uint32_t evm_p0:32;
+	volatile uint32_t evm_p1:32;
+	volatile uint32_t evm_p2:32;
+	volatile uint32_t evm_p3:32;
+	volatile uint32_t evm_p4:32;
+	volatile uint32_t evm_p5:32;
+	volatile uint32_t evm_p6:32;
+	volatile uint32_t evm_p7:32;
+	volatile uint32_t evm_p8:32;
+	volatile uint32_t evm_p9:32;
+	volatile uint32_t evm_p10:32;
+	volatile uint32_t evm_p11:32;
+	volatile uint32_t evm_p12:32;
+	volatile uint32_t evm_p13:32;
+	volatile uint32_t evm_p14:32;
+	volatile uint32_t evm_p15:32;
+	volatile uint32_t reserved_16:32;
+	volatile uint32_t reserved_17:32;
+	volatile uint32_t wb_timestamp_lower_32:32;
+	volatile uint32_t wb_timestamp_upper_32:32;
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	struct rx_timing_offset rx_timing_offset;
+	volatile
+	uint32_t rx_antenna:24, /* [23:0] */
+		tx_ht_vht_ack:1, /* [24] */
+		rx_pkt_end_valid:1, /* [25] */
+		rx_phy_ppdu_end_valid:1, /* [26] */
+		rx_timing_offset_valid:1, /* [27] */
+		bb_captured_channel:1, /* [28] */
+		unsupported_mu_nc:1, /* [29] */
+		otp_txbf_disable:1, /* [30] */
+		reserved_31:1; /* [31] */
+	volatile
+	uint32_t coex_bt_tx_from_start_of_rx:1, /* [0] */
+		coex_bt_tx_after_start_of_rx:1, /* [1] */
+		coex_wan_tx_from_start_of_rx:1, /* [2] */
+		coex_wan_tx_after_start_of_rx:1, /* [3] */
+		coex_wlan_tx_from_start_of_rx:1, /* [4] */
+		coex_wlan_tx_after_start_of_rx:1, /* [5] */
+		mpdu_delimiter_errors_seen:1, /* [6] */
+		ftm:1, /* [7] */
+		ftm_dialog_token:8, /* [15:8] */
+		ftm_follow_up_dialog_token:8, /* [23:16] */
+		reserved_32:8; /* [31:24] */
+	volatile
+	uint32_t before_mpdu_cnt_passing_fcs:8, /* [7:0] */
+		before_mpdu_cnt_failing_fcs:8, /* [15:8] */
+		after_mpdu_cnt_passing_fcs:8, /* [23:16] */
+		after_mpdu_cnt_failing_fcs:8; /* [31:24] */
+	volatile uint32_t phy_timestamp_tx_lower_32:32; /* [31:0] */
+	volatile uint32_t phy_timestamp_tx_upper_32:32; /* [31:0] */
+	volatile
+	uint32_t bb_length:16, /* [15:0] */
+		bb_data:1, /* [16] */
+		peer_idx_valid:1, /* [17] */
+		peer_idx:11, /* [28:18] */
+		reserved_26:2, /* [30:29] */
+		ppdu_done:1; /* [31] */
+};
+#else
+/* Rx "ppdu_start" (pre-Helium-plus layout): per-chain RSSI and PHY
+ * signal-field information */
+struct rx_ppdu_start {
+	volatile
+	uint32_t rssi_chain0_pri20:8, /* [7:0] */
+		rssi_chain0_sec20:8, /* [15:8] */
+		rssi_chain0_sec40:8, /* [23:16] */
+		rssi_chain0_sec80:8; /* [31:24] */
+	volatile
+	uint32_t rssi_chain1_pri20:8, /* [7:0] */
+		rssi_chain1_sec20:8, /* [15:8] */
+		rssi_chain1_sec40:8, /* [23:16] */
+		rssi_chain1_sec80:8; /* [31:24] */
+	volatile
+	uint32_t rssi_chain2_pri20:8, /* [7:0] */
+		rssi_chain2_sec20:8, /* [15:8] */
+		rssi_chain2_sec40:8, /* [23:16] */
+		rssi_chain2_sec80:8; /* [31:24] */
+	volatile
+	uint32_t rssi_chain3_pri20:8, /* [7:0] */
+		rssi_chain3_sec20:8, /* [15:8] */
+		rssi_chain3_sec40:8, /* [23:16] */
+		rssi_chain3_sec80:8; /* [31:24] */
+	volatile
+	uint32_t rssi_comb:8,  /* [7:0] */
+		reserved_4a:16, /* [23:8] */
+		is_greenfield:1, /* [24] */
+		reserved_4b:7; /* [31:25] */
+	volatile
+	uint32_t l_sig_rate:4, /* [3:0] */
+		l_sig_rate_select:1, /* [4] */
+		l_sig_length:12, /* [16:5] */
+		l_sig_parity:1, /* [17] */
+		l_sig_tail:6, /* [23:18] */
+		preamble_type:8; /* [31:24] */
+	volatile
+	uint32_t ht_sig_vht_sig_a_1:24, /* [23:0] */
+		reserved_6:8; /* [31:24] */
+	volatile
+	uint32_t ht_sig_vht_sig_a_2:24, /* [23:0] */
+		txbf_h_info:1, /* [24] */
+		reserved_7:7; /* [31:25] */
+	volatile
+	uint32_t vht_sig_b:29, /* [28:0] */
+		reserved_8:3; /* [31:29] */
+	volatile
+	uint32_t service:16,   /* [15:0] */
+		reserved_9:16; /* [31:16] */
+};
+
+
+/* Rx "mpdu_start" (pre-Helium-plus layout): peer index, frame-control
+ * bits, sequence number and PN */
+struct rx_mpdu_start {
+	volatile
+	uint32_t peer_idx:11,  /* [10:0] */
+		fr_ds:1, /* [11] */
+		to_ds:1, /* [12] */
+		encrypted:1, /* [13] */
+		retry:1, /* [14] */
+		txbf_h_info:1, /* [15] */
+		seq_num:12, /* [27:16] */
+		encrypt_type:4; /* [31:28] */
+	volatile uint32_t pn_31_0:32;   /* [31:0] */
+	volatile
+	uint32_t pn_47_32:16,  /* [15:0] */
+		directed:1, /* [16] */
+		reserved_2:11, /* [27:17] */
+		tid:4; /* [31:28] */
+};
+
+/* Rx "ppdu_end" (pre-Helium-plus layout): EVM pilots, timestamps, PHY
+ * error status and RTT correction.  ppdu_done (bit 31 of the last word)
+ * marks descriptor completion. */
+struct rx_ppdu_end {
+	volatile uint32_t evm_p0:32;    /* [31:0] */
+	volatile uint32_t evm_p1:32;    /* [31:0] */
+	volatile uint32_t evm_p2:32;    /* [31:0] */
+	volatile uint32_t evm_p3:32;    /* [31:0] */
+	volatile uint32_t evm_p4:32;    /* [31:0] */
+	volatile uint32_t evm_p5:32;    /* [31:0] */
+	volatile uint32_t evm_p6:32;    /* [31:0] */
+	volatile uint32_t evm_p7:32;    /* [31:0] */
+	volatile uint32_t evm_p8:32;    /* [31:0] */
+	volatile uint32_t evm_p9:32;    /* [31:0] */
+	volatile uint32_t evm_p10:32;   /* [31:0] */
+	volatile uint32_t evm_p11:32;   /* [31:0] */
+	volatile uint32_t evm_p12:32;   /* [31:0] */
+	volatile uint32_t evm_p13:32;   /* [31:0] */
+	volatile uint32_t evm_p14:32;   /* [31:0] */
+	volatile uint32_t evm_p15:32;   /* [31:0] */
+	volatile uint32_t tsf_timestamp:32; /* [31:0] */
+	volatile uint32_t wb_timestamp:32; /* [31:0] */
+	volatile
+	uint32_t locationing_timestamp:8, /* [7:0] */
+		phy_err_code:8, /* [15:8] */
+		phy_err:1, /* [16] */
+		rx_location:1, /* [17] */
+		txbf_h_info:1, /* [18] */
+		reserved_18:13; /* [31:19] */
+	volatile
+	uint32_t rx_antenna:24, /* [23:0] */
+		tx_ht_vht_ack:1, /* [24] */
+		bb_captured_channel:1, /* [25] */
+		reserved_19:6; /* [31:26] */
+	volatile
+	uint32_t rtt_correction_value:24, /* [23:0] */
+		reserved_20:7, /* [30:24] */
+		rtt_normal_mode:1; /* [31] */
+	volatile
+	uint32_t bb_length:16, /* [15:0] */
+		reserved_21:15, /* [30:16] */
+		ppdu_done:1; /* [31] */
+};
+#endif /* defined(HELIUMPLUS) */
+
+#endif /*_RX_DESC_H_*/

+ 543 - 0
core/dp/ol/inc/ol_cfg.h

@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_CFG__H_
+#define _OL_CFG__H_
+
+#include <cdf_types.h>          /* uint32_t */
+#include <ol_ctrl_api.h>        /* ol_pdev_handle */
+#include <cds_ieee80211_common.h>   /* ieee80211_qosframe_htc_addr4 */
+#include <enet.h>               /* LLC_SNAP_HDR_LEN */
+#include "wlan_tgt_def_config.h"
+
+/**
+ * @brief format of data frames delivered to/from the WLAN driver by/to the OS
+ */
+enum wlan_frm_fmt {
+	wlan_frm_fmt_unknown,
+	wlan_frm_fmt_raw,
+	wlan_frm_fmt_native_wifi,
+	wlan_frm_fmt_802_3,
+};
+
+struct wlan_ipa_uc_rsc_t {
+	u8 uc_offload_enabled;
+	u32 tx_max_buf_cnt;
+	u32 tx_buf_size;
+	u32 rx_ind_ring_size;
+	u32 tx_partition_base;
+};
+
+/* Config parameters for txrx_pdev */
+struct txrx_pdev_cfg_t {
+	u8 is_high_latency;
+	u8 defrag_timeout_check;
+	u8 rx_pn_check;
+	u8 pn_rx_fwd_check;
+	u8 host_addba;
+	u8 tx_free_at_download;
+	u8 rx_fwd_inter_bss;
+	u32 max_thruput_mbps;
+	u32 target_tx_credit;
+	u32 vow_config;
+	u32 tx_download_size;
+	u32 max_peer_id;
+	u32 max_vdev;
+	u32 max_nbuf_frags;
+	u32 throttle_period_ms;
+	enum wlan_frm_fmt frame_type;
+	u8 rx_fwd_disabled;
+	u8 is_packet_log_enabled;
+	u8 is_full_reorder_offload;
+	struct wlan_ipa_uc_rsc_t ipa_uc_rsc;
+	bool ip_tcp_udp_checksum_offload;
+	bool enable_rxthread;
+	bool ce_classify_enabled;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	uint32_t tx_flow_stop_queue_th;
+	uint32_t tx_flow_start_queue_offset;
+#endif
+};
+
+/**
+ * @brief Specify whether the system is high-latency or low-latency.
+ * @details
+ *  Indicate whether the system is operating in high-latency (message
+ *  based, e.g. USB) mode or low-latency (memory-mapped, e.g. PCIe) mode.
+ *  Some chips support just one type of host / target interface.
+ *  Other chips support both LL and HL interfaces (e.g. PCIe and USB),
+ *  so the selection will be made based on which bus HW is present, or
+ *  which is preferred if both are present.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> high-latency -OR- 0 -> low-latency
+ */
+int ol_cfg_is_high_latency(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the range of peer IDs.
+ * @details
+ *  Specify the maximum peer ID.  This is the maximum number of peers,
+ *  minus one.
+ *  This is used by the host to determine the size of arrays indexed by
+ *  peer ID.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum peer ID
+ */
+int ol_cfg_max_peer_id(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the max number of virtual devices within a physical device.
+ * @details
+ *  Specify how many virtual devices may exist within a physical device.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum number of virtual devices
+ */
+int ol_cfg_max_vdevs(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether host-side rx PN check is enabled or disabled.
+ * @details
+ *  Choose whether to allocate rx PN state information and perform
+ *  rx PN checks (if applicable, based on security type) on the host.
+ *  If the rx PN check is specified to be done on the host, the host SW
+ *  will determine which peers are using a security type (e.g. CCMP) that
+ *  requires a PN check.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> host performs rx PN check -OR- 0 -> no host-side rx PN check
+ */
+int ol_cfg_rx_pn_check(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether host-side rx forwarding is enabled or disabled.
+ * @details
+ *  Choose whether to check whether to forward rx frames to tx on the host.
+ *  For LL systems, this rx -> tx host-side forwarding check is typically
+ *  enabled.
+ *  For HL systems, the rx -> tx forwarding check is typically done on the
+ *  target.  However, even in HL systems, the host-side rx -> tx forwarding
+ *  will typically be enabled, as a second-tier safety net in case the
+ *  target doesn't have enough memory to store all rx -> tx forwarded frames.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> host does rx->tx forward -OR- 0 -> no host-side rx->tx forward
+ */
+int ol_cfg_rx_fwd_check(ol_pdev_handle pdev);
+
+/**
+ * @brief set rx fwd disable/enable.
+ * @details
+ *  Choose whether to forward rx frames to tx (where applicable) within the
+ *  WLAN driver, or to leave all forwarding up to the operating system.
+ *  Currently only intra-bss fwd is supported.
+ *
+ * @param pdev - handle to the physical device
+ * @param disable_rx_fwd 1 -> no rx->tx forward -OR- 0 -> rx->tx forward
+ */
+void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd);
+
+/**
+ * @brief Check whether rx forwarding is enabled or disabled.
+ * @details
+ *  Choose whether to forward rx frames to tx (where applicable) within the
+ *  WLAN driver, or to leave all forwarding up to the operating system.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> no rx->tx forward -OR- 0 -> rx->tx forward (in host or target)
+ */
+int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether to perform inter-BSS or intra-BSS rx->tx forwarding.
+ * @details
+ *  Check whether data received by an AP on one virtual device destined
+ *  to a STA associated with a different virtual device within the same
+ *  physical device should be forwarded within the driver, or whether
+ *  forwarding should only be done within a virtual device.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ *      1 -> forward both within and between vdevs
+ *      -OR-
+ *      0 -> forward only within a vdev
+ */
+int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify data frame format used by the OS.
+ * @details
+ *  Specify what type of frame (802.3 or native WiFi) the host data SW
+ *  should expect from and provide to the OS shim.
+ *
+ * @param pdev - handle to the physical device
+ * @return enumerated data frame format
+ */
+enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the peak throughput.
+ * @details
+ *  Specify the peak throughput that a system is expected to support.
+ *  The data SW uses this configuration to help choose the size for its
+ *  tx descriptor pool and rx buffer ring.
+ *  The data SW assumes that the peak throughput applies to either rx or tx,
+ *  rather than having separate specs of the rx max throughput vs. the tx
+ *  max throughput.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum supported throughput in Mbps (not MBps)
+ */
+int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the maximum number of fragments per tx network buffer.
+ * @details
+ *  Specify the maximum number of fragments that a tx frame provided to
+ *  the WLAN driver by the OS may contain.
+ *  In LL systems, the host data SW uses this maximum fragment count to
+ *  determine how many elements to allocate in the fragmentation descriptor
+ *  it creates to specify to the tx MAC DMA where to locate the tx frame's
+ *  data.
+ *  This maximum fragments count is only for regular frames, not TSO frames,
+ *  since TSO frames are sent in segments with a limited number of fragments
+ *  per segment.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum number of fragments that can occur in a regular tx frame
+ */
+int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev);
+
+/**
+ * @brief For HL systems, specify when to free tx frames.
+ * @details
+ *  In LL systems, the host's tx frame is referenced by the MAC DMA, and
+ *  thus cannot be freed until the target indicates that it is finished
+ *  transmitting the frame.
+ *  In HL systems, the entire tx frame is downloaded to the target.
+ *  Consequently, the target has its own copy of the tx frame, and the
+ *  host can free the tx frame as soon as the download completes.
+ *  Alternatively, the HL host can keep the frame allocated until the
+ *  target explicitly tells the HL host it is done transmitting the frame.
+ *  This gives the target the option of discarding its copy of the tx
+ *  frame, and then later getting a new copy from the host.
+ *  This function tells the host whether it should retain its copy of the
+ *  transmit frames until the target explicitly indicates it is finished
+ *  transmitting them, or if it should free its copy as soon as the
+ *  tx frame is downloaded to the target.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ *      0 -> retain the tx frame until the target indicates it is done
+ *          transmitting the frame
+ *      -OR-
+ *      1 -> free the tx frame as soon as the download completes
+ */
+int ol_cfg_tx_free_at_download(ol_pdev_handle pdev);
+
+/**
+ * @brief Low water mark for target tx credit.
+ * Tx completion handler is invoked to reap the buffers when the target tx
+ * credit goes below Low Water Mark.
+ */
+#define OL_CFG_NUM_MSDU_REAP 512
+#define ol_cfg_tx_credit_lwm(pdev)					       \
+	((CFG_TGT_NUM_MSDU_DESC >  OL_CFG_NUM_MSDU_REAP) ?		       \
+	 (CFG_TGT_NUM_MSDU_DESC -  OL_CFG_NUM_MSDU_REAP) : 0)
+
+/**
+ * @brief In a HL system, specify the target initial credit count.
+ * @details
+ *  The HL host tx data SW includes a module for determining which tx frames
+ *  to download to the target at a given time.
+ *  To make this judgement, the HL tx download scheduler has to know
+ *  how many buffers the HL target has available to hold tx frames.
+ *  Due to the possibility that a single target buffer pool can be shared
+ *  between rx and tx frames, the host may not be able to obtain a precise
+ *  specification of the tx buffer space available in the target, but it
+ *  uses the best estimate, as provided by this configuration function,
+ *  to determine how best to schedule the tx frame downloads.
+ *
+ * @param pdev - handle to the physical device
+ * @return the number of tx buffers available in a HL target
+ */
+uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the LL tx MSDU header download size.
+ * @details
+ *  In LL systems, determine how many bytes from a tx frame to download,
+ *  in order to provide the target FW's Descriptor Engine with enough of
+ *  the packet's payload to interpret what kind of traffic this is,
+ *  and who it is for.
+ *  This download size specification does not include the 802.3 / 802.11
+ *  frame encapsulation headers; it starts with the encapsulated IP packet
+ *  (or whatever ethertype is carried within the ethernet-ish frame).
+ *  The LL host data SW will determine how many bytes of the MSDU header to
+ *  download by adding this download size specification to the size of the
+ *  frame header format specified by the ol_cfg_frame_type configuration
+ *  function.
+ *
+ * @param pdev - handle to the physical device
+ * @return the number of bytes beyond the 802.3 or native WiFi header to
+ *      download to the target for tx classification
+ */
+int ol_cfg_tx_download_size(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify where defrag timeout and duplicate detection is handled
+ * @details
+ *   non-aggregate duplicate detection and timing out stale fragments
+ *   requires additional target memory. To reach max client
+ *   configurations (128+), non-aggregate duplicate detection and the
+ *   logic to time out stale fragments is moved to the host.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ *  0 -> target is responsible for non-aggregate duplicate detection and
+ *          timing out stale fragments.
+ *
+ *  1 -> host is responsible for non-aggregate duplicate detection and
+ *          timing out stale fragments.
+ */
+int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev);
+
+/**
+ * @brief Query for the period in ms used for throttling for
+ * thermal mitigation
+ * @details
+ *   In LL systems, transmit data throttling is used for thermal
+ *   mitigation where data is paused and resumed during the
+ *   throttle period i.e. the throttle period consists of an
+ *   "on" phase when transmit is allowed and an "off" phase when
+ *   transmit is suspended. This function returns the total
+ *   period used for throttling.
+ *
+ * @param pdev - handle to the physical device
+ * @return the total throttle period in ms
+ */
+int ol_cfg_throttle_period_ms(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether full reorder offload is
+ * enabled/disable by the host
+ * @details
+ *   If the host does not support receive reorder (i.e. the
+ *   target performs full receive re-ordering) this will return
+ *   "enabled"
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 - enable, 0 - disable
+ */
+int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev);
+
+int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev);
+
+/**
+ * ol_cfg_is_ip_tcp_udp_checksum_offload_enabled() - return
+ *                        ip_tcp_udp_checksum_offload is enable/disable
+ * @pdev : handle to the physical device
+ *
+ * Return: 1 - enable, 0 - disable
+ */
+static inline
+int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(ol_pdev_handle pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	return cfg->ip_tcp_udp_checksum_offload;
+}
+
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev);
+
+int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev);
+#endif
+
+bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev);
+
+enum wlan_target_fmt_translation_caps {
+	wlan_frm_tran_cap_raw = 0x01,
+	wlan_frm_tran_cap_native_wifi = 0x02,
+	wlan_frm_tran_cap_8023 = 0x04,
+};
+
+/**
+ * @brief Specify the maximum header size added by SW tx encapsulation
+ * @details
+ *  This function returns the maximum size of the new L2 header, not the
+ *  difference between the new and old L2 headers.
+ *  Thus, this function returns the maximum 802.11 header size that the
+ *  tx SW may need to add to tx data frames.
+ *
+ * @param pdev - handle to the physical device
+ */
+static inline int ol_cfg_sw_encap_hdr_max_size(ol_pdev_handle pdev)
+{
+	/*
+	 *  24 byte basic 802.11 header
+	 * + 6 byte 4th addr
+	 * + 2 byte QoS control
+	 * + 4 byte HT control
+	 * + 8 byte LLC/SNAP
+	 */
+	return sizeof(struct ieee80211_qosframe_htc_addr4) + LLC_SNAP_HDR_LEN;
+}
+
+static inline uint8_t ol_cfg_tx_encap(ol_pdev_handle pdev)
+{
+	/* tx encap done in HW */
+	return 0;
+}
+
+static inline int ol_cfg_host_addba(ol_pdev_handle pdev)
+{
+	/*
+	 * ADDBA negotiation is handled by the target FW for Peregrine + Rome.
+	 */
+	return 0;
+}
+
+/**
+ * @brief If the host SW's ADDBA negotiation fails, should it be retried?
+ *
+ * @param pdev - handle to the physical device
+ */
+static inline int ol_cfg_addba_retry(ol_pdev_handle pdev)
+{
+	return 0;               /* disabled for now */
+}
+
+/**
+ * @brief How many frames to hold in a paused vdev's tx queue in LL systems
+ */
+static inline int ol_tx_cfg_max_tx_queue_depth_ll(ol_pdev_handle pdev)
+{
+	/*
+	 * Store up to 1500 frames for a paused vdev.
+	 * For example, if the vdev is sending 300 Mbps of traffic, and the
+	 * PHY is capable of 600 Mbps, then it will take 56 ms for the PHY to
+	 * drain both the 700 frames that are queued initially, plus the next
+	 * 700 frames that come in while the PHY is catching up.
+	 * So in this example scenario, the PHY will remain fully utilized
+	 * in a MCC system that has a channel-switching period of 56 ms or less.
+	 * 700 frames calculation was correct when FW drain packet without
+	 * any overhead. Actual situation drain overhead will slowdown drain
+	 * speed. And channel period is less than 56 msec
+	 * Worst scenario, 1500 frames should be stored in host.
+	 */
+	return 1500;
+}
+
+/**
+ * @brief Set packet log config in HTT config based on CFG ini configuration
+ */
+void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val);
+
+/**
+ * @brief Get packet log config from HTT config
+ */
+uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev);
+
+#ifdef IPA_OFFLOAD
+/**
+ * @brief IPA micro controller data path offload enable or not
+ * @details
+ *  This function returns IPA micro controller data path offload
+ *  feature enabled or not
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer size
+ * @details
+ *  This function returns IPA micro controller data path offload
+ *  TX buffer size which should be pre-allocated by driver.
+ *  Default buffer size is 2K
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer count
+ * @details
+ *  This function returns IPA micro controller data path offload
+ *  TX buffer count which should be pre-allocated by driver.
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path RX indication ring size
+ * @details
+ *  This function returns IPA micro controller data path offload
+ *  RX indication ring size which will notified by WLAN FW to IPA
+ *  micro controller
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX partition base
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
+#else
+static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
+	ol_pdev_handle pdev)
+{
+	return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_buf_size(
+	ol_pdev_handle pdev)
+{
+	return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(
+	ol_pdev_handle pdev)
+{
+	return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(
+	ol_pdev_handle pdev)
+{
+	return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
+	ol_pdev_handle pdev)
+{
+	return 0;
+}
+#endif /* IPA_OFFLOAD */
+#endif /* _OL_CFG__H_ */

+ 43 - 0
core/dp/ol/inc/ol_ctrl_addba_api.h

@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_CTRL_ADDBA_API_H_
+#define _OL_CTRL_ADDBA_API_H_
+#define ol_ctrl_addba_attach(a, b, c, d, e)             0
+#define ol_ctrl_addba_detach(a)                         0
+#define ol_ctrl_addba_init(a, b, c, d, e)               0
+#define ol_ctrl_addba_cleanup(a)                        0
+#define ol_ctrl_addba_request_setup(a, b, c, d, e, f)   0
+#define ol_ctrl_addba_response_setup(a, b, c, d, e, f)  0
+#define ol_ctrl_addba_request_process(a, b, c, d, e)    0
+#define ol_ctrl_addba_response_process(a, b, c, d)      0
+#define ol_ctrl_addba_clear(a)                          0
+#define ol_ctrl_delba_process(a, b, c)                  0
+#define ol_ctrl_addba_get_status(a, b)                  0
+#define ol_ctrl_addba_set_response(a, b, c)             0
+#define ol_ctrl_addba_clear_response(a)                 0
+#endif

+ 44 - 0
core/dp/ol/inc/ol_ctrl_api.h

@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_ctrl_api.h
+ * @brief Definitions used in multiple external interfaces to the control SW.
+ */
+#ifndef _OL_CTRL_API__H_
+#define _OL_CTRL_API__H_
+
+struct ol_pdev_t;
+typedef struct ol_pdev_t *ol_pdev_handle;
+
+struct ol_vdev_t;
+typedef struct ol_vdev_t *ol_vdev_handle;
+
+struct ol_peer_t;
+typedef struct ol_peer_t *ol_peer_handle;
+
+#endif /* _OL_CTRL_API__H_ */

+ 47 - 0
core/dp/ol/inc/ol_defines.h

@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Offload specific Opaque Data types.
+ */
+#ifndef _DEV_OL_DEFINES_H
+#define _DEV_OL_DEFINES_H
+
+/**
+ * @brief Opaque handle of wmi structure
+ */
+struct wmi_unified;
+typedef struct wmi_unified *wmi_unified_t;
+
+typedef void *ol_scn_t;
+/**
+ * @wmi_event_handler function prototype
+ */
+typedef int (*wmi_unified_event_handler)(ol_scn_t scn_handle,
+					 uint8_t *event_buf, uint32_t len);
+
+#endif /* _DEV_OL_DEFINES_H */

+ 353 - 0
core/dp/ol/inc/ol_htt_api.h

@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_api.h
+ * @brief Specify the general HTT API functions called by the host data SW.
+ * @details
+ *  This file declares the HTT API functions that are not specific to
+ *  either tx nor rx.
+ */
+#ifndef _OL_HTT_API__H_
+#define _OL_HTT_API__H_
+
+#include <cdf_types.h>          /* cdf_device_t */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <athdefs.h>            /* A_STATUS */
+#include <htc_api.h>            /* HTC_HANDLE */
+#include <ol_ctrl_api.h>        /* ol_pdev_handle */
+#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
+#include "htt.h"                /* htt_dbg_stats_type, etc. */
+
+/* TID */
+#define OL_HTT_TID_NON_QOS_UNICAST     16
+#define OL_HTT_TID_NON_QOS_MCAST_BCAST 18
+
+struct htt_pdev_t;
+typedef struct htt_pdev_t *htt_pdev_handle;
+
+htt_pdev_handle
+htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
+	ol_pdev_handle ctrl_pdev,
+	HTC_HANDLE htc_pdev, cdf_device_t osdev);
+
+/**
+ * @brief Allocate and initialize a HTT instance.
+ * @details
+ *  This function allocates and initializes an HTT instance.
+ *  This involves allocating a pool of HTT tx descriptors in
+ *  consistent memory, allocating and filling a rx ring (LL only),
+ *  and connecting the HTC's HTT_DATA_MSG service.
+ *  The HTC service connect call will block, so this function
+ *  needs to be called in passive context.
+ *  Because HTC setup has not been completed at the time this function
+ *  is called, this function cannot send any HTC messages to the target.
+ *  Messages to configure the target are instead sent in the
+ *  htc_attach_target function.
+ *
+ * @param pdev - data SW's physical device handle
+ *      (used as context pointer during HTT -> txrx calls)
+ * @param desc_pool_size - number of HTT descriptors to (pre)allocate
+ * @return success -> HTT pdev handle; failure -> NULL
+ */
+int
+htt_attach(struct htt_pdev_t *pdev, int desc_pool_size);
+
+/**
+ * @brief Send HTT configuration messages to the target.
+ * @details
+ *  For LL only, this function sends a rx ring configuration message to the
+ *  target.  For HL, this function is a no-op.
+ *
+ * @param htt_pdev - handle to the HTT instance being initialized
+ */
+A_STATUS htt_attach_target(htt_pdev_handle htt_pdev);
+
+/**
+ * enum htt_op_mode - Virtual device operation mode
+ *
+ * @htt_op_mode_unknown: Unknown mode
+ * @htt_op_mode_ap: AP mode
+ * @htt_op_mode_ibss: IBSS mode
+ * @htt_op_mode_sta: STA (client) mode
+ * @htt_op_mode_monitor: Monitor mode
+ * @htt_op_mode_ocb: OCB mode
+ */
+enum htt_op_mode {
+	htt_op_mode_unknown,
+	htt_op_mode_ap,
+	htt_op_mode_ibss,
+	htt_op_mode_sta,
+	htt_op_mode_monitor,
+	htt_op_mode_ocb,
+};
+
+/* no-ops */
+#define htt_vdev_attach(htt_pdev, vdev_id, op_mode)
+#define htt_vdev_detach(htt_pdev, vdev_id)
+#define htt_peer_qos_update(htt_pdev, peer_id, qos_capable)
+#define htt_peer_uapsdmask_update(htt_pdev, peer_id, uapsd_mask)
+
+void htt_pdev_free(htt_pdev_handle pdev);
+
+/**
+ * @brief Deallocate a HTT instance.
+ *
+ * @param htt_pdev - handle to the HTT instance being torn down
+ */
+void htt_detach(htt_pdev_handle htt_pdev);
+
+/**
+ * @brief Stop the communication between HTT and target
+ * @details
+ *  For ISOC solution, this function stop the communication between HTT and
+ *  target.
+ *  For Peregrine/Rome, it's already stopped by ol_ath_disconnect_htc
+ *  before ol_txrx_pdev_detach called in ol_ath_detach. So this function is
+ *  a no-op.
+ *  Peregrine/Rome HTT layer is on top of HTC while ISOC solution HTT layer is
+ *  on top of DXE layer.
+ *
+ * @param htt_pdev - handle to the HTT instance being initialized
+ */
+void htt_detach_target(htt_pdev_handle htt_pdev);
+
+/*
+ * @brief Tell the target side of HTT to suspend H2T processing until synced
+ * @param htt_pdev - the host HTT object
+ * @param sync_cnt - what sync count value the target HTT FW should wait for
+ *      before resuming H2T processing
+ */
+A_STATUS htt_h2t_sync_msg(htt_pdev_handle htt_pdev, uint8_t sync_cnt);
+
+int
+htt_h2t_aggr_cfg_msg(htt_pdev_handle htt_pdev,
+		     int max_subfrms_ampdu, int max_subfrms_amsdu);
+
+/**
+ * @brief Get the FW status
+ * @details
+ *  Trigger FW HTT to retrieve FW status.
+ *  A separate HTT message will come back with the statistics we want.
+ *
+ * @param pdev - handle to the HTT instance
+ * @param stats_type_upload_mask - bitmask identifying which stats to upload
+ * @param stats_type_reset_mask - bitmask identifying which stats to reset
+ * @param cookie - unique value to distinguish and identify stats requests
+ * @return 0 - succeed to send the request to FW; otherwise, failed to do so.
+ */
+int
+htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
+		      uint32_t stats_type_upload_mask,
+		      uint32_t stats_type_reset_mask,
+		      uint8_t cfg_stats_type,
+		      uint32_t cfg_val, uint64_t cookie);
+
+/**
+ * @brief Get the fields from HTT T2H stats upload message's stats info header
+ * @details
+ *  Parse a HTT T2H message's stats info tag-length-value header,
+ *  to obtain the stats type, status, data length, and data address.
+ *
+ * @param stats_info_list - address of stats record's header
+ * @param[out] type - which type of FW stats are contained in the record
+ * @param[out] status - whether the stats are (fully) present in the record
+ * @param[out] length - how large the data portion of the stats record is
+ * @param[out] stats_data - where the data portion of the stats record is
+ */
+void
+htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
+			    enum htt_dbg_stats_type *type,
+			    enum htt_dbg_stats_status *status,
+			    int *length, uint8_t **stats_data);
+
+/**
+ * @brief Display a stats record from the HTT T2H STATS_CONF message.
+ * @details
+ *  Parse the stats type and status, and invoke a type-specified printout
+ *  to display the stats values.
+ *
+ *  @param stats_data - buffer holding the stats record from the STATS_CONF msg
+ *  @param concise - whether to do a verbose or concise printout
+ */
+void htt_t2h_stats_print(uint8_t *stats_data, int concise);
+
+#ifndef HTT_DEBUG_LEVEL
+#if defined(DEBUG)
+#define HTT_DEBUG_LEVEL 10
+#else
+#define HTT_DEBUG_LEVEL 0
+#endif
+#endif
+
+#if HTT_DEBUG_LEVEL > 5
+void htt_display(htt_pdev_handle pdev, int indent);
+#else
+#define htt_display(pdev, indent)
+#endif
+
+#define HTT_DXE_RX_LOG 0
+#define htt_rx_reorder_log_print(pdev)
+
+#ifdef IPA_OFFLOAD
+/**
+ * @brief send IPA UC resource config message to firmware with HTT message
+ * @details
+ *  send IPA UC resource config message to firmware with HTT message
+ *
+ * @param pdev - handle to the HTT instance
+ */
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev);
+
+/**
+ * @brief Client request resource information
+ * @details
+ *  OL client will request IPA UC related resource information
+ *  Resource information will be distributed to IPA module
+ *  All of the required resources should be pre-allocated
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ce_sr_base_paddr - copy engine source ring base physical address
+ * @param ce_sr_ring_size - copy engine source ring size
+ * @param ce_reg_paddr - copy engine register physical address
+ * @param tx_comp_ring_base_paddr - tx comp ring base physical address
+ * @param tx_comp_ring_size - tx comp ring size
+ * @param tx_num_alloc_buffer - number of allocated tx buffer
+ * @param rx_rdy_ring_base_paddr - rx ready ring base physical address
+ * @param rx_rdy_ring_size - rx ready ring size
+ * @param rx_proc_done_idx_paddr - rx process done index physical address
+ */
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+			uint32_t *ce_sr_base_paddr,
+			uint32_t *ce_sr_ring_size,
+			cdf_dma_addr_t *ce_reg_paddr,
+			uint32_t *tx_comp_ring_base_paddr,
+			uint32_t *tx_comp_ring_size,
+			uint32_t *tx_num_alloc_buffer,
+			uint32_t *rx_rdy_ring_base_paddr,
+			uint32_t *rx_rdy_ring_size,
+			uint32_t *rx_proc_done_idx_paddr);
+
+/**
+ * @brief Client set IPA UC doorbell register
+ * @details
+ *  IPA UC let know doorbell register physical address
+ *  WLAN firmware will use this physical address to notify IPA UC
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ipa_uc_tx_doorbell_paddr - tx comp doorbell physical address
+ * @param ipa_uc_rx_doorbell_paddr - rx ready doorbell physical address
+ */
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+			      uint32_t ipa_uc_tx_doorbell_paddr,
+			      uint32_t ipa_uc_rx_doorbell_paddr);
+
+/**
+ * @brief Client notify IPA UC data path active or not
+ *
+ * @param pdev - handle to the HTT instance
+ * @param uc_active - UC data path is active or not
+ * @param is_tx - UC TX is active or not
+ */
+int
+htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active, bool is_tx);
+
+/**
+ * @brief query uc data path stats
+ *
+ * @param pdev - handle to the HTT instance
+ */
+int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev);
+
+/**
+ * @brief Attach IPA UC data path
+ *
+ * @param pdev - handle to the HTT instance
+ */
+int htt_ipa_uc_attach(struct htt_pdev_t *pdev);
+
+/**
+ * @brief detach IPA UC data path
+ *
+ * @param pdev - handle to the HTT instance
+ */
+void htt_ipa_uc_detach(struct htt_pdev_t *pdev);
+#else
+static inline int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+	return 0;
+}
+
+static inline int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+			uint32_t *ce_sr_base_paddr,
+			uint32_t *ce_sr_ring_size,
+			cdf_dma_addr_t *ce_reg_paddr,
+			uint32_t *tx_comp_ring_base_paddr,
+			uint32_t *tx_comp_ring_size,
+			uint32_t *tx_num_alloc_buffer,
+			uint32_t *rx_rdy_ring_base_paddr,
+			uint32_t *rx_rdy_ring_size,
+			uint32_t *rx_proc_done_idx_paddr)
+{
+	return 0;
+}
+
+static inline int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+			      uint32_t ipa_uc_tx_doorbell_paddr,
+			      uint32_t ipa_uc_rx_doorbell_paddr)
+{
+	return 0;
+}
+
+static inline int
+htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active,
+	bool is_tx)
+{
+	return 0;
+}
+
+static inline int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
+{
+	return 0;
+}
+
+static inline int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
+{
+	return 0;
+}
+
+static inline void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	return;
+}
+#endif /* IPA_OFFLOAD */
+
+#endif /* _OL_HTT_API__H_ */

+ 863 - 0
core/dp/ol/inc/ol_htt_rx_api.h

@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_rx_api.h
+ * @brief Specify the rx HTT API functions called by the host data SW.
+ * @details
+ *  This file declares the HTT API functions that are specifically
+ *  related to receive processing.
+ *  In particular, this file specifies methods of the abstract HTT rx
+ *  descriptor, and functions to iterate though a series of rx descriptors
+ *  and rx MSDU buffers.
+ */
+#ifndef _OL_HTT_RX_API__H_
+#define _OL_HTT_RX_API__H_
+
+/* #include <osapi_linux.h>     / * uint16_t, etc. * / */
+#include <osdep.h>              /* uint16_t, etc. */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <cdf_types.h>          /* bool */
+
+#include <htt.h>                /* HTT_RX_IND_MPDU_STATUS */
+#include <ol_htt_api.h>         /* htt_pdev_handle */
+
+#include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */
+#include <ol_vowext_dbg_defs.h>
+
+/*================ constants and types used in the rx API ===================*/
+
+#define HTT_RSSI_INVALID 0x7fff
+
+/**
+ * struct ocb_rx_stats_hdr_t - RX stats header
+ * @version:		The version must be 1.
+ * @length:		The length of this structure
+ * @channel_freq:	The center frequency for the packet
+ * @rssi_cmb:		combined RSSI from all chains
+ * @rssi:		rssi for chains 0 through 3 (for 20 MHz bandwidth)
+ * @tsf32:		timestamp in TSF units
+ * @timestamp_microsec:	timestamp in microseconds
+ * @datarate:		MCS index
+ * @timestamp_submicrosec: submicrosecond portion of the timestamp
+ * @ext_tid:		Extended TID
+ * @reserved:		Ensure the size of the structure is a multiple of 4.
+ *			Must be 0.
+ *
+ * When receiving an OCB packet, the RX stats is sent to the user application
+ * so that the user application can do processing based on the RX stats.
+ * This structure will be preceded by an ethernet header with
+ * the proto field set to 0x8152. This struct includes various RX
+ * parameters including RSSI, data rate, and center frequency.
+ */
+PREPACK struct ocb_rx_stats_hdr_t {
+	uint16_t version;
+	uint16_t length;
+	uint16_t channel_freq;
+	int16_t rssi_cmb;
+	int16_t rssi[4];
+	uint32_t tsf32;
+	uint32_t timestamp_microsec;
+	uint8_t datarate;
+	uint8_t timestamp_submicrosec;
+	uint8_t ext_tid;
+	uint8_t reserved;
+};
+
+/*================ rx indication message field access methods ===============*/
+
+/**
+ * @brief Check if a rx indication message has a rx reorder flush command.
+ * @details
+ *  Space is reserved in each rx indication message for a rx reorder flush
+ *  command, to release specified MPDUs from the rx reorder holding array
+ *  before processing the new MPDUs referenced by the rx indication message.
+ *  This rx reorder flush command contains a flag to show whether the command
+ *  is valid within a given rx indication message.
+ *  This function checks the validity flag from the rx indication
+ *  flush command IE within the rx indication message.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return
+ *      1 - the message's rx flush command is valid and should be processed
+ *          before processing new rx MPDUs,
+ *      -OR-
+ *      0 - the message's rx flush command is invalid and should be ignored
+ */
+int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Return the sequence number starting the range of MPDUs to flush.
+ * @details
+ *  Read the fields of the rx indication message that identify the start
+ *  and end of the range of MPDUs to flush from the rx reorder holding array
+ *  and send on to subsequent stages of rx processing.
+ *  These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
+ *  number.  These sequence numbers are masked with the block ack window size,
+ *  rounded up to a power of two (minus one, to create a bitmask) to obtain
+ *  the corresponding index into the rx reorder holding array.
+ *  The series of MPDUs to flush includes the one specified by the start
+ *  sequence number.
+ *  The series of MPDUs to flush excludes the one specified by the end
+ *  sequence number; the MPDUs up to but not including the end sequence number
+ *  are to be flushed.
+ *  These start and end seq num fields are only valid if the "flush valid"
+ *  flag is set.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ *      for the start of the range of MPDUs to flush
+ * @param seq_num_end - (call-by-reference output) sequence number
+ *      for the end of the range of MPDUs to flush
+ */
+void
+htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
+			       cdf_nbuf_t rx_ind_msg,
+			       unsigned *seq_num_start, unsigned *seq_num_end);
+
+/**
+ * @brief Check if a rx indication message has a rx reorder release command.
+ * @details
+ *  Space is reserved in each rx indication message for a rx reorder release
+ *  command, to release specified MPDUs from the rx reorder holding array
+ *  after processing the new MPDUs referenced by the rx indication message.
+ *  This rx reorder release command contains a flag to show whether the command
+ *  is valid within a given rx indication message.
+ *  This function checks the validity flag from the rx indication
+ *  release command IE within the rx indication message.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return
+ *      1 - the message's rx release command is valid and should be processed
+ *          after processing new rx MPDUs,
+ *      -OR-
+ *      0 - the message's rx release command is invalid and should be ignored
+ */
+int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Return the sequence number starting the range of MPDUs to release.
+ * @details
+ *  Read the fields of the rx indication message that identify the start
+ *  and end of the range of MPDUs to release from the rx reorder holding
+ *  array and send on to subsequent stages of rx processing.
+ *  These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
+ *  number.  These sequence numbers are masked with the block ack window size,
+ *  rounded up to a power of two (minus one, to create a bitmask) to obtain
+ *  the corresponding index into the rx reorder holding array.
+ *  The series of MPDUs to release includes the one specified by the start
+ *  sequence number.
+ *  The series of MPDUs to release excludes the one specified by the end
+ *  sequence number; the MPDUs up to but not including the end sequence number
+ *  are to be released.
+ *  These start and end seq num fields are only valid if the "release valid"
+ *  flag is set.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ *        for the start of the range of MPDUs to release
+ * @param seq_num_end - (call-by-reference output) sequence number
+ *        for the end of the range of MPDUs to release
+ */
+void
+htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
+				 cdf_nbuf_t rx_ind_msg,
+				 unsigned *seq_num_start,
+				 unsigned *seq_num_end);
+
+/*
+ * For now, the host HTT -> host data rx status enum
+ * exactly matches the target HTT -> host HTT rx status enum;
+ * no translation is required.
+ * However, the host data SW should only use the htt_rx_status,
+ * so that in the future a translation from target HTT rx status
+ * to host HTT rx status can be added, if the need ever arises.
+ */
+enum htt_rx_status {
+	htt_rx_status_unknown = HTT_RX_IND_MPDU_STATUS_UNKNOWN,
+	htt_rx_status_ok = HTT_RX_IND_MPDU_STATUS_OK,
+	htt_rx_status_err_fcs = HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+	htt_rx_status_err_dup = HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+	htt_rx_status_err_replay = HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+	htt_rx_status_err_inv_peer = HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+	htt_rx_status_ctrl_mgmt_null = HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+	htt_rx_status_tkip_mic_err = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+
+	htt_rx_status_err_misc = HTT_RX_IND_MPDU_STATUS_ERR_MISC
+};
+
+/**
+ * @brief Check the status MPDU range referenced by a rx indication message.
+ * @details
+ *  Check the status of a range of MPDUs referenced by a rx indication message.
+ *  This status determines whether the MPDUs should be processed or discarded.
+ *  If the status is OK, then the MPDUs within the range should be processed
+ *  as usual.
+ *  Otherwise (FCS error, duplicate error, replay error, unknown sender error,
+ *  etc.) the MPDUs within the range should be discarded.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param mpdu_range_num - which MPDU range within the rx ind msg to check,
+ *        starting from 0
+ * @param status - (call-by-reference output) MPDU status
+ * @param mpdu_count - (call-by-reference output) count of MPDUs comprising
+ *        the specified MPDU range
+ */
+void
+htt_rx_ind_mpdu_range_info(htt_pdev_handle pdev,
+			   cdf_nbuf_t rx_ind_msg,
+			   int mpdu_range_num,
+			   enum htt_rx_status *status, int *mpdu_count);
+
+/**
+ * @brief Return the RSSI provided in a rx indication message.
+ * @details
+ *  Return the RSSI from an rx indication message, converted to dBm units.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t
+htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+int16_t
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+			  int8_t chain);
+
+void
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+		       uint8_t *legacy_rate, uint8_t *legacy_rate_sel);
+
+
+void
+htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+		     uint32_t *timestamp_microsec,
+		     uint8_t *timestamp_submicrosec);
+
+uint32_t
+htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+uint8_t
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+
+/*==================== rx MPDU descriptor access methods ====================*/
+
+/**
+ * @brief Check if the retry bit is set in Rx-descriptor
+ * @details
+ * This function returns the retry bit of the 802.11 header for the
+ *  provided rx MPDU descriptor.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return boolean -- true if retry is set, false otherwise
+ */
+extern
+bool (*htt_rx_mpdu_desc_retry)(
+		htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return a rx MPDU's sequence number.
+ * @details
+ *  This function returns the LSBs of the 802.11 sequence number for the
+ *  provided rx MPDU descriptor.
+ *  Depending on the system, 6-12 LSBs from the 802.11 sequence number are
+ *  returned.  (Typically, either the 8 or 12 LSBs are returned.)
+ *  This sequence number is masked with the block ack window size,
+ *  rounded up to a power of two (minus one, to create a bitmask) to obtain
+ *  the corresponding index into the rx reorder holding array.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return the LSBs of the sequence number for the MPDU
+ */
+extern uint16_t
+(*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return a rx MPDU's rx reorder array index, based on sequence number.
+ * @details
+ *  This function returns a sequence-number based index into the rx
+ *  reorder array for the specified MPDU.
+ *  In some systems, this rx reorder array is simply the LSBs of the
+ *  sequence number, or possibly even the full sequence number.
+ *  To support such systems, the returned index has to be masked with
+ *  the power-of-two array size before using the value to index the
+ *  rx reorder array.
+ *  In other systems, this rx reorder array index is
+ *      (sequence number) % (block ack window size)
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return the rx reorder array index the MPDU goes into
+ */
+/* use sequence number (or LSBs thereof) as rx reorder array index */
+#define htt_rx_mpdu_desc_reorder_idx htt_rx_mpdu_desc_seq_num
+
+union htt_rx_pn_t {
+	/* WEP: 24-bit PN */
+	uint32_t pn24;
+
+	/* TKIP or CCMP: 48-bit PN */
+	uint64_t pn48;
+
+	/* WAPI: 128-bit PN */
+	uint64_t pn128[2];
+};
+
+/**
+ * @brief Find the packet number (PN) for a MPDU.
+ * @details
+ *  This function only applies when the rx PN check is configured to be
+ *  performed in the host rather than the target, and on peers using a
+ *  security type for which a PN check applies.
+ *  The pn_len_bits argument is used to determine which element of the
+ *  htt_rx_pn_t union to deposit the PN value read from the MPDU descriptor
+ *  into.
+ *  A 24-bit PN is deposited into pn->pn24.
+ *  A 48-bit PN is deposited into pn->pn48.
+ *  A 128-bit PN is deposited in little-endian order into pn->pn128.
+ *  Specifically, bits 63:0 of the PN are copied into pn->pn128[0], while
+ *  bits 127:64 of the PN are copied into pn->pn128[1].
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @param pn - the location to copy the packet number into
+ * @param pn_len_bits - the PN size, in bits
+ */
+extern void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
+				   void *mpdu_desc,
+				   union htt_rx_pn_t *pn, int pn_len_bits);
+
+/**
+ * @brief This function Returns the TID value from the Rx descriptor
+ *                             for Low Latency driver
+ * @details
+ *  This function returns the TID set in the 802.11 QoS Control for the MPDU
+ *  in the packet header, by looking at the mpdu_start of the Rx descriptor.
+ *  Rx descriptor gets a copy of the TID from the MAC.
+ * @param pdev - Handle (pointer) to HTT pdev.
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return Actual TID set in the packet header.
+ */
+extern
+uint8_t (*htt_rx_mpdu_desc_tid)(
+			htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the TSF timestamp indicating when a MPDU was received.
+ * @details
+ *  This function provides the timestamp indicating when the PPDU that
+ *  the specified MPDU belongs to was received.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return 32 LSBs of TSF time at which the MPDU's PPDU was received
+ */
+uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the 802.11 header of the MPDU
+ * @details
+ *  This function provides a pointer to the start of the 802.11 header
+ *  of the Rx MPDU
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return pointer to 802.11 header of the received MPDU
+ */
+char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the RSSI provided in a rx descriptor.
+ * @details
+ *  Return the RSSI from a rx descriptor, converted to dBm units.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc);
+
+/*==================== rx MSDU descriptor access methods ====================*/
+
+/**
+ * @brief Check if a MSDU completes a MPDU.
+ * @details
+ *  When A-MSDU aggregation is used, a single MPDU will consist of
+ *  multiple MSDUs.  This function checks a MSDU's rx descriptor to
+ *  see whether the MSDU is the final MSDU within a MPDU.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - there are subsequent MSDUs within the A-MSDU / MPDU
+ *      -OR-
+ *      1 - this is the last MSDU within its MPDU
+ */
+extern bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev,
+					       void *msdu_desc);
+
+/**
+ * @brief Check if a MSDU is first msdu of MPDU.
+ * @details
+ *  When A-MSDU aggregation is used, a single MPDU will consist of
+ *  multiple MSDUs.  This function checks a MSDU's rx descriptor to
+ *  see whether the MSDU is the first MSDU within a MPDU.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - this is interior MSDU in the A-MSDU / MPDU
+ *      -OR-
+ *      1 - this is the first MSDU within its MPDU
+ */
+extern bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev,
+					   void *msdu_desc);
+
+/**
+ * @brief Retrieve encrypt bit from a mpdu desc.
+ * @details
+ *  Fw will pass all the frame  to the host whether encrypted or not, and will
+ *  indicate the encrypt flag in the desc, this function is to get the info
+ *  and used to make a judge whether should make pn check, because
+ *  non-encrypted frames always get the same pn number 0.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return 0 - the frame was not encrypted
+ *         1 - the frame was encrypted
+ */
+extern bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Indicate whether a rx desc has a WLAN unicast vs. mcast/bcast flag.
+ * @details
+ *  A flag indicating whether a MPDU was delivered over WLAN as unicast or
+ *  multicast/broadcast may be only valid once per MPDU (LL), or within each
+ *  rx descriptor for the MSDUs within the MPDU (HL).  (In practice, it is
+ *  unlikely that A-MSDU aggregation will be used in HL, so typically HL will
+ *  only have one MSDU per MPDU anyway.)
+ *  This function indicates whether the specified rx descriptor contains
+ *  a WLAN ucast vs. mcast/bcast flag.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - The rx descriptor does not contain a WLAN ucast vs. mcast flag.
+ *      -OR-
+ *      1 - The rx descriptor has a valid WLAN ucast vs. mcast flag.
+ */
+extern int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev,
+					      void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU was received as unicast or mcast/bcast
+ * @details
+ *  Indicate whether the MPDU that the specified MSDU belonged to was
+ *  delivered over the WLAN as unicast, or as multicast/broadcast.
+ *  This query can only be performed on rx descriptors for which
+ *  htt_rx_msdu_has_wlan_mcast_flag is true.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - The MSDU was delivered over the WLAN as unicast.
+ *      -OR-
+ *      1 - The MSDU was delivered over the WLAN as broadcast or multicast.
+ */
+extern bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU was received as a fragmented frame
+ * @details
+ *  This query can only be performed on LL system.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - The MSDU was a non-fragmented frame.
+ *      -OR-
+ *      1 - The MSDU was fragmented frame.
+ */
+extern int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate if a MSDU should be delivered to the OS shim or discarded.
+ * @details
+ *  Indicate whether a MSDU should be discarded or delivered to the OS shim.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - The MSDU should be delivered to the OS
+ *      -OR-
+ *      non-zero - The MSDU should not be delivered to the OS.
+ *          If the "forward" flag is set, it should be forwarded to tx.
+ *          Else, it should be discarded.
+ */
+int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU should be forwarded to tx.
+ * @details
+ *  Indicate whether a MSDU should be forwarded to tx, e.g. for intra-BSS
+ *  STA-to-STA forwarding in an AP, or for multicast echo in an AP.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - The MSDU should not be forwarded
+ *      -OR-
+ *      non-zero - The MSDU should be forwarded.
+ *          If the "discard" flag is set, then the original MSDU can be
+ *          directly forwarded into the tx path.
+ *          Else, a copy (clone?) of the rx MSDU needs to be created to
+ *          send to the tx path.
+ */
+int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU's contents need to be inspected.
+ * @details
+ *  Indicate whether the host data SW needs to examine the contents of the
+ *  received MSDU, and based on the packet type infer what special handling
+ *  to provide for the MSDU.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ *      0 - No inspection + special handling is required.
+ *      -OR-
+ *      non-zero - Inspect the MSDU contents to infer what special handling
+ *          to apply to the MSDU.
+ */
+int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Provide all action specifications for a rx MSDU
+ * @details
+ *  Provide all action specifications together.  This provides the same
+ *  information in a single function call as would be provided by calling
+ *  the functions htt_rx_msdu_discard, htt_rx_msdu_forward, and
+ *  htt_rx_msdu_inspect.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @param[out] discard - 1: discard the MSDU, 0: deliver the MSDU to the OS
+ * @param[out] forward - 1: forward the rx MSDU to tx, 0: no rx->tx forward
+ * @param[out] inspect - 1: process according to MSDU contents, 0: no inspect
+ */
+void
+htt_rx_msdu_actions(htt_pdev_handle pdev,
+		    void *msdu_desc, int *discard, int *forward, int *inspect);
+
+/**
+ * @brief Get the key id sent in IV of the frame
+ * @details
+ *  Provide the key index octet which is taken from IV.
+ *  This is valid only for the first MSDU.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @param key_id - Key id octet
+ * @return indication of whether key id access is successful
+ *   true - Success
+ *   false - if this is not first msdu
+ */
+extern bool
+(*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
+			   void *mpdu_desc, uint8_t *key_id);
+
+extern bool
+(*htt_rx_msdu_chan_info_present)(
+	htt_pdev_handle pdev,
+	void *mpdu_desc);
+
+extern bool
+(*htt_rx_msdu_center_freq)(
+	htt_pdev_handle pdev,
+	struct ol_txrx_peer_t *peer,
+	void *mpdu_desc,
+	uint16_t *primary_chan_center_freq_mhz,
+	uint16_t *contig_chan1_center_freq_mhz,
+	uint16_t *contig_chan2_center_freq_mhz,
+	uint8_t *phy_mode);
+
+/*====================== rx MSDU + descriptor delivery ======================*/
+
+/**
+ * @brief Return a linked-list of network buffer holding the next rx A-MSDU.
+ * @details
+ *  In some systems, the rx MSDUs are uploaded along with the rx
+ *  indication message, while in other systems the rx MSDUs are uploaded
+ *  out of band, via MAC DMA.
+ *  This function provides an abstract way to obtain a linked-list of the
+ *  next MSDUs, regardless of whether the MSDU was delivered in-band with
+ *  the rx indication message, or out of band through MAC DMA.
+ *  In a LL system, this function returns a linked list of the one or more
+ *  MSDUs that together comprise an A-MSDU.
+ *  In a HL system, this function returns a degenerate linked list consisting
+ *  of a single MSDU (head_msdu == tail_msdu).
+ *  This function also makes sure each MSDU's rx descriptor can be found
+ *  through the MSDU's network buffer.
+ *  In most systems, this is trivial - a single network buffer stores both
+ *  the MSDU rx descriptor and the MSDU payload.
+ *  In systems where the rx descriptor is in a separate buffer from the
+ *  network buffer holding the MSDU payload, a pointer to the rx descriptor
+ *  has to be stored in the network buffer.
+ *  After this function call, the descriptor for a given MSDU can be
+ *  obtained via the htt_rx_msdu_desc_retrieve function.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param head_msdu - call-by-reference network buffer handle, which gets set
+ *      in this function to point to the head MSDU of the A-MSDU
+ * @param tail_msdu - call-by-reference network buffer handle, which gets set
+ *      in this function to point to the tail MSDU of the A-MSDU, or the
+ *      same MSDU that the head_msdu points to if only a single MSDU is
+ *      delivered at a time.
+ * @return indication of whether any MSDUs in the AMSDU use chaining:
+ * 0 - no buffer chaining
+ * 1 - buffers are chained
+ */
+extern int
+(*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
+		    cdf_nbuf_t rx_ind_msg,
+		    cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+extern int
+(*htt_rx_frag_pop)(htt_pdev_handle pdev,
+		   cdf_nbuf_t rx_ind_msg,
+		   cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+/**
+ * @brief Return a linked list of buffers holding one MSDU
+ *  In some systems the buffers are delivered along with offload delivery
+ *  indication message itself, while in other systems the buffers are uploaded
+ *  out of band, via MAC DMA.
+ * @details
+ *  This function provides an abstract way to obtain a linked-list of the
+ *  buffers corresponding to an msdu, regardless of whether the MSDU was
+ *  delivered in-band with the rx indication message, or out of band through
+ *  MAC DMA.
+ *  In a LL system, this function returns a linked list of one or more
+ *  buffers corresponding to an MSDU
+ *  In a HL system , TODO
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param offload_deliver_msg - the netbuf containing the offload deliver message
+ * @param head_msdu - call-by-reference network buffer handle, which gets set in this
+ *      function to the head buffer of this MSDU
+ * @param tail_msdu - call-by-reference network buffer handle, which gets set in this
+ *      function to the tail buffer of this MSDU
+ */
+extern int
+(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
+			   cdf_nbuf_t offload_deliver_msg,
+			   int *vdev_id,
+			   int *peer_id,
+			   int *tid,
+			   uint8_t *fw_desc,
+			   cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+
+/**
+ * @brief Return the rx descriptor for the next rx MPDU.
+ * @details
+ *  The rx MSDU descriptors may be uploaded as part of the rx indication
+ *  message, or delivered separately out of band.
+ *  This function provides an abstract way to obtain the next MPDU descriptor,
+ *  regardless of whether the MPDU descriptors are delivered in-band with
+ *  the rx indication message, or out of band.
+ *  This is used to iterate through the series of MPDU descriptors referenced
+ *  by a rx indication message.
+ *  The htt_rx_amsdu_pop function should be called before this function
+ *  (or at least before using the returned rx descriptor handle), so that
+ *  the cache location for the rx descriptor will be flushed before the
+ *  rx descriptor gets used.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return next abstract rx descriptor from the series of MPDUs referenced
+ *      by an rx ind msg
+ */
+extern void *
+(*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Retrieve a previously-stored rx descriptor from a MSDU buffer.
+ * @details
+ *  The data SW will call the htt_rx_msdu_desc_link macro/function to
+ *  link a MSDU's rx descriptor with the buffer holding the MSDU payload.
+ *  This function retrieves the rx MSDU descriptor.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu - the buffer containing the MSDU payload
+ * @return the corresponding abstract rx MSDU descriptor
+ */
+extern void *
+(*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Free both an rx MSDU descriptor and the associated MSDU buffer.
+ * @details
+ *  Usually the WLAN driver does not free rx MSDU buffers, but needs to
+ *  do so when an invalid frame (e.g. FCS error) was deposited into the
+ *  queue of rx buffers.
+ *  This function frees both the rx descriptor and the rx frame.
+ *  On some systems, the rx descriptor and rx frame are stored in the
+ *  same buffer, and thus one free suffices for both objects.
+ *  On other systems, the rx descriptor and rx frame are stored
+ *  separately, so distinct frees are internally needed.
+ *  However, in either case, the rx descriptor has been associated with
+ *  the MSDU buffer, and can be retrieved by htt_rx_msdu_desc_retrieve.
+ *  Hence, it is only necessary to provide the MSDU buffer; the HTT SW
+ *  internally finds the corresponding MSDU rx descriptor.
+ *
+ * @param htt_pdev - the HTT instance the rx data was received on
+ * @param rx_msdu_desc - rx descriptor for the MSDU being freed
+ * @param msdu - rx frame buffer for the MSDU being freed
+ */
+void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Look up and free the rx descriptor for a MSDU.
+ * @details
+ *  When the driver delivers rx frames to the OS, it first needs
+ *  to free the associated rx descriptors.
+ *  In some systems the rx descriptors are allocated in the same
+ *  buffer as the rx frames, so this operation is a no-op.
+ *  In other systems, the rx descriptors are stored separately
+ *  from the rx frames, so the rx descriptor has to be freed.
+ *  The descriptor is located from the MSDU buffer with the
+ *  htt_rx_desc_frame_free macro/function.
+ *
+ * @param htt_pdev - the HTT instance the rx data was received on
+ * @param msdu - rx frame buffer for the rx MSDU descriptor being freed
+ */
+void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Add new MSDU buffers for the target to fill.
+ * @details
+ *  In some systems, the underlying upload mechanism (HIF) allocates new rx
+ *  buffers itself.  In other systems, the underlying upload mechanism
+ *  (MAC DMA) needs to be provided with new rx buffers.
+ *  This function is used as an abstract method to indicate to the underlying
+ *  data upload mechanism when it is an appropriate time to allocate new rx
+ *  buffers.
+ *  If the allocation is automatically handled, a la HIF, then this function
+ *  call is ignored.
+ *  If the allocation has to be done explicitly, a la MAC DMA, then this
+ *  function provides the context and timing for such replenishment
+ *  allocations.
+ *
+ * @param pdev - the HTT instance the rx data will be received on
+ */
+void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev);
+
+/**
+ * @brief Links a list of MSDUs into a single MPDU. Updates RX stats
+ * @details
+ *  When HW MSDU splitting is turned on each MSDU in an AMSDU MPDU occupies
+ *  a separate wbuf for delivery to the network stack. For delivery to the
+ *  monitor mode interface they need to be restitched into an MPDU. This
+ *  function does this. Also updates the RX status if the MPDU starts
+ *  a new PPDU
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param head_msdu - network buffer handle, which points to the first MSDU
+ *      in the list. This is a NULL terminated list
+ * @param rx_status - pointer to the status associated with this MPDU.
+ *      Updated only if there is a new PPDU and new status associated with it
+ * @param clone_not_reqd - If set the MPDU linking destroys the passed in
+ *      list, else operates on a cloned nbuf
+ * @return network buffer handle to the MPDU
+ */
+cdf_nbuf_t
+htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
+				cdf_nbuf_t head_msdu,
+				struct ieee80211_rx_status *rx_status,
+				unsigned clone_not_reqd);
+
+/**
+ * @brief Return the sequence number of MPDUs to flush.
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_frag_ind_msg - the netbuf containing the rx fragment indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ *      for the start of the range of MPDUs to flush
+ * @param seq_num_end - (call-by-reference output) sequence number
+ *      for the end of the range of MPDUs to flush
+ */
+void
+htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
+				    cdf_nbuf_t rx_frag_ind_msg,
+				    int *seq_num_start, int *seq_num_end);
+/**
+ * @brief Return the HL rx desc size
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the hl rx desc pointer
+ *
+ */
+uint16_t htt_rx_msdu_rx_desc_size_hl(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief populates vowext stats by processing RX desc.
+ * @param msdu - network buffer handle
+ * @param vowstats - handle to vow ext stats.
+ */
+void htt_rx_get_vowext_stats(cdf_nbuf_t msdu, struct vow_extstats *vowstats);
+
+/**
+ * @brief parses the offload message passed by the target.
+ * @param pdev - pdev handle
+ * @param paddr - physical address of the rx buffer
+ * @param vdev_id - reference to vdev id to be filled
+ * @param peer_id - reference to the peer id to be filled
+ * @param tid - reference to the tid to be filled
+ * @param fw_desc - reference to the fw descriptor to be filled
+ * @param head_buf - reference to the head buffer
+ * @param tail_buf - reference to the tail buffer
+ */
+int
+htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
+				 uint32_t *msg_word,
+				 int msdu_iter,
+				 int *vdev_id,
+				 int *peer_id,
+				 int *tid,
+				 uint8_t *fw_desc,
+				 cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+#endif /* _OL_HTT_RX_API__H_ */

+ 969 - 0
core/dp/ol/inc/ol_htt_tx_api.h

@@ -0,0 +1,969 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_tx_api.h
+ * @brief Specify the tx HTT API functions called by the host data SW.
+ * @details
+ *  This file declares the HTT API functions that are specifically
+ *  related to transmit processing.
+ *  In particular, the methods of the abstract HTT tx descriptor are
+ *  specified.
+ */
+#ifndef _OL_HTT_TX_API__H_
+#define _OL_HTT_TX_API__H_
+
+/* #include <osapi_linux.h>    / * uint16_t, etc. * / */
+#include <osdep.h>              /* uint16_t, etc. */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <ol_cfg.h>             /* wlan_frm_fmt */
+
+#include <htt.h>                /* needed by inline functions */
+#include <cdf_net_types.h>
+#include <ol_htt_api.h>         /* htt_pdev_handle */
+#include <htt_types.h>
+#include <cdf_trace.h>
+
+/* Remove these macros when they get added to htt.h. */
+/*
+ * HTT_TX_DESC_EXTENSION is a 1-bit flag in bit 28 of dword 0 of the
+ * HTT tx descriptor (per the OFFSET/M/S values below).  htt_tx_desc_init
+ * sets it when a struct htt_tx_msdu_desc_ext_t has been prepended to
+ * the MSDU payload.
+ */
+#ifndef HTT_TX_DESC_EXTENSION_GET
+#define HTT_TX_DESC_EXTENSION_OFFSET_BYTES 0
+#define HTT_TX_DESC_EXTENSION_OFFSET_DWORD 0
+#define HTT_TX_DESC_EXTENSION_M        0x10000000
+#define HTT_TX_DESC_EXTENSION_S        28
+
+/* extract the extension flag from descriptor dword 0 */
+#define HTT_TX_DESC_EXTENSION_GET(_var) \
+	(((_var) & HTT_TX_DESC_EXTENSION_M) >> HTT_TX_DESC_EXTENSION_S)
+/* set the extension flag in descriptor dword 0 (value is range-checked) */
+#define HTT_TX_DESC_EXTENSION_SET(_var, _val)				\
+	do {								\
+		HTT_CHECK_SET_VAL(HTT_TX_DESC_EXTENSION, _val);		\
+		((_var) |= ((_val) << HTT_TX_DESC_EXTENSION_S));	\
+	} while (0)
+#endif
+
+/*================ meta-info about tx MSDUs =================================*/
+
+/*
+ * Tx frame type classification, used in htt_msdu_info_t.info.frame_type.
+ * For simplicity, use the IEEE 802.11 frame type values
+ * (mgmt = 0, ctrl = 1, data = 2).
+ */
+enum htt_frm_type {
+	htt_frm_type_mgmt = 0,
+	htt_frm_type_ctrl = 1,
+	htt_frm_type_data = 2
+};
+
+/*
+ * Tx frame sub-type, used in htt_msdu_info_t.info.frame_subtype.
+ * For simplicity, use the IEEE 802.11 frame sub-type values.
+ * Note that the mgmt and data subtypes share the same numeric space;
+ * which set applies is determined by the accompanying frame_type.
+ */
+enum htt_frm_subtype {
+	/* management frame subtypes */
+	htt_frm_subtype_mgmt_assoc_req = 0,
+	htt_frm_subtype_mgmt_assoc_resp = 1,
+	htt_frm_subtype_mgmt_reassoc_req = 2,
+	htt_frm_subtype_mgmt_reassoc_resp = 3,
+	htt_frm_subtype_mgmt_probe_req = 4,
+	htt_frm_subtype_mgmt_probe_resp = 5,
+	htt_frm_subtype_mgmt_timing_adv = 6,
+	htt_frm_subtype_mgmt_beacon = 8,
+	htt_frm_subtype_mgmt_atim = 9,
+	htt_frm_subtype_mgmt_disassoc = 10,
+	htt_frm_subtype_mgmt_auth = 11,
+	htt_frm_subtype_mgmt_deauth = 12,
+	htt_frm_subtype_mgmt_action = 13,
+	htt_frm_subtype_mgmt_action_no_ack = 14,
+
+	/* data frame subtypes */
+	htt_frm_subtype_data_data = 0,
+	htt_frm_subtype_data_data_cf_ack = 1,
+	htt_frm_subtype_data_data_cf_poll = 2,
+	htt_frm_subtype_data_data_cf_ack_cf_poll = 3,
+	htt_frm_subtype_data_null = 4,
+	htt_frm_subtype_data_cf_ack = 5,
+	htt_frm_subtype_data_cf_poll = 6,
+	htt_frm_subtype_data_cf_ack_cf_poll = 7,
+	htt_frm_subtype_data_QoS_data = 8,
+	htt_frm_subtype_data_QoS_data_cf_ack = 9,
+	htt_frm_subtype_data_QoS_data_cf_poll = 10,
+	htt_frm_subtype_data_QoS_data_cf_ack_cf_poll = 11,
+	htt_frm_subtype_data_QoS_null = 12,
+	htt_frm_subtype_data_QoS_cf_poll = 14,
+	htt_frm_subtype_data_QoS_cf_ack_cf_poll = 15,
+};
+
+/*
+ * Legacy OFDM data rates that may be requested per packet via
+ * ocb_tx_ctrl_hdr_t.datarate; values 0-7 select 6 to 54 Mbps as
+ * tabulated below.  htt_tx_desc_init rejects values above
+ * htt_ofdm_datarate_max.
+ */
+enum htt_ofdm_datarate {		/* Value    MBPS    Modulation  Coding*/
+	htt_ofdm_datarate_6_mbps = 0,	/* 0        6       BPSK        1/2   */
+	htt_ofdm_datarate_9_mbps = 1,	/* 1        9       BPSK        3/4   */
+	htt_ofdm_datarate_12_mbps = 2,	/* 2        12      QPSK        1/2   */
+	htt_ofdm_datarate_18_mbps = 3,	/* 3        18      QPSK        3/4   */
+	htt_ofdm_datarate_24_mbps = 4,	/* 4        24      16-QAM      1/2   */
+	htt_ofdm_datarate_36_mbps = 5,	/* 5        36      16-QAM      3/4   */
+	htt_ofdm_datarate_48_mbps = 6,	/* 6        48      64-QAM      1/2   */
+	htt_ofdm_datarate_54_mbps = 7,	/* 7        54      64-QAM      3/4   */
+	htt_ofdm_datarate_max = 7,
+};
+
+/**
+ * struct ocb_tx_ctrl_hdr_t - TX control header
+ * @version:		must be 1
+ * @length:		length of this structure
+ * @channel_freq:	channel on which to transmit the packet
+ * @valid_pwr:		bit 0: if set, tx pwr spec is valid
+ * @valid_datarate:	bit 1: if set, tx MCS mask spec is valid
+ * @valid_retries:	bit 2: if set, tx retries spec is valid
+ * @valid_chain_mask:	bit 3: if set, chain mask is valid
+ * @valid_expire_tsf:	bit 4: if set, tx expire TSF spec is valid
+ * @valid_tid:		bit 5: if set, TID is valid
+ * @reserved0_15_6:	bits 15:6 - unused, set to 0x0
+ * @all_flags:		union of all the flags
+ * @expire_tsf_lo:	TX expiry time (TSF) LSBs
+ * @expire_tsf_hi:	TX expiry time (TSF) MSBs
+ * @pwr:		Specify what power the tx frame needs to be transmitted
+ *			at. The power a signed (two's complement) value is in
+ *			units of 0.5 dBm. The value needs to be appropriately
+ *			sign-extended when extracting the value from the message
+ *			and storing it in a variable that is larger than A_INT8.
+ *			If the transmission uses multiple tx chains, this power
+ *			spec is the total transmit power, assuming incoherent
+ *			combination of per-chain power to produce the total
+ *			power.
+ * @datarate:		The desired modulation and coding scheme.
+ *			VALUE    DATA RATE   MODULATION  CODING RATE
+ *			@ 20 MHz
+ *			(MBPS)
+ *			0        6           BPSK        1/2
+ *			1        9           BPSK        3/4
+ *			2        12          QPSK        1/2
+ *			3        18          QPSK        3/4
+ *			4        24          16-QAM      1/2
+ *			5        36          16-QAM      3/4
+ *			6        48          64-QAM      1/2
+ *			7        54          64-QAM      3/4
+ * @retry_limit:	Specify the maximum number of transmissions, including
+ *			the initial transmission, to attempt before giving up if
+ *			no ack is received.
+ *			If the tx rate is specified, then all retries shall use
+ *			the same rate as the initial transmission.
+ *			If no tx rate is specified, the target can choose
+ *			whether to retain the original rate during the
+ *			retransmissions, or to fall back to a more robust rate.
+ * @chain_mask:		specify which chains to transmit from
+ * @ext_tid:		Extended Traffic ID (0-15)
+ * @reserved:		Ensure that the size of the structure is a multiple of
+ *			4. Must be 0.
+ *
+ * When sending an OCB packet, the user application has
+ * the option of including the following struct following an ethernet header
+ * with the proto field set to 0x8151. This struct includes various TX
+ * parameters including the TX power and MCS.
+ */
+PREPACK struct ocb_tx_ctrl_hdr_t {
+	uint16_t version;	/* must be 1 */
+	uint16_t length;	/* length of this structure */
+	uint16_t channel_freq;	/* channel on which to transmit the packet */
+
+	union {
+		struct {
+			/* validity bits for the optional tx specs below */
+			uint16_t
+			valid_pwr:1,
+			valid_datarate:1,
+			valid_retries:1,
+			valid_chain_mask:1,
+			valid_expire_tsf:1,
+			valid_tid:1,
+			reserved0_15_6:10;
+		};
+		uint16_t all_flags;	/* union of all the flags above */
+	};
+
+	uint32_t expire_tsf_lo;	/* TX expiry time (TSF) LSBs */
+	uint32_t expire_tsf_hi;	/* TX expiry time (TSF) MSBs */
+	int8_t pwr;		/* tx power, signed, units of 0.5 dBm */
+	uint8_t datarate;	/* enum htt_ofdm_datarate */
+	uint8_t retry_limit;	/* max transmissions, incl. the initial one */
+	uint8_t chain_mask;	/* which chains to transmit from */
+	uint8_t ext_tid;	/* Extended Traffic ID (0-15) */
+	uint8_t reserved[3];	/* pad to a multiple of 4 bytes; must be 0 */
+} POSTPACK;
+
+/**
+ * @brief tx MSDU meta-data that HTT may use to program the FW/HW tx descriptor
+ */
+struct htt_msdu_info_t {
+	/* the info sub-struct specifies the characteristics of the MSDU */
+	struct {
+		uint16_t ethertype;	/* the MSDU's ethertype */
+#define HTT_INVALID_PEER_ID 0xffff
+		/* peer_id - destination peer; HTT_INVALID_PEER_ID marks an
+		 * invalid/unspecified peer */
+		uint16_t peer_id;
+		/* vdev_id - copied into the tx descriptor's VDEV_ID field
+		 * by htt_tx_desc_init */
+		uint8_t vdev_id;
+		/* ext_tid - copied into the tx descriptor's EXT_TID field
+		 * (unless overridden by a per-packet OCB tx control TID) */
+		uint8_t ext_tid;
+		/*
+		 * l2_hdr_type - L2 format (802.3, native WiFi 802.11,
+		 * or raw 802.11)
+		 * Based on attach-time configuration, the tx frames provided
+		 * by the OS to the tx data SW are expected to be either
+		 * 802.3 format or the "native WiFi" variant of 802.11 format.
+		 * Internally, the driver may also inject tx frames into the tx
+		 * datapath, and these frames may be either 802.3 format or
+		 * 802.11 "raw" format, with no further 802.11 encapsulation
+		 * needed.
+		 * The tx frames are tagged with their frame format, so target
+		 * FW/HW will know how to interpret the packet's encapsulation
+		 * headers when doing tx classification, and what form of 802.11
+		 * header encapsulation is needed, if any.
+		 */
+		uint8_t l2_hdr_type;    /* enum htt_pkt_type */
+		/*
+		 * frame_type - is the tx frame management or data?
+		 * Just to avoid confusion, the enum values for this frame type
+		 * field use the 802.11 frame type values, although it is
+		 * unexpected for control frames to be sent through the host
+		 * data path.
+		 */
+		uint8_t frame_type;     /* enum htt_frm_type */
+		/*
+		 * frame subtype - this field specifies the sub-type of
+		 * management frames
+		 * Just to avoid confusion, the enum values for this frame
+		 * subtype field use the 802.11 management frame subtype values.
+		 */
+		uint8_t frame_subtype;  /* enum htt_frm_subtype */
+		uint8_t is_unicast;
+
+		/* dest_addr is not currently used.
+		 * It could be used as an input to a Tx BD (Riva tx descriptor)
+		 * signature computation.
+		   uint8_t *dest_addr;
+		 */
+
+		uint8_t l3_hdr_offset;  /* wrt cdf_nbuf_data(msdu), in bytes */
+
+		/* l4_hdr_offset is not currently used.
+		 * It could be used to specify to a TCP/UDP checksum computation
+		 * engine where the TCP/UDP header starts.
+		 */
+		/* uint8_t l4_hdr_offset; - wrt cdf_nbuf_data(msdu), in bytes */
+	} info;
+	/* the action sub-struct specifies how to process the MSDU */
+	struct {
+		uint8_t use_6mbps;      /* mgmt frames: option to force
+					   6 Mbps rate */
+		/* do_encrypt - if clear, htt_tx_desc_init sets the
+		 * NO_ENCRYPT flag in the tx descriptor */
+		uint8_t do_encrypt;
+		uint8_t do_tx_complete;
+		/* NOTE(review): tx_comp_req presumably requests a per-frame
+		 * tx completion from the target - confirm against callers */
+		uint8_t tx_comp_req;
+
+		/*
+		 * cksum_offload - Specify whether checksum offload is
+		 * enabled or not
+		 * Target FW uses this flag to turn on HW checksumming
+		 * 0x0 - No checksum offload
+		 * 0x1 - L3 header checksum only
+		 * 0x2 - L4 checksum only
+		 * 0x3 - L3 header checksum + L4 checksum
+		 */
+		cdf_nbuf_tx_cksum_t cksum_offload;
+	} action;
+};
+
+/**
+ * htt_msdu_info_dump() - log the contents of a tx MSDU meta-data object
+ * @msdu_info: the HTT MSDU info object to dump
+ *
+ * Emits one CDF_TRACE line per field, at INFO_LOW level on the TXRX
+ * module.  (The previous version logged the is_unicast field three
+ * times due to a copy/paste error; it is now logged exactly once.)
+ */
+static inline void htt_msdu_info_dump(struct htt_msdu_info_t *msdu_info)
+{
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "HTT MSDU info object (%p)\n", msdu_info);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  ethertype: %#x\n", msdu_info->info.ethertype);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  peer_id: %d\n", msdu_info->info.peer_id);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  vdev_id: %d\n", msdu_info->info.vdev_id);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  ext_tid: %d\n", msdu_info->info.ext_tid);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  l2_hdr_type: %d\n", msdu_info->info.l2_hdr_type);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  frame_type: %d\n", msdu_info->info.frame_type);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  frame_subtype: %d\n", msdu_info->info.frame_subtype);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  is_unicast: %u\n", msdu_info->info.is_unicast);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  l3_hdr_offset: %u\n", msdu_info->info.l3_hdr_offset);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  use 6 Mbps: %d\n", msdu_info->action.use_6mbps);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  do_encrypt: %d\n", msdu_info->action.do_encrypt);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "  do_tx_complete: %d\n", msdu_info->action.do_tx_complete);
+}
+
+/*================ tx completion message field access methods ===============*/
+
+/**
+ * @brief Look up the descriptor ID of the nth MSDU from a tx completion msg.
+ * @details
+ *  A tx completion message tells the host that the target is done
+ *  transmitting a series of MSDUs.  The message uses a descriptor ID
+ *  to identify each such MSDU.  This function/macro is used to
+ *  find the ID of one such MSDU referenced by the tx completion message.
+ *
+ * @param iterator - tx completion message context provided by HTT to the
+ *      tx completion message handler.  This abstract reference to the
+ *      HTT tx completion message's payload allows the data SW's tx
+ *      completion handler to not care about the format of the HTT
+ *      tx completion message.
+ * @param num - (zero-based) index to specify a single MSDU within the
+ *      series of MSDUs referenced by the tx completion message
+ * @return descriptor ID for the specified MSDU
+ */
+uint16_t htt_tx_compl_desc_id(void *iterator, int num);
+
+/*========================= tx descriptor operations ========================*/
+
+/**
+ * @brief Allocate a HTT abstract tx descriptor.
+ * @details
+ *  Allocate a HTT abstract tx descriptor from a pool within "consistent"
+ *  memory, which is accessible by HIF and/or MAC DMA as well as by the
+ *  host CPU.
+ *  It is expected that the tx datapath will allocate HTT tx descriptors
+ *  and link them with datapath SW tx descriptors up front as the driver
+ *  is loaded.  Thereafter, the link from datapath SW tx descriptor to
+ *  HTT tx descriptor will be maintained until the driver is unloaded.
+ *
+ * @param htt_pdev - handle to the HTT instance making the allocation
+ * @param[OUT] paddr_lo - physical address of the HTT descriptor
+ * @return success -> descriptor handle, -OR- failure -> NULL
+ */
+void *htt_tx_desc_alloc(htt_pdev_handle htt_pdev, uint32_t *paddr_lo);
+
+/**
+ * @brief Free a HTT abstract tx descriptor.
+ *
+ * @param htt_pdev - handle to the HTT instance that made the allocation
+ * @param htt_tx_desc - the descriptor to free
+ */
+void htt_tx_desc_free(htt_pdev_handle htt_pdev, void *htt_tx_desc);
+
+#if defined(HELIUMPLUS_PADDR64)
+/* TODO: oka: use kernel-doc format */
+/**
+ * @brief Allocate the tx fragment (MSDU Link Extension) descriptor
+ *        associated with the given tx descriptor index.
+ *
+ * @param pdev - handle to the HTT instance that owns the descriptor pool
+ * @param index - index of the tx descriptor whose fragment desc is needed
+ * @param[OUT] frag_paddr_lo - physical address of the fragment descriptor
+ *                             (MSDU Link Extension Descriptor)
+ * @return virtual address of the fragment descriptor
+ */
+void *
+htt_tx_frag_alloc(htt_pdev_handle pdev,
+		  u_int16_t index,
+		  u_int32_t *frag_paddr_lo);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+/**
+ * @brief Discard all tx frames in the process of being downloaded.
+ * @details
+ * This function discards any tx frames queued in HTT or the layers
+ * under HTT.
+ * The download completion callback is invoked on these frames.
+ *
+ * @param htt_pdev - handle to the HTT instance
+ */
+void htt_tx_pending_discard(htt_pdev_handle pdev);
+
+/**
+ * @brief Download a MSDU descriptor and (a portion of) the MSDU payload.
+ * @details
+ *  This function is used within LL systems to download a tx descriptor and
+ *  the initial portion of the tx MSDU payload, and within HL systems to
+ *  download the tx descriptor and the entire tx MSDU payload.
+ *  The HTT layer determines internally how much of the tx descriptor
+ *  actually needs to be downloaded. In particular, the HTT layer does not
+ *  download the fragmentation descriptor, and only for the LL case downloads
+ *  the physical address of the fragmentation descriptor.
+ *  In HL systems, the tx descriptor and the entire frame are downloaded.
+ *  In LL systems, only the tx descriptor and the header of the frame are
+ *  downloaded.  To determine how much of the tx frame to download, this
+ *  function assumes the tx frame is the default frame type, as specified
+ *  by ol_cfg_frame_type.  "Raw" frames need to be transmitted through the
+ *  alternate htt_tx_send_nonstd function.
+ *  The tx descriptor has already been attached to the cdf_nbuf object during
+ *  a preceding call to htt_tx_desc_init.
+ *
+ * @param htt_pdev - the handle of the physical device sending the tx data
+ * @param msdu - the frame being transmitted
+ * @param msdu_id - unique ID for the frame being transmitted
+ * @return 0 -> success, -OR- 1 -> failure
+ */
+int
+htt_tx_send_std(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu, uint16_t msdu_id);
+
+/**
+ * @brief Download a Batch Of Tx MSDUs
+ * @details
+ *     Each MSDU already has the MSDU ID stored in the headroom of the
+ *     netbuf data buffer, and has the HTT tx descriptor already attached
+ *     as a prefix fragment to the netbuf.
+ *
+ * @param htt_pdev - the handle of the physical device sending the tx data
+ * @param head_msdu - the MSDU Head for Tx batch being transmitted
+ * @param num_msdus - The total Number of MSDU's provided for batch tx
+ * @return null-terminated linked-list of unaccepted frames
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle htt_pdev,
+		  cdf_nbuf_t head_msdu, int num_msdus);
+
+/* The htt scheduler for queued packets in htt
+ * htt when unable to send to HTC because of lack of resource
+ * forms a nbuf queue which is flushed when tx completion event from
+ * target is received
+ */
+
+void htt_tx_sched(htt_pdev_handle pdev);
+
+/**
+ * @brief Same as htt_tx_send_std, but can handle raw frames.
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle htt_pdev,
+		   cdf_nbuf_t msdu,
+		   uint16_t msdu_id, enum htt_pkt_type pkt_type);
+
+/**
+ * htt_pkt_dl_len_get() Gets the HTT PKT download length.
+ * @pdev: pointer to struct htt_pdev_t
+ *
+ * Return: size of HTT packet download length.
+ */
+int
+htt_pkt_dl_len_get(struct htt_pdev_t *pdev);
+
+#define HTT_TX_CLASSIFY_BIT_S	4  /* Used to set
+				    * classify bit in HTT desc.*/
+
+/**
+ * enum htt_ce_tx_pkt_type - enum of packet types to be set in CE
+ *			     descriptor
+ * @tx_pkt_type_raw: Value set for RAW frames
+ * @tx_pkt_type_native_wifi: Value set for NATIVE WIFI frames
+ * @tx_pkt_type_eth2: Value set for Ethernet II frames (mostly default)
+ * @tx_pkt_type_802_3: Value set for 802.3 / original ethernet frames
+ * @tx_pkt_type_mgmt: Value set for MGMT frames over HTT
+ *
+ */
+/*
+ * These values are written into the CE descriptor's packet-type field;
+ * HTT packet types are translated to them via the htt_to_ce_pkt_type[]
+ * table (see the data_attr setup in htt_tx_desc_init).
+ */
+enum htt_ce_tx_pkt_type {
+	tx_pkt_type_raw = 0,
+	tx_pkt_type_native_wifi = 1,
+	tx_pkt_type_eth2 = 2,
+	tx_pkt_type_802_3 = 3,
+	tx_pkt_type_mgmt = 4
+};
+
+
+extern const uint32_t htt_to_ce_pkt_type[];
+
+/**
+ * Provide a constant to specify the offset of the HTT portion of the
+ * HTT tx descriptor, to avoid having to export the descriptor definition.
+ * The htt module checks internally that this exported offset is consistent
+ * with the private tx descriptor definition.
+ *
+ * Similarly, export a definition of the HTT tx descriptor size, and then
+ * check internally that this exported constant matches the private tx
+ * descriptor definition.
+ */
+#define HTT_TX_DESC_VADDR_OFFSET 8
+
+/**
+ * htt_tx_desc_init() - Initialize the per packet HTT Tx descriptor
+ * @pdev:		  The handle of the physical device sending the
+ *			  tx data
+ * @htt_tx_desc:	  Abstract handle to the tx descriptor
+ * @htt_tx_desc_paddr_lo: Physical address of the HTT tx descriptor
+ * @msdu_id:		  ID to tag the descriptor with.
+ *			  The FW sends this ID back to host as a cookie
+ *			  during Tx completion, which the host uses to
+ *			  identify the MSDU.
+ *			  This ID is an index into the OL Tx desc. array.
+ * @msdu:		  The MSDU that is being prepared for transmission
+ * @msdu_info:		  Tx MSDU meta-data
+ * @tso_info:		  Storage for TSO meta-data
+ * @tx_ctrl:		  Optional per-packet OCB tx control header; when
+ *			  non-NULL and any of its flags are set, an
+ *			  extension descriptor is prepended to the MSDU
+ * @is_dsrc:		  Non-zero if the frame is a DSRC frame; recorded
+ *			  in the extension descriptor
+ *
+ * This function initializes the HTT tx descriptor.
+ * HTT Tx descriptor is a host-f/w interface structure, and meta-data
+ * accompanying every packet downloaded to f/w via the HTT interface.
+ *
+ * Fixes vs. the original:
+ * - ce_pkt_type widened from uint8_t to uint32_t so the
+ *   "0xffffffff == ce_pkt_type" invalid-type check can actually fire
+ *   (a uint8_t can never equal 0xffffffff).
+ * - Removed the unconditional HTT_TX_DESC_EXT_TID_SET() that clobbered
+ *   the per-packet TID chosen from tx_ctrl->valid_tid just above it.
+ */
+static inline
+void
+htt_tx_desc_init(htt_pdev_handle pdev,
+		 void *htt_tx_desc,
+		 uint32_t htt_tx_desc_paddr_lo,
+		 uint16_t msdu_id,
+		 cdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
+		 struct cdf_tso_info_t *tso_info,
+		 struct ocb_tx_ctrl_hdr_t *tx_ctrl,
+		 uint8_t is_dsrc)
+{
+	uint8_t  pkt_type, pkt_subtype = 0;
+	uint32_t hw_classify = 0, data_attr = 0, ce_pkt_type = 0;
+	uint32_t *word0, *word1, local_word3;
+#if HTT_PADDR64
+	uint32_t *word4;
+#else /* ! HTT_PADDR64 */
+	uint32_t *word3;
+#endif /* HTT_PADDR64 */
+	uint32_t local_word0, local_word1;
+	/* the host tx descriptor sits HTT_TX_DESC_VADDR_OFFSET bytes
+	 * before the HTT (target-visible) portion */
+	struct htt_host_tx_desc_t *htt_host_tx_desc =
+		(struct htt_host_tx_desc_t *)
+		(((char *)htt_tx_desc) - HTT_TX_DESC_VADDR_OFFSET);
+	bool desc_ext_required = (tx_ctrl && tx_ctrl->all_flags != 0);
+
+	word0 = (uint32_t *) htt_tx_desc;
+	word1 = word0 + 1;
+	/*
+	 * word2 is frag desc pointer
+	 * word3 or 4 is peer_id
+	 */
+#if HTT_PADDR64
+	word4 = word0 + 4;      /* Dword 3 */
+#else /* ! HTT_PADDR64  */
+	word3 = word0 + 3;      /* Dword 3 */
+#endif /* HTT_PADDR64 */
+
+	pkt_type = msdu_info->info.l2_hdr_type;
+
+	if (cdf_likely(pdev->cfg.ce_classify_enabled)) {
+		if (cdf_likely(pkt_type == htt_pkt_type_eth2 ||
+			pkt_type == htt_pkt_type_ethernet))
+			cdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
+				     hw_classify);
+
+		ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
+		if (0xffffffff == ce_pkt_type) {
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_DEBUG,
+			"Invalid HTT pkt type %d\n", pkt_type);
+			return;
+		}
+	}
+
+	/*
+	 * HTT Tx Desc is in uncached memory. Use cached writes per word, to
+	 * reduce unnecessary memory access.
+	 */
+
+	local_word0 = 0;
+	if (msdu_info) {
+		HTT_H2T_MSG_TYPE_SET(local_word0, HTT_H2T_MSG_TYPE_TX_FRM);
+		HTT_TX_DESC_PKT_TYPE_SET(local_word0, pkt_type);
+		HTT_TX_DESC_PKT_SUBTYPE_SET(local_word0, pkt_subtype);
+		HTT_TX_DESC_VDEV_ID_SET(local_word0, msdu_info->info.vdev_id);
+		/* prefer the per-packet TID from the OCB tx control header,
+		 * when supplied; otherwise fall back to the MSDU meta-data */
+		if (tx_ctrl && tx_ctrl->valid_tid)
+			HTT_TX_DESC_EXT_TID_SET(local_word0, tx_ctrl->ext_tid);
+		else
+			HTT_TX_DESC_EXT_TID_SET(local_word0,
+				msdu_info->info.ext_tid);
+		HTT_TX_DESC_EXTENSION_SET(local_word0, desc_ext_required);
+		HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
+					      msdu_info->action.cksum_offload);
+		HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
+					   msdu_info->action.do_encrypt ?
+					   0 : 1);
+	}
+
+	*word0 = local_word0;
+
+	local_word1 = 0;
+
+#if defined(FEATURE_TSO)
+	if (tso_info->is_tso)
+		HTT_TX_DESC_FRM_LEN_SET(local_word1, tso_info->total_len);
+	else
+#endif
+		HTT_TX_DESC_FRM_LEN_SET(local_word1, cdf_nbuf_len(msdu));
+
+	HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
+	*word1 = local_word1;
+
+	/* Initialize peer_id to INVALID_PEER because
+	   this is NOT Reinjection path */
+	local_word3 = HTT_INVALID_PEER;
+	if (tx_ctrl && tx_ctrl->channel_freq)
+		HTT_TX_DESC_CHAN_FREQ_SET(local_word3, tx_ctrl->channel_freq);
+#if HTT_PADDR64
+	*word4 = local_word3;
+#else /* ! HTT_PADDR64 */
+	*word3 = local_word3;
+#endif /* HTT_PADDR64 */
+
+	/*
+	 *  If any of the tx control flags are set, then we need the extended
+	 *  HTT header.
+	 */
+	if (desc_ext_required) {
+		struct htt_tx_msdu_desc_ext_t local_desc_ext = {0};
+
+		/*
+		 * Copy the info that was read from TX control header from the
+		 * user application to the extended HTT header.
+		 * First copy everything
+		 * to a local temp structure, and then copy everything to the
+		 * actual uncached structure in one go to save memory writes.
+		 */
+		local_desc_ext.valid_pwr = tx_ctrl->valid_pwr;
+		local_desc_ext.valid_mcs_mask = tx_ctrl->valid_datarate;
+		local_desc_ext.valid_retries = tx_ctrl->valid_retries;
+		local_desc_ext.valid_expire_tsf = tx_ctrl->valid_expire_tsf;
+		local_desc_ext.valid_chainmask = tx_ctrl->valid_chain_mask;
+
+		local_desc_ext.pwr = tx_ctrl->pwr;
+		/* the MCS mask bit for legacy OFDM rate N is bit N+4 */
+		if (tx_ctrl->valid_datarate &&
+				tx_ctrl->datarate <= htt_ofdm_datarate_max)
+			local_desc_ext.mcs_mask =
+				(1 << (tx_ctrl->datarate + 4));
+		local_desc_ext.retry_limit = tx_ctrl->retry_limit;
+		local_desc_ext.expire_tsf_lo = tx_ctrl->expire_tsf_lo;
+		local_desc_ext.expire_tsf_hi = tx_ctrl->expire_tsf_hi;
+		local_desc_ext.chain_mask = tx_ctrl->chain_mask;
+
+		local_desc_ext.is_dsrc = (is_dsrc != 0);
+
+		/* prepend the extension descriptor to the MSDU payload */
+		cdf_nbuf_push_head(msdu, sizeof(local_desc_ext));
+		cdf_mem_copy(cdf_nbuf_data(msdu), &local_desc_ext,
+				sizeof(local_desc_ext));
+	}
+
+	/*
+	 * Specify that the data provided by the OS is a bytestream,
+	 * and thus should not be byte-swapped during the HIF download
+	 * even if the host is big-endian.
+	 * There could be extra fragments added before the OS's fragments,
+	 * e.g. for TSO, so it's incorrect to clear the frag 0 wordstream flag.
+	 * Instead, clear the wordstream flag for the final fragment, which
+	 * is certain to be (one of the) fragment(s) provided by the OS.
+	 * Setting the flag for this final fragment suffices for specifying
+	 * all fragments provided by the OS rather than added by the driver.
+	 */
+	cdf_nbuf_set_frag_is_wordstream(msdu, cdf_nbuf_get_num_frags(msdu) - 1,
+					0);
+
+	/* store a link to the HTT tx descriptor within the netbuf */
+	cdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
+				(char *)htt_host_tx_desc, /* virtual addr */
+				htt_tx_desc_paddr_lo,
+				0 /* phys addr MSBs - n/a */);
+
+	/*
+	 * Indicate that the HTT header (and HTC header) is a meta-data
+	 * "wordstream", i.e. series of uint32_t, rather than a data
+	 * bytestream.
+	 * This allows the HIF download to byteswap the HTT + HTC headers if
+	 * the host is big-endian, to convert to the target's little-endian
+	 * format.
+	 */
+	cdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);
+
+	if (cdf_likely(pdev->cfg.ce_classify_enabled &&
+		(msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
+		uint32_t pkt_offset = cdf_nbuf_get_frag_len(msdu, 0);
+		data_attr = hw_classify << CDF_CE_TX_CLASSIFY_BIT_S;
+		data_attr |= ce_pkt_type << CDF_CE_TX_PKT_TYPE_BIT_S;
+		data_attr |= pkt_offset  << CDF_CE_TX_PKT_OFFSET_BIT_S;
+	}
+
+	cdf_nbuf_data_attr_set(msdu, data_attr);
+}
+
+/**
+ * @brief Set a flag to indicate that the MSDU in question was postponed.
+ * @details
+ *  In systems in which the host retains its tx frame until the target sends
+ *  a tx completion, the target has the option of discarding its copy of
+ *  the tx descriptor (and frame, for HL) and sending a "postpone" message
+ *  to the host, to inform the host that it must eventually download the
+ *  tx descriptor (and frame, for HL).
+ *  Before the host downloads the postponed tx desc/frame again, it will use
+ *  this function to set a flag in the HTT tx descriptor indicating that this
+ *  is a re-send of a postponed frame, rather than a new frame.  The target
+ *  uses this flag to keep the correct order between re-sent and new tx frames.
+ *  This function is relevant for LL systems.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ */
+void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc);
+
+/**
+ * @brief Set a flag to tell the target that more tx downloads are en route.
+ * @details
+ *  At times, particularly in response to a U-APSD trigger in a HL system, the
+ *  host will download multiple tx descriptors (+ frames, in HL) in a batch.
+ *  The host will use this function to set a "more" flag in the initial
+ *  and interior frames of the batch, to tell the target that more tx frame
+ *  downloads within the batch are imminent.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ */
+void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc);
+
+/**
+ * @brief Specify the number of fragments in the fragmentation descriptor.
+ * @details
+ *  Specify the number of fragments within the MSDU, i.e. the number of
+ *  elements within the fragmentation descriptor.
+ *  For LL, this is used to terminate the list of fragments used by the
+ *  HW's tx MAC DMA.
+ *  For HL, this is used to terminate the list of fragments provided to
+ *  HTC for download.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ * @param num_frags - the number of fragments comprising the MSDU
+ */
+static inline
+void
+htt_tx_desc_num_frags(htt_pdev_handle pdev, void *desc, uint32_t num_frags)
+{
+	/*
+	 * Set the element after the valid frag elems to 0x0,
+	 * to terminate the list of fragments.
+	 */
+#if defined(HELIUMPLUS_PADDR64)
+	if (HTT_WIFI_IP(pdev, 2, 0)) {
+		/* Skip past the TSO-related info words (WIFI2.0 layout) so
+		 * that desc points at frag_ptr0, the first fragment entry. */
+		desc = (void *)&(((struct msdu_ext_desc_t *)desc)->frag_ptr0);
+		/* Each fragment entry is 8 bytes (num_frags << 3).
+		 * Frag ptr is 48 bit wide so clear the next dword as well,
+		 * zeroing the entire entry after the last valid fragment. */
+		*((uint32_t *)(((char *)desc) + (num_frags << 3))) = 0;
+		*((uint32_t *)
+		  (((char *)desc) + (num_frags << 3) + sizeof(uint32_t))) = 0;
+		/* TODO: OKA: remove the magic constants */
+	} else {
+		/* XXXOKA -- Looks like a bug, called with htt_frag_desc */
+		*((u_int32_t *)
+		  (((char *) desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
+	}
+#else /* ! HELIUMPLUS_PADDR64 */
+	/* Legacy layout: the fragment list follows the HTT tx descriptor;
+	 * each entry is 8 bytes, so zero the word right after the last one. */
+	*((uint32_t *)
+	  (((char *)desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
+#endif /* HELIUMPLUS_PADDR64 */
+}
+
+/* checksum offload flags for hw */
+#define IPV4_CSUM_EN     0x00010000
+#define UDP_IPV4_CSUM_EN 0x00020000
+#define UDP_IPV6_CSUM_EN 0x00040000
+#define TCP_IPV4_CSUM_EN 0x00080000
+#define TCP_IPV6_CSUM_EN 0x00100000
+#define PARTIAL_CSUM_EN  0x00200000
+
+/**
+ * @brief Specify the location and size of a fragment of a tx MSDU.
+ * @details
+ *  In LL systems, the tx MAC DMA needs to know how the MSDU is constructed
+ *  from fragments.
+ *  In LL and HL systems, the HIF's download DMA to the target (LL: tx desc
+ *  + header of tx payload; HL: tx desc + entire tx payload) needs to know
+ *  where to find the fragments to download.
+ *  The tx data SW uses this function to specify the location and size of
+ *  each of the MSDU's fragments.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the HTT tx descriptor
+ * @param frag_num - which fragment is being specified (zero-based indexing)
+ * @param frag_phys_addr - DMA/physical address of the fragment
+ * @param frag_len - number of bytes within the fragment
+ */
+static inline
+void
+htt_tx_desc_frag(htt_pdev_handle pdev,
+		 void *desc,
+		 int frag_num, uint32_t frag_phys_addr, uint16_t frag_len)
+{
+	u_int32_t *word;
+
+#if defined(HELIUMPLUS_PADDR64)
+	if (HTT_WIFI_IP(pdev, 2, 0)) {
+		word = (u_int32_t *)(desc);
+		/* Initialize top 6 words of TSO flags per packet */
+		*word++ = 0;
+		*word++ = 0;
+		*word++ = 0;
+		/* Word 3 carries the HW checksum-offload enable flags.
+		 * NOTE(review): the |= below assumes this word is already
+		 * zeroed (descriptor memory pre-cleared) -- confirm,
+		 * otherwise stale bits could leak into the descriptor. */
+		if (((struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev))
+		    ->ip_tcp_udp_checksum_offload)
+			*word |= (IPV4_CSUM_EN | TCP_IPV4_CSUM_EN |
+					TCP_IPV6_CSUM_EN | UDP_IPV4_CSUM_EN |
+					UDP_IPV6_CSUM_EN);
+		else
+			*word = 0;
+		word++;
+		*word++ = 0;
+		*word++ = 0;
+
+		/* After the 6 TSO words, 'word' must line up exactly with
+		 * the first fragment slot of the extension descriptor. */
+		cdf_assert_always(word == &(((struct msdu_ext_desc_t *)
+					     desc)->frag_ptr0));
+
+		/* Each fragment consumes 2 DWORDS */
+		word += (frag_num << 1);
+		*word = frag_phys_addr;
+
+		word++;
+		/* Length occupies the upper 16 bits of the second DWORD */
+		*word = (frag_len<<16);
+
+	} else {
+		/* For Helium+, this block cannot exist */
+		CDF_ASSERT(0);
+	}
+#else /* !defined(HELIUMPLUS_PADDR64) */
+	/* Legacy layout: 8-byte {addr, len} entries follow the tx desc;
+	 * here the length fills the whole second word (no shift). */
+	word = (uint32_t *) (((char *)desc) + HTT_TX_DESC_LEN + frag_num * 8);
+	*word = frag_phys_addr;
+	word++;
+	*word = frag_len;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+}
+
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+				 void *desc,
+				 uint32_t paddr,
+				 uint32_t frag_desc_paddr_lo,
+				 int reset);
+
+/**
+ * @brief Specify the type and subtype of a tx frame.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param type - format of the MSDU (802.3, native WiFi, raw, or mgmt)
+ * @param sub_type - sub_type (relevant for raw frames)
+ */
+static inline
+void
+htt_tx_desc_type(htt_pdev_handle pdev,
+		 void *htt_tx_desc, enum wlan_frm_fmt type, uint8_t sub_type)
+{
+	/* The first dword of the HTT tx desc holds pkt type and subtype */
+	uint32_t *desc_word0 = (uint32_t *) htt_tx_desc;
+
+	/* Clear out the old type/subtype bits, then write the new values */
+	*desc_word0 &= ~(HTT_TX_DESC_PKT_TYPE_M | HTT_TX_DESC_PKT_SUBTYPE_M);
+	HTT_TX_DESC_PKT_TYPE_SET(*desc_word0, type);
+	HTT_TX_DESC_PKT_SUBTYPE_SET(*desc_word0, sub_type);
+}
+
+/***** TX MGMT DESC management APIs ****/
+
+/* Number of mgmt descriptors in the pool */
+#define HTT_MAX_NUM_MGMT_DESCS 32
+
+/** htt_tx_mgmt_desc_pool_alloc
+ * @description - allocates the memory for mgmt frame descriptors
+ * @param  - htt pdev object
+ * @param  - num of descriptors to be allocated in the pool
+ */
+void htt_tx_mgmt_desc_pool_alloc(struct htt_pdev_t *pdev, A_UINT32 num_elems);
+
+/** htt_tx_mgmt_desc_alloc
+ * @description - reserves a mgmt descriptor from the pool
+ * @param  - htt pdev object
+ * @param  - pointer to variable to hold the allocated desc id
+ * @param  - pointer to the management frame from UMAC
+ * @return - pointer the allocated mgmt descriptor
+ */
+cdf_nbuf_t
+htt_tx_mgmt_desc_alloc(struct htt_pdev_t *pdev, A_UINT32 *desc_id,
+		       cdf_nbuf_t mgmt_frm);
+
+/** htt_tx_mgmt_desc_free
+ * @description - releases the management descriptor back to the pool
+ * @param  - htt pdev object
+ * @param  - descriptor ID
+ */
+void
+htt_tx_mgmt_desc_free(struct htt_pdev_t *pdev, A_UINT8 desc_id,
+		      A_UINT32 status);
+
+/** htt_tx_mgmt_desc_pool_free
+ * @description - releases all the resources allocated for mgmt desc pool
+ * @param  - htt pdev object
+ */
+void htt_tx_mgmt_desc_pool_free(struct htt_pdev_t *pdev);
+
+/**
+ * @brief Provide a buffer to store a 802.11 header added by SW tx encap
+ *
+ * @param htt_tx_desc - which frame the 802.11 header is being added to
+ * @param new_l2_hdr_size - how large the buffer needs to be
+ */
+#define htt_tx_desc_mpdu_header(htt_tx_desc, new_l2_hdr_size) /*NULL*/
+/**
+ * @brief How many tx credits would be consumed by the specified tx frame.
+ *
+ * @param msdu - the tx frame in question
+ * @return number of credits used for this tx frame
+ */
+#define htt_tx_msdu_credit(msdu) 1      /* 1 credit per buffer */
+#ifdef HTT_DBG
+void htt_tx_desc_display(void *tx_desc);
+#else
+#define htt_tx_desc_display(tx_desc)
+#endif
+
+static inline void htt_tx_desc_set_peer_id(void *htt_tx_desc, uint16_t peer_id)
+{
+	/* Store the peer ID into its 16-bit slot in the HTT tx descriptor */
+	char *base = (char *)htt_tx_desc;
+
+	*(uint16_t *)(base + HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES) =
+		peer_id;
+}
+
+static inline
+void htt_tx_desc_set_chanfreq(void *htt_tx_desc, uint16_t chanfreq)
+{
+	/*
+	 * The channel frequency field sits right after the 16-bit peer ID
+	 * field.  CHAN_FREQ_OFFSET_BYTES is not used here because it is
+	 * expressed in DWORD units, and the SET macro from htt.h cannot be
+	 * used because htt_tx_desc is an incomplete type.
+	 */
+	char *base = (char *)htt_tx_desc;
+
+	*(uint16_t *)(base + HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES +
+		      sizeof(A_UINT16)) = chanfreq;
+}
+
+#if defined(FEATURE_TSO)
+void
+htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
+	 struct cdf_tso_info_t *tso_info);
+#else
+#define htt_tx_desc_fill_tso_info(pdev, desc, tso_info)
+#endif
+#endif /* _OL_HTT_TX_API__H_ */

+ 42 - 0
core/dp/ol/inc/ol_osif_api.h

@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_osif_api.h
+ * @brief Definitions used in multiple external interfaces to the txrx SW.
+ */
+#ifndef _OL_OSIF_API__H_
+#define _OL_OSIF_API__H_
+
+/**
+ * @typedef ol_osif_vdev_handle
+ * @brief opaque handle for OS shim virtual device object
+ */
+struct ol_osif_vdev_t;
+typedef struct ol_osif_vdev_t *ol_osif_vdev_handle;
+
+#endif /* _OL_OSIF_API__H_ */

+ 103 - 0
core/dp/ol/inc/ol_params.h

@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Definitions for the Atheros Wireless LAN controller driver.
+ */
+#ifndef _DEV_OL_PARAMS_H
+#define _DEV_OL_PARAMS_H
+#include "ol_txrx_stats.h"
+#include "wlan_defs.h"          /* for wlan stats definitions */
+/*
+** Enumeration of PDEV Configuration parameter
+*/
+
+enum ol_ath_param_t {
+	OL_ATH_PARAM_TXCHAINMASK = 0,
+	OL_ATH_PARAM_RXCHAINMASK,
+	OL_ATH_PARAM_TXCHAINMASKLEGACY,
+	OL_ATH_PARAM_RXCHAINMASKLEGACY,
+	OL_ATH_PARAM_CHAINMASK_SEL,
+	OL_ATH_PARAM_AMPDU,
+	OL_ATH_PARAM_AMPDU_LIMIT,
+	OL_ATH_PARAM_AMPDU_SUBFRAMES,
+	OL_ATH_PARAM_LDPC,
+	OL_ATH_PARAM_NON_AGG_SW_RETRY_TH,
+	OL_ATH_PARAM_AGG_SW_RETRY_TH,
+	OL_ATH_PARAM_STA_KICKOUT_TH,
+	OL_ATH_PARAM_WLAN_PROF_ENABLE,
+	OL_ATH_PARAM_LTR_ENABLE,
+	OL_ATH_PARAM_LTR_AC_LATENCY_BE,
+	OL_ATH_PARAM_LTR_AC_LATENCY_BK,
+	OL_ATH_PARAM_LTR_AC_LATENCY_VI,
+	OL_ATH_PARAM_LTR_AC_LATENCY_VO,
+	OL_ATH_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	OL_ATH_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	OL_ATH_PARAM_LTR_SLEEP_OVERRIDE,
+	OL_ATH_PARAM_LTR_RX_OVERRIDE,
+	OL_ATH_PARAM_L1SS_ENABLE,
+	OL_ATH_PARAM_DSLEEP_ENABLE,
+	OL_ATH_PARAM_PCIELP_TXBUF_FLUSH,
+	OL_ATH_PARAM_PCIELP_TXBUF_WATERMARK,
+	OL_ATH_PARAM_PCIELP_TXBUF_TMO_EN,
+	OL_ATH_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	OL_ATH_PARAM_BCN_BURST,
+	OL_ATH_PARAM_ARP_AC_OVERRIDE,
+	OL_ATH_PARAM_TXPOWER_LIMIT2G,
+	OL_ATH_PARAM_TXPOWER_LIMIT5G,
+	OL_ATH_PARAM_TXPOWER_SCALE,
+	OL_ATH_PARAM_DCS,
+	OL_ATH_PARAM_ANI_ENABLE,
+	OL_ATH_PARAM_ANI_POLL_PERIOD,
+	OL_ATH_PARAM_ANI_LISTEN_PERIOD,
+	OL_ATH_PARAM_ANI_OFDM_LEVEL,
+	OL_ATH_PARAM_ANI_CCK_LEVEL,
+	OL_ATH_PARAM_PROXYSTA,
+	OL_ATH_PARAM_DYN_TX_CHAINMASK,
+	OL_ATH_PARAM_VOW_EXT_STATS,
+	OL_ATH_PARAM_PWR_GATING_ENABLE,
+	OL_ATH_PARAM_CHATTER,
+};
+
+/*
+** Enumeration of HAL Configuration parameter
+*/
+
+enum ol_hal_param_t {
+	OL_HAL_CONFIG_DMA_BEACON_RESPONSE_TIME = 0
+};
+
+/*
+** structure to hold all stats information
+** for offload device interface
+*/
+struct ol_stats {
+	int txrx_stats_level;
+	struct ol_txrx_stats txrx_stats;
+	struct wlan_dbg_stats stats;
+};
+#endif /* _DEV_OL_PARAMS_H  */

+ 113 - 0
core/dp/ol/inc/ol_txrx_api.h

@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_api.h
+ * @brief Definitions used in multiple external interfaces to the txrx SW.
+ */
+#ifndef _OL_TXRX_API__H_
+#define _OL_TXRX_API__H_
+
+/**
+ * @typedef ol_txrx_pdev_handle
+ * @brief opaque handle for txrx physical device object
+ */
+struct ol_txrx_pdev_t;
+typedef struct ol_txrx_pdev_t *ol_txrx_pdev_handle;
+
+/**
+ * @typedef ol_txrx_vdev_handle
+ * @brief opaque handle for txrx virtual device object
+ */
+struct ol_txrx_vdev_t;
+typedef struct ol_txrx_vdev_t *ol_txrx_vdev_handle;
+
+/**
+ * @typedef ol_txrx_peer_handle
+ * @brief opaque handle for txrx peer object
+ */
+struct ol_txrx_peer_t;
+typedef struct ol_txrx_peer_t *ol_txrx_peer_handle;
+
+/**
+ * @brief ADDBA negotiation status, used both during requests and confirmations
+ */
+enum ol_addba_status {
+	/* status: negotiation started or completed successfully */
+	ol_addba_success,
+
+	/* reject: aggregation is not applicable - don't try again */
+	ol_addba_reject,
+
+	/* busy: ADDBA negotiation couldn't be performed - try again later */
+	ol_addba_busy,
+};
+
+enum ol_sec_type {
+	ol_sec_type_none,
+	ol_sec_type_wep128,
+	ol_sec_type_wep104,
+	ol_sec_type_wep40,
+	ol_sec_type_tkip,
+	ol_sec_type_tkip_nomic,
+	ol_sec_type_aes_ccmp,
+	ol_sec_type_wapi,
+
+	/* keep this last! */
+	ol_sec_type_types
+};
+
+/**
+ * @enum ol_tx_spec
+ * @brief indicate what non-standard transmission actions to apply
+ * @details
+ *  Indicate one or more of the following:
+ *    - The tx frame already has a complete 802.11 header.
+ *      Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
+ *      A-MSDU aggregation.
+ *    - The tx frame should not be aggregated (A-MPDU or A-MSDU)
+ *    - The tx frame is already encrypted - don't attempt encryption.
+ *    - The tx frame is a segment of a TCP jumbo frame.
+ *    - This tx frame should not be unmapped and freed by the txrx layer
+ *      after transmission, but instead given to a registered tx completion
+ *      callback.
+ *  More than one of these specification can apply, though typically
+ *  only a single specification is applied to a tx frame.
+ *  A compound specification can be created, as a bit-OR of these
+ *  specifications.
+ */
+enum ol_tx_spec {
+	ol_tx_spec_std = 0x0,   /* do regular processing */
+	ol_tx_spec_raw = 0x1,   /* skip encap + A-MSDU aggr */
+	ol_tx_spec_no_aggr = 0x2,       /* skip encap + all aggr */
+	ol_tx_spec_no_encrypt = 0x4,    /* skip encap + encrypt */
+	ol_tx_spec_tso = 0x8,   /* TCP segmented */
+	ol_tx_spec_nwifi_no_encrypt = 0x10,     /* skip encrypt for nwifi */
+	ol_tx_spec_no_free = 0x20,      /* give to cb rather than free */
+};
+
+#endif /* _OL_TXRX_API__H_ */

+ 1312 - 0
core/dp/ol/inc/ol_txrx_ctrl_api.h

@@ -0,0 +1,1312 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_ctrl_api.h
+ * @brief Define the host data API functions called by the host control SW.
+ */
+#ifndef _OL_TXRX_CTRL_API__H_
+#define _OL_TXRX_CTRL_API__H_
+
+#include <athdefs.h>            /* A_STATUS */
+#include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <cdf_types.h>          /* cdf_device_t */
+#include <htc_api.h>            /* HTC_HANDLE */
+
+#include <ol_osif_api.h>        /* ol_osif_vdev_handle */
+#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle, etc. */
+#include <ol_ctrl_api.h>        /* ol_pdev_handle, ol_vdev_handle */
+
+#include <wlan_defs.h>          /* MAX_SPATIAL_STREAM */
+
+#define OL_ATH_TX_DRAIN_WAIT_DELAY 50
+
+/* Maximum number of station supported by data path, including BC. */
+#define WLAN_MAX_STA_COUNT  (HAL_NUM_STA)
+
+/* The symbolic station ID returned to HDD to specify the packet is bc/mc */
+#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
+
+/* The symbolic station ID returned to HDD to specify the packet is
+       to soft-AP itself */
+#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
+
+/**
+ * enum wlan_op_mode - Virtual device operation mode
+ *
+ * @wlan_op_mode_unknown: Unknown mode
+ * @wlan_op_mode_ap: AP mode
+ * @wlan_op_mode_ibss: IBSS mode
+ * @wlan_op_mode_sta: STA (client) mode
+ * @wlan_op_mode_monitor: Monitor mode
+ * @wlan_op_mode_ocb: OCB mode
+ */
+enum wlan_op_mode {
+	wlan_op_mode_unknown,
+	wlan_op_mode_ap,
+	wlan_op_mode_ibss,
+	wlan_op_mode_sta,
+	wlan_op_mode_monitor,
+	wlan_op_mode_ocb,
+};
+
+#define OL_TXQ_PAUSE_REASON_FW                (1 << 0)
+#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
+#define OL_TXQ_PAUSE_REASON_TX_ABORT          (1 << 2)
+#define OL_TXQ_PAUSE_REASON_VDEV_STOP         (1 << 3)
+#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
+
+
+/**
+ * enum netif_action_type - Type of actions on netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
+ * @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
+ * @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
+ * @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
+ * @WLAN_NETIF_TX_DISABLE: disable tx
+ * @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
+ * @WLAN_NETIF_CARRIER_ON: on carrier
+ * @WLAN_NETIF_CARRIER_OFF: off carrier
+ */
+enum netif_action_type {
+	WLAN_STOP_ALL_NETIF_QUEUE,
+	WLAN_START_ALL_NETIF_QUEUE,
+	WLAN_WAKE_ALL_NETIF_QUEUE,
+	WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
+	WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
+	WLAN_NETIF_TX_DISABLE,
+	WLAN_NETIF_TX_DISABLE_N_CARRIER,
+	WLAN_NETIF_CARRIER_ON,
+	WLAN_NETIF_CARRIER_OFF,
+	WLAN_NETIF_ACTION_TYPE_MAX,
+};
+
+/**
+ * enum netif_reason_type - reason for netif queue action
+ * @WLAN_CONTROL_PATH: action from control path
+ * @WLAN_DATA_FLOW_CONTROL: because of flow control
+ * @WLAN_FW_PAUSE: because of firmware pause
+ * @WLAN_TX_ABORT: because of tx abort
+ * @WLAN_VDEV_STOP: because of vdev stop
+ * @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised
+ * @WLAN_THERMAL_MITIGATION: because of thermal mitigation
+ */
+enum netif_reason_type {
+	WLAN_CONTROL_PATH,
+	WLAN_DATA_FLOW_CONTROL,
+	WLAN_FW_PAUSE,
+	WLAN_TX_ABORT,
+	WLAN_VDEV_STOP,
+	WLAN_PEER_UNAUTHORISED,
+	WLAN_THERMAL_MITIGATION,
+	WLAN_REASON_TYPE_MAX,
+};
+
+
+/* command options for dumpStats*/
+#define WLAN_HDD_STATS        0
+#define WLAN_TXRX_STATS       1
+#define WLAN_TXRX_HIST_STATS  2
+#define WLAN_TXRX_TSO_STATS   3
+#define WLAN_HDD_NETIF_OPER_HISTORY 4
+#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
+#define WLAN_TXRX_DESC_STATS  6
+
+ol_txrx_pdev_handle
+ol_txrx_pdev_alloc(ol_pdev_handle ctrl_pdev,
+		   HTC_HANDLE htc_pdev, cdf_device_t osdev);
+
+/**
+ * @brief Set up the data SW subsystem.
+ * @details
+ *  As part of the WLAN device attach, the data SW subsystem has
+ *  to be attached as a component within the WLAN device.
+ *  This attach allocates and initializes the physical device object
+ *  used by the data SW.
+ *  The data SW subsystem attach needs to happen after the target has
+ *  be started, and host / target parameter negotiation has completed,
+ *  since the host data SW uses some of these host/target negotiated
+ *  parameters (e.g. peer ID range) during the initializations within
+ *  its attach function.
+ *  However, the host data SW is not allowed to send HTC messages to the
+ *  target within this pdev_attach function call, since the HTC setup
+ *  has not complete at this stage of initializations.  Any messaging
+ *  to the target has to be done in the separate pdev_attach_target call
+ *  that is invoked after HTC setup is complete.
+ *
+ * @param pdev - txrx_pdev handle
+ * @return 0 for success or error code
+ */
+int
+ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief Do final steps of data SW setup that send messages to the target.
+ * @details
+ *  The majority of the data SW setup are done by the pdev_attach function,
+ *  but this function completes the data SW setup by sending datapath
+ *  configuration messages to the target.
+ *
+ * @param data_pdev - the physical device being initialized
+ */
+A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle data_pdev);
+
+/**
+ * @brief Allocate and initialize the data object for a new virtual device.
+ * @param data_pdev - the physical device the virtual device belongs to
+ * @param vdev_mac_addr - the MAC address of the virtual device
+ * @param vdev_id - the ID used to identify the virtual device to the target
+ * @param op_mode - whether this virtual device is operating as an AP,
+ *      an IBSS, or a STA
+ * @return
+ *      success: handle to new data vdev object, -OR-
+ *      failure: NULL
+ */
+ol_txrx_vdev_handle
+ol_txrx_vdev_attach(ol_txrx_pdev_handle data_pdev,
+		    uint8_t *vdev_mac_addr,
+		    uint8_t vdev_id, enum wlan_op_mode op_mode);
+
+/**
+ * @brief Allocate and set up references for a data peer object.
+ * @details
+ *  When an association with a peer starts, the host's control SW
+ *  uses this function to inform the host data SW.
+ *  The host data SW allocates its own peer object, and stores a
+ *  reference to the control peer object within the data peer object.
+ *  The host data SW also stores a reference to the virtual device
+ *  that the peer is associated with.  This virtual device handle is
+ *  used when the data SW delivers rx data frames to the OS shim layer.
+ *  The host data SW returns a handle to the new peer data object,
+ *  so a reference within the control peer object can be set to the
+ *  data peer object.
+ *
+ * @param data_pdev - data physical device object that will indirectly
+ *      own the data_peer object
+ * @param data_vdev - data virtual device object that will directly
+ *      own the data_peer object
+ * @param peer_mac_addr - MAC address of the new peer
+ * @return handle to new data peer object, or NULL if the attach fails
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_attach(ol_txrx_pdev_handle data_pdev,
+		    ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac_addr);
+
+/**
+ * @brief Parameter type to be input to ol_txrx_peer_update
+ * @details
+ *  This struct is union,to be used to specify various informations to update
+ *   txrx peer object.
+ */
+union ol_txrx_peer_update_param_t {
+	uint8_t qos_capable;
+	uint8_t uapsd_mask;
+	enum ol_sec_type sec_type;
+};
+
+/**
+ * @brief Parameter type to be input to ol_txrx_peer_update
+ * @details
+ *   This enum is used to specify what exact information in
+ *   ol_txrx_peer_update_param_t
+ *   is used to update the txrx peer object.
+ */
+enum ol_txrx_peer_update_select_t {
+	ol_txrx_peer_update_qos_capable = 1,
+	ol_txrx_peer_update_uapsdMask,
+	ol_txrx_peer_update_peer_security,
+};
+
+/**
+ * @brief Update the data peer object as some information changed in node.
+ * @details
+ *  Only a single parameter can be changed for each call to this func.
+ *
+ * @param peer - pointer to the node's object
+ * @param param - new value to be updated in the peer object.
+ * @param select - specify which parameter needs to be updated
+ */
+void
+ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
+		    union ol_txrx_peer_update_param_t *param,
+		    enum ol_txrx_peer_update_select_t select);
+
+enum {
+	OL_TX_WMM_AC_BE,
+	OL_TX_WMM_AC_BK,
+	OL_TX_WMM_AC_VI,
+	OL_TX_WMM_AC_VO,
+
+	OL_TX_NUM_WMM_AC
+};
+
+/**
+ * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
+ * @details
+ *   The struct is used to specify information to update TX WMM scheduler.
+ */
+struct ol_tx_ac_param_t {
+	uint32_t aifs;
+	uint32_t cwmin;
+	uint32_t cwmax;
+};
+
+struct ol_tx_wmm_param_t {
+	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
+};
+
+/**
+ * @brief Set parameters of WMM scheduler per AC settings.
+ * @details
+ *  This function applies only to HL systems.
+ *
+ * @param data_pdev - the physical device being paused
+ * @param wmm_param - the wmm parameters
+ */
+#define ol_txrx_set_wmm_param(data_pdev, wmm_param)     /* no-op */
+
+/**
+ * @brief notify tx data SW that a peer's transmissions are suspended.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *  The HL host tx data SW is doing tx classification and tx download
+ *  scheduling, and therefore also needs to actively participate in tx
+ *  flow control.  Specifically, the HL tx data SW needs to check whether a
+ *  given peer is available to transmit to, or is paused.
+ *  This function is used to tell the HL tx data SW when a peer is paused,
+ *  so the host tx data SW can hold the tx frames for that peer.
+ *
+ * @param data_peer - which peer is being paused
+ */
+#define ol_txrx_peer_pause(data_peer)   /* no-op */
+
+/**
+ * @brief notify tx data SW that a peer-TID is ready to transmit to.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *  If a peer-TID has tx paused, then the tx datapath will end up queuing
+ *  any tx frames that arrive from the OS shim for that peer-TID.
+ *  In a HL system, the host tx data SW itself will classify the tx frame,
+ *  and determine that it needs to be queued rather than downloaded to the
+ *  target for transmission.
+ *  Once the peer-TID is ready to accept data, the host control SW will call
+ *  this function to notify the host data SW that the queued frames can be
+ *  enabled for transmission, or specifically to download the tx frames
+ *  to the target to transmit.
+ *  The TID parameter is an extended version of the QoS TID.  Values 0-15
+ *  indicate a regular QoS TID, and the value 16 indicates either non-QoS
+ *  data, multicast data, or broadcast data.
+ *
+ * @param data_peer - which peer is being unpaused
+ * @param tid - which TID within the peer is being unpaused, or -1 as a
+ *      wildcard to unpause all TIDs within the peer
+ */
+#define ol_txrx_peer_tid_unpause(data_peer, tid)        /* no-op */
+
+/**
+ * @brief Tell a paused peer to release a specified number of tx frames.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *  Download up to a specified maximum number of tx frames from the tx
+ *  queues of the specified TIDs within the specified paused peer, usually
+ *  in response to a U-APSD trigger from the peer.
+ *  It is up to the host data SW to determine how to choose frames from the
+ *  tx queues of the specified TIDs.  However, the host data SW does need to
+ *  provide long-term fairness across the U-APSD enabled TIDs.
+ *  The host data SW will notify the target data FW when it is done downloading
+ *  the batch of U-APSD triggered tx frames, so the target data FW can
+ *  differentiate between an in-progress download versus a case when there are
+ *  fewer tx frames available than the specified limit.
+ *  This function is relevant primarily to HL U-APSD, where the frames are
+ *  held in the host.
+ *
+ * @param peer - which peer sent the U-APSD trigger
+ * @param tid_mask - bitmask of U-APSD enabled TIDs from whose tx queues
+ *      tx frames can be released
+ * @param max_frms - limit on the number of tx frames to release from the
+ *      specified TID's queues within the specified peer
+ */
+#define ol_txrx_tx_release(peer, tid_mask, max_frms)    /* no-op */
+
+/**
+ * @brief Suspend all tx data for the specified virtual device.
+ * @details
+ *  This function applies primarily to HL systems, but also applies to
+ *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *  As an example, this function could be used when a single-channel physical
+ *  device supports multiple channels by jumping back and forth between the
+ *  channels in a time-shared manner.  As the device is switched from channel
+ *  A to channel B, the virtual devices that operate on channel A will be
+ *  paused.
+ *
+ * @param data_vdev - the virtual device being paused
+ * @param reason - the reason for which vdev queue is getting paused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+	/* no-op stub: built without per-vdev tx flow control support */
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * @brief Drop all tx data for the specified virtual device.
+ * @details
+ *  This function applies primarily to HL systems, but also applies to
+ *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *  This function would typically be used by the ctrl SW after it parks
+ *  a STA vdev and then resumes it, but to a new AP.  In this case, though
+ *  the same vdev can be used, any old tx frames queued inside it would be
+ *  stale, and would need to be discarded.
+ *
+ * @param data_vdev - the virtual device being flushed
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev);
+#else
+#define ol_txrx_vdev_flush(data_vdev)   /* no-op */
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * @brief Resume tx for the specified virtual device.
+ * @details
+ *  This function applies primarily to HL systems, but also applies to
+ *  LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *
+ * @param data_vdev - the virtual device being unpaused
+ * @param reason - the reason for which vdev queue is getting unpaused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason)
+{
+	return;
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 */
+
+/**
+ * @brief Suspend all tx data per thermal event/timer for the
+ *  specified physical device
+ * @details
+ *  This function applies only to HL systems, and it makes pause and
+ * unpause operations happen in pairs.
+ */
+#define ol_txrx_throttle_pause(data_pdev)       /* no-op */
+
+/**
+ * @brief Resume all tx data per thermal event/timer for the
+ * specified physical device
+ * @details
+ *  This function applies only to HL systems, and it makes pause and
+ * unpause operations happen in pairs.
+ */
+#define ol_txrx_throttle_unpause(data_pdev)     /* no-op */
+
+/**
+ * @brief Suspend all tx data for the specified physical device.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *  In some systems it is necessary to be able to temporarily
+ *  suspend all WLAN traffic, e.g. to allow another device such as bluetooth
+ *  to temporarily have exclusive access to shared RF chain resources.
+ *  This function suspends tx traffic within the specified physical device.
+ *
+ * @param data_pdev - the physical device being paused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
+{
+	return;
+}
+#endif
+
+/**
+ * @brief Resume tx for the specified physical device.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *
+ * @param data_pdev - the physical device being unpaused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+	return;
+}
+#endif
+
+/**
+ * @brief Synchronize the data-path tx with a control-path target download
+ * @details
+ * @param data_pdev - the data-path physical device object
+ * @param sync_cnt - after the host data-path SW downloads this sync request
+ *      to the target data-path FW, the target tx data-path will hold itself
+ *      in suspension until it is given an out-of-band sync counter value that
+ *      is equal to or greater than this counter value
+ */
+void ol_txrx_tx_sync(ol_txrx_pdev_handle data_pdev, uint8_t sync_cnt);
+
+/**
+ * @brief Delete a peer's data object.
+ * @details
+ *  When the host's control SW disassociates a peer, it calls this
+ *  function to delete the peer's data object.
+ *  The reference stored in the control peer object to the data peer
+ *  object (set up by a call to ol_peer_store()) is provided.
+ *
+ * @param data_peer - the object to delete
+ */
+void ol_txrx_peer_detach(ol_txrx_peer_handle data_peer);
+
+typedef void (*ol_txrx_vdev_delete_cb)(void *context);
+
+/**
+ * @brief Deallocate the specified data virtual device object.
+ * @details
+ *  All peers associated with the virtual device need to be deleted
+ *  (ol_txrx_peer_detach) before the virtual device itself is deleted.
+ *  However, for the peers to be fully deleted, the peer deletion has to
+ *  percolate through the target data FW and back up to the host data SW.
+ *  Thus, even though the host control SW may have issued a peer_detach
+ *  call for each of the vdev's peers, the peer objects may still be
+ *  allocated, pending removal of all references to them by the target FW.
+ *  In this case, though the vdev_detach function call will still return
+ *  immediately, the vdev itself won't actually be deleted, until the
+ *  deletions of all its peers complete.
+ *  The caller can provide a callback function pointer to be notified when
+ *  the vdev deletion actually happens - whether it's directly within the
+ *  vdev_detach call, or if it's deferred until all in-progress peer
+ *  deletions have completed.
+ *
+ * @param data_vdev - data object for the virtual device in question
+ * @param callback - function to call (if non-NULL) once the vdev has
+ *      been wholly deleted
+ * @param callback_context - context to provide in the callback
+ */
+void
+ol_txrx_vdev_detach(ol_txrx_vdev_handle data_vdev,
+		    ol_txrx_vdev_delete_cb callback, void *callback_context);
+
+/**
+ * @brief Delete the data SW state.
+ * @details
+ *  This function is used when the WLAN driver is being removed to
+ *  remove the host data component within the driver.
+ *  All virtual devices within the physical device need to be deleted
+ *  (ol_txrx_vdev_detach) before the physical device itself is deleted.
+ *
+ * @param data_pdev - the data physical device object being removed
+ * @param force - delete the pdev (and its vdevs and peers) even if there
+ *      are outstanding references by the target to the vdevs and peers
+ *      within the pdev
+ */
+void ol_txrx_pdev_detach(ol_txrx_pdev_handle data_pdev, int force);
+
+typedef void
+(*ol_txrx_data_tx_cb)(void *ctxt, cdf_nbuf_t tx_frm, int had_error);
+
+/**
+ * @brief Store a delivery notification callback for specific data frames.
+ * @details
+ *  Through a non-std tx function, the txrx SW can be given tx data frames
+ *  that are specially marked to not be unmapped and freed by the tx SW
+ *  when transmission completes.  Rather, these specially-marked frames
+ *  are provided to the callback registered with this function.
+ *
+ * @param data_vdev - which vdev the callback is being registered with
+ *      (Currently the callback is stored in the pdev rather than the vdev.)
+ * @param callback - the function to call when tx frames marked as "no free"
+ *      are done being transmitted
+ * @param ctxt - the context argument provided to the callback function
+ */
+void
+ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle data_vdev,
+		       ol_txrx_data_tx_cb callback, void *ctxt);
+
+/**
+ * @brief Allow the control-path SW to send data frames.
+ * @details
+ *  Generally, all tx data frames come from the OS shim into the txrx layer.
+ *  However, there are rare cases such as TDLS messaging where the UMAC
+ *  control-path SW creates tx data frames.
+ *  This UMAC SW can call this function to provide the tx data frames to
+ *  the txrx layer.
+ *  The UMAC SW can request a callback for these data frames after their
+ *  transmission completes, by using the ol_txrx_data_tx_cb_set function
+ *  to register a tx completion callback, and by specifying
+ *  ol_tx_spec_no_free as the tx_spec arg when giving the frames to
+ *  ol_tx_non_std.
+ *  The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
+ *  as specified by ol_cfg_frame_type().
+ *
+ * @param data_vdev - which vdev should transmit the tx data frames
+ * @param tx_spec - what non-standard handling to apply to the tx data frames
+ * @param msdu_list - NULL-terminated list of tx MSDUs
+ */
+cdf_nbuf_t
+ol_tx_non_std(ol_txrx_vdev_handle data_vdev,
+	      enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list);
+
+typedef void
+(*ol_txrx_mgmt_tx_cb)(void *ctxt, cdf_nbuf_t tx_mgmt_frm, int had_error);
+
+/**
+ * @brief Store a callback for delivery notifications for management frames.
+ * @details
+ *  When the txrx SW receives notifications from the target that a tx frame
+ *  has been delivered to its recipient, it will check if the tx frame
+ *  is a management frame.  If so, the txrx SW will check the management
+ *  frame type specified when the frame was submitted for transmission.
+ *  If there is a callback function registered for the type of management
+ *  frame in question, the txrx code will invoke the callback to inform
+ *  the management + control SW that the mgmt frame was delivered.
+ *  This function is used by the control SW to store a callback pointer
+ *  for a given type of management frame.
+ *
+ * @param pdev - the data physical device object
+ * @param type - the type of mgmt frame the callback is used for
+ * @param download_cb - the callback for notification of delivery to the target
+ * @param ota_ack_cb - the callback for notification of delivery to the peer
+ * @param ctxt - context to use with the callback
+ */
+void
+ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
+		       uint8_t type,
+		       ol_txrx_mgmt_tx_cb download_cb,
+		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt);
+
+/**
+ * @brief Transmit a management frame.
+ * @details
+ *  Send the specified management frame from the specified virtual device.
+ *  The type is used for determining whether to invoke a callback to inform
+ *  the sender that the tx mgmt frame was delivered, and if so, which
+ *  callback to use.
+ *
+ * @param vdev - virtual device transmitting the frame
+ * @param tx_mgmt_frm - management frame to transmit
+ * @param type - the type of management frame (determines what callback to use)
+ * @param use_6mbps - specify whether management frame to transmit should use 6 Mbps
+ *                    rather than 1 Mbps min rate(for 5GHz band or P2P)
+ * @return
+ *      0 -> the frame is accepted for transmission, -OR-
+ *      1 -> the frame was not accepted
+ */
+int
+ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
+		  cdf_nbuf_t tx_mgmt_frm,
+		  uint8_t type, uint8_t use_6mbps, uint16_t chanfreq);
+
+/**
+ * @brief Setup the monitor mode vap (vdev) for this pdev
+ * @details
+ *  When a non-NULL vdev handle is registered as the monitor mode vdev, all
+ *  packets received by the system are delivered to the OS stack on this
+ *  interface in 802.11 MPDU format. Only a single monitor mode interface
+ *  can be up at any time. When the vdev handle is set to NULL the monitor
+ *  mode delivery is stopped. This handle may either be a unique vdev
+ *  object that only receives monitor mode packets OR a pointer to a vdev
+ *  object that also receives non-monitor traffic. In the second case the
+ *  OS stack is responsible for delivering the two streams using appropriate
+ *  OS APIs
+ *
+ * @param pdev - the data physical device object
+ * @param vdev - the data virtual device object to deliver monitor mode
+ *                  packets on
+ * @return
+ *       0 -> the monitor mode vap was successfully setup
+ *      -1 -> Unable to setup monitor mode
+ */
+int
+ol_txrx_set_monitor_mode_vap(ol_txrx_pdev_handle pdev,
+			     ol_txrx_vdev_handle vdev);
+
+/**
+ * @brief Setup the current operating channel of the device
+ * @details
+ *  Mainly used when populating monitor mode status that requires the
+ *  current operating channel
+ *
+ * @param pdev - the data physical device object
+ * @param chan_mhz - the channel frequency (mhz)
+ *                  packets on
+ * @return - void
+ */
+void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz);
+
+CDF_STATUS ol_txrx_bus_suspend(void);
+CDF_STATUS ol_txrx_bus_resume(void);
+CDF_STATUS ol_txrx_wait_for_pending_tx(int timeout);
+
+/**
+ * @brief Get the number of pending transmit frames that are awaiting completion.
+ * @details
+ *  Mainly used in clean up path to make sure all buffers have been freed
+ *
+ * @param pdev - the data physical device object
+ * @return - count of pending frames
+ */
+int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief Discard all tx frames that are pending in txrx.
+ * @details
+ *  Mainly used in clean up path to make sure all pending tx packets
+ *  held by txrx are returned back to OS shim immediately.
+ *
+ * @param pdev - the data physical device object
+ * @return - void
+ */
+void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief set the safemode of the device
+ * @details
+ *  This flag is used to bypass the encrypt and decrypt processes when send and
+ *  receive packets. It works like open AUTH mode, HW will treat all packets
+ *  as non-encrypt frames because no key installed. For rx fragmented frames,
+ *  it bypasses all the rx defragmentation.
+ *
+ * @param vdev - the data virtual device object
+ * @param val - the safemode state
+ * @return - void
+ */
+void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val);
+
+/**
+ * @brief set the privacy filter
+ * @details
+ *  Rx related. Set the privacy filters. When rx packets, check
+ *  the ether type, filter type and packet type
+ *  to decide whether discard these packets.
+ *
+ * @param vdev - the data virtual device object
+ * @param filter - filters to be set
+ * @param num - the number of filters
+ * @return - void
+ */
+void
+ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
+			    void *filter, uint32_t num);
+
+/**
+ * @brief configure the drop unencrypted frame flag
+ * @details
+ *  Rx related. When set this flag, all the unencrypted frames
+ *  received over a secure connection will be discarded
+ *
+ * @param vdev - the data virtual device object
+ * @param val - flag
+ * @return - void
+ */
+void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val);
+
+enum ol_txrx_peer_state {
+	ol_txrx_peer_state_invalid,
+	ol_txrx_peer_state_disc,        /* initial state */
+	ol_txrx_peer_state_conn,        /* authentication in progress */
+	ol_txrx_peer_state_auth,        /* authentication successful */
+};
+
+/**
+ * @brief specify the peer's authentication state
+ * @details
+ *  Specify the peer's authentication state (none, connected, authenticated)
+ *  to allow the data SW to determine whether to filter out invalid data frames.
+ *  (In the "connected" state, where security is enabled, but authentication
+ *  has not completed, tx and rx data frames other than EAPOL or WAPI should
+ *  be discarded.)
+ *  This function is only relevant for systems in which the tx and rx filtering
+ *  are done in the host rather than in the target.
+ *
+ * @param data_peer - which peer has changed its state
+ * @param state - the new state of the peer
+ *
+ * Return: CDF Status
+ */
+CDF_STATUS
+ol_txrx_peer_state_update(ol_txrx_pdev_handle pdev, uint8_t *peer_addr,
+			  enum ol_txrx_peer_state state);
+
+void
+ol_txrx_peer_keyinstalled_state_update(ol_txrx_peer_handle data_peer,
+				       uint8_t val);
+
+#define ol_tx_addba_conf(data_peer, tid, status)        /* no-op */
+
+/**
+ * @brief Find a txrx peer handle from the peer's MAC address
+ * @details
+ *  The control SW typically uses the txrx peer handle to refer to the peer.
+ *  In unusual circumstances, if it is infeasible for the control SW to
+ *  maintain the txrx peer handle but it can maintain the peer's MAC address,
+ *  this function allows the peer handle to be retrieved, based on the peer's
+ *  MAC address.
+ *  In cases where there are multiple peer objects with the same MAC address,
+ *  it is undefined which such object is returned.
+ *  This function does not increment the peer's reference count.  Thus, it is
+ *  only suitable for use as long as the control SW has assurance that it has
+ *  not deleted the peer object, by calling ol_txrx_peer_detach.
+ *
+ * @param pdev - the data physical device object
+ * @param peer_mac_addr - MAC address of the peer in question
+ * @return handle to the txrx peer object
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_addr(ol_txrx_pdev_handle pdev, uint8_t *peer_mac_addr);
+
+/**
+ * @brief Find a txrx peer handle from a peer's local ID
+ * @details
+ *  The control SW typically uses the txrx peer handle to refer to the peer.
+ *  In unusual circumstances, if it is infeasible for the control SW to
+ *  maintain the txrx peer handle but it can maintain a small integer local
+ *  peer ID, this function allows the peer handle to be retrieved, based on the local
+ *  peer ID.
+ *
+ * @param pdev - the data physical device object
+ * @param local_peer_id - the ID txrx assigned locally to the peer in question
+ * @return handle to the txrx peer object
+ */
+#if QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id(ol_txrx_pdev_handle pdev, uint8_t local_peer_id);
+#else
+#define ol_txrx_peer_find_by_local_id(pdev, local_peer_id) NULL
+#endif
+
+struct ol_txrx_peer_stats_t {
+	struct {
+		struct {
+			uint32_t ucast;
+			uint32_t mcast;
+			uint32_t bcast;
+		} frms;
+		struct {
+			uint32_t ucast;
+			uint32_t mcast;
+			uint32_t bcast;
+		} bytes;
+	} tx;
+	struct {
+		struct {
+			uint32_t ucast;
+			uint32_t mcast;
+			uint32_t bcast;
+		} frms;
+		struct {
+			uint32_t ucast;
+			uint32_t mcast;
+			uint32_t bcast;
+		} bytes;
+	} rx;
+};
+
+/**
+ * @brief Provide a snapshot of the txrx counters for the specified peer
+ * @details
+ *  The txrx layer optionally maintains per-peer stats counters.
+ *  This function provides the caller with a consistent snapshot of the
+ *  txrx stats counters for the specified peer.
+ *
+ * @param pdev - the data physical device object
+ * @param peer - which peer's stats counters are requested
+ * @param stats - buffer for holding the stats counters snapshot
+ * @return success / failure status
+ */
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+A_STATUS
+ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
+			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats);
+#else
+#define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR      /* failure */
+#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
+
+/* Config parameters for txrx_pdev */
+struct txrx_pdev_cfg_param_t {
+	uint8_t is_full_reorder_offload;
+	/* IPA Micro controller data path offload enable flag */
+	uint8_t is_uc_offload_enabled;
+	/* IPA Micro controller data path offload TX buffer count */
+	uint32_t uc_tx_buffer_count;
+	/* IPA Micro controller data path offload TX buffer size */
+	uint32_t uc_tx_buffer_size;
+	/* IPA Micro controller data path offload RX indication ring count */
+	uint32_t uc_rx_indication_ring_count;
+	/* IPA Micro controller data path offload TX partition base */
+	uint32_t uc_tx_partition_base;
+	/* IP, TCP and UDP checksum offload */
+	bool ip_tcp_udp_checksum_offload;
+	/* Rx processing in thread from TXRX */
+	bool enable_rxthread;
+	/* CE classification enabled through INI */
+	bool ce_classify_enabled;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	/* Threshold to stop queue in percentage */
+	uint32_t tx_flow_stop_queue_th;
+	/* Start queue offset in percentage */
+	uint32_t tx_flow_start_queue_offset;
+#endif
+};
+
+/**
+ * @brief Setup configuration parameters
+ * @details
+ *  Allocation configuration context that will be used across data path
+ *
+ * @param osdev - OS handle needed as an argument for some OS primitives
+ * @return the control device object
+ */
+ol_pdev_handle ol_pdev_cfg_attach(cdf_device_t osdev,
+				  struct txrx_pdev_cfg_param_t cfg_param);
+
+CDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
+void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
+
+
+#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
+ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
+					      uint8_t *peer_addr,
+					      uint8_t *peer_id);
+ol_txrx_peer_handle
+ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
+				   ol_txrx_vdev_handle vdev,
+				   uint8_t *peer_addr, uint8_t *peer_id);
+#else
+#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
+#define ol_txrx_find_peer_by_addr(pdev, peer_addr, peer_id) NULL
+#define ol_txrx_find_peer_by_addr_and_vdev(pdev, vdev, peer_addr, peer_id) NULL
+#endif
+
+#define OL_TXRX_RSSI_INVALID 0xffff
+/**
+ * @brief Provide the current RSSI average from data frames sent by a peer.
+ * @details
+ *  If a peer has sent data frames, the data SW will optionally keep
+ *  a running average of the RSSI observed for those data frames.
+ *  This function returns that time-average RSSI if it is available,
+ *  or OL_TXRX_RSSI_INVALID if either RSSI tracking is disabled or if
+ *  no data frame indications with valid RSSI meta-data have been received.
+ *  The RSSI is in approximate dBm units, and is normalized with respect
+ *  to a 20 MHz channel.  For example, if a data frame is received on a
+ *  40 MHz channel, wherein both the primary 20 MHz channel and the
+ *  secondary 20 MHz channel have an RSSI of -77 dBm, the reported RSSI
+ *  will be -77 dBm, rather than the actual -74 dBm RSSI from the
+ *  combination of the primary + extension 20 MHz channels.
+ *  Alternatively, the RSSI may be evaluated only on the primary 20 MHz
+ *  channel.
+ *
+ * @param peer - which peer's RSSI is desired
+ * @return RSSI evaluated from frames sent by the specified peer
+ */
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
+#else
+#define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
+#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
+
+#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
+#if QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
+#else
+#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
+#endif
+
+#ifdef QCA_COMPUTE_TX_DELAY
+/**
+ * @brief updates the compute interval period for TSM stats.
+ * @details
+ * @param interval - interval for stats computation
+ */
+void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval);
+
+/**
+ * @brief Return the uplink (transmitted) packet count and loss count.
+ * @details
+ *  This function will be called for getting uplink packet count and
+ *  loss count for a given stream (access category) at a regular interval.
+ *  This also resets the counters hence, the value returned is packets
+ *  counted in last 5(default) second interval. These counter are
+ *  incremented per access category in ol_tx_completion_handler()
+ *
+ * @param category - access category of interest
+ * @param out_packet_count - number of packets transmitted
+ * @param out_packet_loss_count - number of packets lost
+ */
+void
+ol_tx_packet_count(ol_txrx_pdev_handle pdev,
+		   uint16_t *out_packet_count,
+		   uint16_t *out_packet_loss_count, int category);
+#endif
+
+/**
+ * @brief Return the average delays for tx frames.
+ * @details
+ *  Return the average of the total time tx frames spend within the driver
+ *  and the average time tx frames take to be transmitted.
+ *  These averages are computed over a 5 second time interval.
+ *  These averages are computed separately for separate access categories,
+ *  if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
+ *
+ * @param pdev - the data physical device instance
+ * @param queue_delay_microsec - average time tx frms spend in the WLAN driver
+ * @param tx_delay_microsec - average time for frames to be transmitted
+ * @param category - category (TID) of interest
+ */
+#ifdef QCA_COMPUTE_TX_DELAY
+void
+ol_tx_delay(ol_txrx_pdev_handle pdev,
+	    uint32_t *queue_delay_microsec,
+	    uint32_t *tx_delay_microsec, int category);
+#else
+static inline void
+ol_tx_delay(ol_txrx_pdev_handle pdev,
+	    uint32_t *queue_delay_microsec,
+	    uint32_t *tx_delay_microsec, int category)
+{
+	/* no-op version if QCA_COMPUTE_TX_DELAY is not set */
+	*queue_delay_microsec = *tx_delay_microsec = 0;
+}
+#endif
+
+/*
+ * Bins used for reporting delay histogram:
+ * bin 0:  0 - 10  ms delay
+ * bin 1: 10 - 20  ms delay
+ * bin 2: 20 - 40  ms delay
+ * bin 3: 40 - 80  ms delay
+ * bin 4: 80 - 160 ms delay
+ * bin 5: > 160 ms delay
+ */
+#define QCA_TX_DELAY_HIST_REPORT_BINS 6
+/**
+ * @brief Provide a histogram of tx queuing delays.
+ * @details
+ *  Return a histogram showing the number of tx frames of the specified
+ *  category for each of the delay levels in the histogram bin spacings
+ *  listed above.
+ *  These histograms are computed over a 5 second time interval.
+ *  These histograms are computed separately for separate access categories,
+ *  if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
+ *
+ * @param pdev - the data physical device instance
+ * @param bin_values - an array of QCA_TX_DELAY_HIST_REPORT_BINS elements
+ *      This array gets filled in with the histogram bin counts.
+ * @param category - category (TID) of interest
+ */
+#ifdef QCA_COMPUTE_TX_DELAY
+void
+ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category);
+#else
+static inline void
+ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category)
+{
+	/* no-op version if QCA_COMPUTE_TX_DELAY is not set */
+	cdf_assert(bin_values);
+	cdf_mem_zero(bin_values,
+		     QCA_TX_DELAY_HIST_REPORT_BINS * sizeof(*bin_values));
+}
+#endif
+
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+/**
+ * @brief Set the thermal mitigation throttling level.
+ * @details
+ *  This function applies only to LL systems. This function is used set the
+ *  tx throttle level used for thermal mitigation
+ *
+ * @param pdev - the physical device being throttled
+ */
+void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level);
+#else
+static inline void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev,
+					    int level)
+{
+	/* no-op */
+}
+#endif /* QCA_SUPPORT_TX_THROTTLE */
+
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+/**
+ * @brief Configure the thermal mitigation throttling period.
+ * @details
+ *  This function applies only to LL systems. This function is used set the
+ *  period over which data will be throttled
+ *
+ * @param pdev - the physical device being throttled
+ */
+void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period);
+#else
+static inline void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev,
+					      int period)
+{
+	/* no-op */
+}
+#endif /* QCA_SUPPORT_TX_THROTTLE */
+
+void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val);
+
+
+#ifdef IPA_OFFLOAD
+/**
+ * @brief Client request resource information
+ * @details
+ *  OL client will request IPA UC related resource information
+ *  Resource information will be distributed to IPA module
+ *  All of the required resources should be pre-allocated
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ce_sr_base_paddr - copy engine source ring base physical address
+ * @param ce_sr_ring_size - copy engine source ring size
+ * @param ce_reg_paddr - copy engine register physical address
+ * @param tx_comp_ring_base_paddr - tx comp ring base physical address
+ * @param tx_comp_ring_size - tx comp ring size
+ * @param tx_num_alloc_buffer - number of allocated tx buffer
+ * @param rx_rdy_ring_base_paddr - rx ready ring base physical address
+ * @param rx_rdy_ring_size - rx ready ring size
+ * @param rx_proc_done_idx_paddr - rx process done index physical address
+ */
+void
+ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
+			    uint32_t *ce_sr_base_paddr,
+			    uint32_t *ce_sr_ring_size,
+			    cdf_dma_addr_t *ce_reg_paddr,
+			    uint32_t *tx_comp_ring_base_paddr,
+			    uint32_t *tx_comp_ring_size,
+			    uint32_t *tx_num_alloc_buffer,
+			    uint32_t *rx_rdy_ring_base_paddr,
+			    uint32_t *rx_rdy_ring_size,
+			    uint32_t *rx_proc_done_idx_paddr);
+
+/**
+ * @brief Client set IPA UC doorbell register
+ * @details
+ *  IPA UC provides the doorbell register physical address
+ *  WLAN firmware will use this physical address to notify IPA UC
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ipa_uc_tx_doorbell_paddr - tx comp doorbell physical address
+ * @param ipa_uc_rx_doorbell_paddr - rx ready doorbell physical address
+ */
+void
+ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+				  uint32_t ipa_tx_uc_doorbell_paddr,
+				  uint32_t ipa_rx_uc_doorbell_paddr);
+
+/**
+ * @brief Client notify IPA UC data path active or not
+ *
+ * @param pdev - handle to the HTT instance
+ * @param uc_active - UC data path is active or not
+ * @param is_tx - UC TX is active or not
+ */
+void
+ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx);
+
+/**
+ * @brief Offload data path activation notification
+ * @details
+ *  Firmware notification handler for offload datapath activity
+ *
+ * @param pdev - handle to the HTT instance
+ * @param op_msg - activated for tx or rx data path
+ */
+void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg);
+
+/**
+ * @brief callback function registration
+ * @details
+ *  OSIF layer callback function registration API
+ *  OSIF layer will register firmware offload datapath activity
+ *  notification callback
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ipa_uc_op_cb_type - callback function pointer should be registered
+ * @param osif_dev - osif instance pointer
+ */
+void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+				   void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
+							     void *osif_ctxt),
+				   void *osif_dev);
+
+/**
+ * @brief query uc data path stats
+ * @details
+ *  Query uc data path stats from firmware
+ *
+ * @param pdev - handle to the HTT instance
+ */
+void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev);
+#else
+static inline void
+ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
+			    uint32_t *ce_sr_base_paddr,
+			    uint32_t *ce_sr_ring_size,
+			    cdf_dma_addr_t *ce_reg_paddr,
+			    uint32_t *tx_comp_ring_base_paddr,
+			    uint32_t *tx_comp_ring_size,
+			    uint32_t *tx_num_alloc_buffer,
+			    uint32_t *rx_rdy_ring_base_paddr,
+			    uint32_t *rx_rdy_ring_size,
+			    uint32_t *rx_proc_done_idx_paddr)
+{
+	return;
+}
+
+static inline void
+ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+				  uint32_t ipa_tx_uc_doorbell_paddr,
+				  uint32_t ipa_rx_uc_doorbell_paddr)
+{
+	return;
+}
+
+static inline void
+ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev,
+	bool uc_active, bool is_tx)
+{
+	return;
+}
+
+static inline void
+ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
+{
+	return;
+}
+
+static inline void
+ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+				   void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
+							     void *osif_ctxt),
+				   void *osif_dev)
+{
+	return;
+}
+
+static inline void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+#endif /* IPA_OFFLOAD */
+
+void ol_txrx_display_stats(uint16_t bitmap);
+void ol_txrx_clear_stats(uint16_t bitmap);
+int ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len);
+
+CDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
+				     uint8_t *peer_id);
+
+void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
+			  struct ol_txrx_peer_t *peer);
+
+bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
+			  struct ol_txrx_peer_t **peer);
+
+/* TX FLOW Control related functions */
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+#define TX_FLOW_MGMT_POOL_ID	0xEF
+
+#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
+#define TX_FLOW_MGMT_POOL_SIZE  32
+#else
+#define TX_FLOW_MGMT_POOL_SIZE  0
+#endif
+
+void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
+void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
+void ol_tx_dump_flow_pool_info(void);
+void ol_tx_clear_flow_pool_stats(void);
+void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
+				 uint8_t flow_pool_id, uint16_t flow_pool_size);
+void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
+				   uint8_t flow_pool_id);
+struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
+						 uint16_t flow_pool_size);
+int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool);
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
+#else
+
+static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+static inline void ol_tx_dump_flow_pool_info(void)
+{
+	return;
+}
+static inline void ol_tx_clear_flow_pool_stats(void)
+{
+	return;
+}
+static inline void ol_tx_flow_pool_map_handler(uint8_t flow_id,
+	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+	return;
+}
+static inline void ol_tx_flow_pool_unmap_handler(uint8_t flow_id,
+	 uint8_t flow_type, uint8_t flow_pool_id)
+{
+	return;
+}
+static inline struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(
+		uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+	return NULL;
+}
+static inline int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
+{
+	return 0;
+}
+static inline void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
+{
+	return;
+}
+#endif
+
+#endif /* _OL_TXRX_CTRL_API__H_ */

+ 203 - 0
core/dp/ol/inc/ol_txrx_dbg.h

@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_dbg.h
+ * @brief Functions provided for visibility and debugging.
+ */
+#ifndef _OL_TXRX_DBG__H_
+#define _OL_TXRX_DBG__H_
+
+#include <athdefs.h>            /* A_STATUS, uint64_t */
+#include <cdf_lock.h>           /* cdf_semaphore_t */
+#include <htt.h>                /* htt_dbg_stats_type */
+#include <ol_txrx_stats.h>      /* ol_txrx_stats */
+
+/*
+ * ol_txrx_stats_callback - notification invoked when stats of the given
+ * htt_dbg_stats_type have been uploaded into buf (bytes long); ctxt is the
+ * opaque pointer registered in ol_txrx_stats_req.callback.ctxt.
+ */
+typedef void (*ol_txrx_stats_callback)(void *ctxt,
+				       enum htt_dbg_stats_type type,
+				       uint8_t *buf, int bytes);
+
+/*
+ * ol_txrx_stats_req - specification of a firmware stats request: which stats
+ * to upload/reset, and how to deliver them (print, callback, copy to a
+ * buffer, and/or block on a semaphore until the upload completes).
+ */
+struct ol_txrx_stats_req {
+	uint32_t stats_type_upload_mask;        /* which stats to upload */
+	uint32_t stats_type_reset_mask; /* which stats to reset */
+
+	/* stats will be printed if either print element is set */
+	struct {
+		int verbose;    /* verbose stats printout */
+		int concise;    /* concise stats printout (takes precedence) */
+	} print;                /* print uploaded stats */
+
+	/* stats notify callback will be invoked if fp is non-NULL */
+	struct {
+		ol_txrx_stats_callback fp;
+		void *ctxt;     /* opaque arg passed back through fp */
+	} callback;
+
+	/* stats will be copied into the specified buffer if buf is non-NULL */
+	struct {
+		uint8_t *buf;
+		int byte_limit; /* don't copy more than this */
+	} copy;
+
+	/*
+	 * If blocking is true, the caller will take the specified semaphore
+	 * to wait for the stats to be uploaded, and the driver will release
+	 * the semaphore when the stats are done being uploaded.
+	 */
+	struct {
+		int blocking;
+		cdf_semaphore_t *sem_ptr;
+	} wait;
+};
+
+#ifndef TXRX_DEBUG_LEVEL
+#define TXRX_DEBUG_LEVEL 0      /* no debug info */
+#endif
+
+#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
+
+#define ol_txrx_debug(vdev, debug_specs) 0
+#define ol_txrx_fw_stats_cfg(vdev, type, val) 0
+#define ol_txrx_fw_stats_get(vdev, req) 0
+#define ol_txrx_aggr_cfg(vdev, max_subfrms_ampdu, max_subfrms_amsdu) 0
+
+#else /*---------------------------------------------------------------------*/
+
+#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle, etc. */
+
+int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs);
+
+void ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
+			  uint8_t cfg_stats_type, uint32_t cfg_val);
+
+int ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev,
+			 struct ol_txrx_stats_req *req);
+
+int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
+		     int max_subfrms_ampdu, int max_subfrms_amsdu);
+
+/*
+ * Debug-category bit masks; presumably combined into the debug_specs
+ * argument of ol_txrx_debug() — confirm against its implementation.
+ */
+enum {
+	TXRX_DBG_MASK_OBJS = 0x01,
+	TXRX_DBG_MASK_STATS = 0x02,
+	TXRX_DBG_MASK_PROT_ANALYZE = 0x04,
+	TXRX_DBG_MASK_RX_REORDER_TRACE = 0x08,
+	TXRX_DBG_MASK_RX_PN_TRACE = 0x10
+};
+
+/*--- txrx printouts ---*/
+
+/*
+ * Uncomment this to enable txrx printouts with dynamically adjustable
+ * verbosity.  These printouts should not impact performance.
+ */
+#define TXRX_PRINT_ENABLE 1
+/* uncomment this for verbose txrx printouts (may impact performance) */
+/* #define TXRX_PRINT_VERBOSE_ENABLE 1 */
+
+void ol_txrx_print_level_set(unsigned level);
+
+/*--- txrx object (pdev, vdev, peer) display debug functions ---*/
+
+#if TXRX_DEBUG_LEVEL > 5
+void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent);
+void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent);
+void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent);
+#else
+#define ol_txrx_pdev_display(pdev, indent)
+#define ol_txrx_vdev_display(vdev, indent)
+#define ol_txrx_peer_display(peer, indent)
+#endif
+
+/*--- txrx stats display debug functions ---*/
+
+
+void ol_txrx_stats_display(ol_txrx_pdev_handle pdev);
+
+void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev);
+
+
+/*--- txrx protocol analyzer debug feature ---*/
+
+/* uncomment this to enable the protocol analyzer feature */
+/* #define ENABLE_TXRX_PROT_ANALYZE 1 */
+
+#if defined(ENABLE_TXRX_PROT_ANALYZE)
+
+void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev);
+
+#else
+
+#define ol_txrx_prot_ans_display(pdev)
+
+#endif /* ENABLE_TXRX_PROT_ANALYZE */
+
+/*--- txrx sequence number trace debug feature ---*/
+
+/* uncomment this to enable the rx reorder trace feature */
+/* #define ENABLE_RX_REORDER_TRACE 1 */
+
+#define ol_txrx_seq_num_trace_display(pdev) \
+	ol_rx_reorder_trace_display(pdev, 0, 0)
+
+#if defined(ENABLE_RX_REORDER_TRACE)
+
+void
+ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit);
+
+#else
+
+#define ol_rx_reorder_trace_display(pdev, just_once, limit)
+
+#endif /* ENABLE_RX_REORDER_TRACE */
+
+/*--- txrx packet number trace debug feature ---*/
+
+/* uncomment this to enable the rx PN trace feature */
+/* #define ENABLE_RX_PN_TRACE 1 */
+
+#define ol_txrx_pn_trace_display(pdev) ol_rx_pn_trace_display(pdev, 0)
+
+#if defined(ENABLE_RX_PN_TRACE)
+
+void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once);
+
+#else
+
+#define ol_rx_pn_trace_display(pdev, just_once)
+
+#endif /* ENABLE_RX_PN_TRACE */
+
+/*--- tx queue log debug feature ---*/
+/* uncomment this to enable the tx queue log feature */
+/* #define ENABLE_TX_QUEUE_LOG 1 */
+
+#define ol_tx_queue_log_display(pdev)
+
+#endif /* ATH_PERF_PWR_OFFLOAD  */
+/*----------------------------------------*/
+
+#endif /* _OL_TXRX_DBG__H_ */

部分文件因为文件数量过多而无法显示