Bläddra i källkod

qcacmn: SDIO bus support (Part 1 - HIF SDIO)

Add legacy hif sdio code.
Implement new hif sdio interfaces with respect to new hif design.
Add datapath to HIF interfaces.
Refactor hif-sdio codebase.
Remove references to older kernel version.

Change-Id: Ieca3e512edca5f960d6f2b64d15121db6c8138c7
CRs-Fixed: 969334
Govind Singh 9 år sedan
förälder
incheckning
4cc8213cc2

+ 45 - 25
hif/inc/hif.h

@@ -218,6 +218,42 @@ enum hif_disable_type {
 	HIF_DISABLE_TYPE_SHUTDOWN,
 	HIF_DISABLE_TYPE_MAX
 };
+/**
+ * enum hif_device_config_opcode: configure mode
+ *
+ * @HIF_DEVICE_POWER_STATE: device power state
+ * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
+ * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
+ * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
+ * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
+ * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
+ * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
+ * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
+ * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
+ * @HIF_DEVICE_GET_OS_DEVICE: get OS device
+ * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
+ * @HIF_BMI_DONE: bmi done
+ * @HIF_DEVICE_SET_TARGET_TYPE: set target type
+ * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
+ * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
+ */
+enum hif_device_config_opcode {
+	HIF_DEVICE_POWER_STATE = 0,
+	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
+	HIF_DEVICE_GET_MBOX_ADDR,
+	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
+	HIF_DEVICE_GET_IRQ_PROC_MODE,
+	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
+	HIF_DEVICE_POWER_STATE_CHANGE,
+	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
+	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
+	HIF_DEVICE_GET_OS_DEVICE,
+	HIF_DEVICE_DEBUG_BUS_STATE,
+	HIF_BMI_DONE,
+	HIF_DEVICE_SET_TARGET_TYPE,
+	HIF_DEVICE_SET_HTC_CONTEXT,
+	HIF_DEVICE_GET_HTC_CONTEXT,
+};
 
 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
 typedef struct _HID_ACCESS_LOG {
@@ -238,8 +274,8 @@ struct htc_callbacks {
 	void *context;		/* context to pass to the dsrhandler
 				 * note : rwCompletionHandler is provided
 				 * the context passed to hif_read_write  */
-	int (*rwCompletionHandler)(void *rwContext, int status);
-	int (*dsrHandler)(void *context);
+	QDF_STATUS(*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
+	QDF_STATUS(*dsrHandler)(void *context);
 };
 
 /**
@@ -342,33 +378,10 @@ static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
  */
 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
 
-#ifdef IPA_OFFLOAD
 void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
 			     qdf_dma_addr_t *ce_sr_base_paddr,
 			     uint32_t *ce_sr_ring_size,
 			     qdf_dma_addr_t *ce_reg_paddr);
-#else
-/**
- * hif_ipa_get_ce_resource() - get uc resource on hif
- * @scn: bus context
- * @ce_sr_base_paddr: copyengine source ring base physical address
- * @ce_sr_ring_size: copyengine source ring size
- * @ce_reg_paddr: copyengine register physical address
- *
- * IPA micro controller data path offload feature enabled,
- * HIF should release copy engine related resource information to IPA UC
- * IPA UC will access hardware resource with released information
- *
- * Return: None
- */
-static inline void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
-			     qdf_dma_addr_t *ce_sr_base_paddr,
-			     uint32_t *ce_sr_ring_size,
-			     qdf_dma_addr_t *ce_reg_paddr)
-{
-	return;
-}
-#endif /* IPA_OFFLOAD */
 
 /**
  * @brief List of callbacks - filled in by HTC.
@@ -437,6 +450,11 @@ struct hif_pipe_addl_info {
 struct hif_bus_id;
 typedef struct hif_bus_id hif_bus_id;
 
+void hif_claim_device(struct hif_opaque_softc *hif_ctx);
+QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
+		     int opcode, void *config, uint32_t config_len);
+void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
+void hif_mask_interrupt_call(struct hif_opaque_softc *scn);
 void hif_post_init(struct hif_opaque_softc *scn, void *hHTC,
 		   struct hif_msg_callbacks *callbacks);
 QDF_STATUS hif_start(struct hif_opaque_softc *scn);
@@ -554,6 +572,8 @@ void hif_process_runtime_resume_success(struct hif_opaque_softc *);
 int hif_dump_registers(struct hif_opaque_softc *scn);
 int ol_copy_ramdump(struct hif_opaque_softc *scn);
 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
+void hif_bus_pkt_dl_len_set(struct hif_opaque_softc *hif_sc,
+			    unsigned int pkt_download_len);
 void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
 		     const char **target_name);
 void hif_lro_flush_cb_register(struct hif_opaque_softc *scn,

+ 5 - 0
hif/inc/regtable.h

@@ -28,6 +28,11 @@
 #ifndef _REGTABLE_H_
 #define _REGTABLE_H_
 
+#ifdef HIF_SDIO
+#include "regtable_sdio.h"
+#endif
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 #include "reg_struct.h"
 #include "regtable_pcie.h"
 #endif
+#endif

+ 74 - 67
hif/src/ar6320def.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -33,7 +33,6 @@
 #define AR6320_RTC_WMAC_BASE_ADDRESS                    0x00001000
 #define AR6320_MAC_COEX_BASE_ADDRESS                    0x0000f000
 #define AR6320_BT_COEX_BASE_ADDRESS                     0x00002000
-#define AR6320_SOC_PCIE_BASE_ADDRESS                    0x00038000
 #define AR6320_SOC_CORE_BASE_ADDRESS                    0x0003a000
 #define AR6320_WLAN_UART_BASE_ADDRESS                   0x0000c000
 #define AR6320_WLAN_SI_BASE_ADDRESS                     0x00010000
@@ -43,17 +42,7 @@
 #define AR6320_EFUSE_BASE_ADDRESS                       0x00024000
 #define AR6320_FPGA_REG_BASE_ADDRESS                    0x00039000
 #define AR6320_WLAN_UART2_BASE_ADDRESS                  0x00054c00
-#define AR6320_CE_WRAPPER_BASE_ADDRESS                  0x00034000
-#define AR6320_CE0_BASE_ADDRESS                         0x00034400
-#define AR6320_CE1_BASE_ADDRESS                         0x00034800
-#define AR6320_CE2_BASE_ADDRESS                         0x00034c00
-#define AR6320_CE3_BASE_ADDRESS                         0x00035000
-#define AR6320_CE4_BASE_ADDRESS                         0x00035400
-#define AR6320_CE5_BASE_ADDRESS                         0x00035800
-#define AR6320_CE6_BASE_ADDRESS                         0x00035c00
-#define AR6320_CE7_BASE_ADDRESS                         0x00036000
 #define AR6320_DBI_BASE_ADDRESS                         0x0003c000
-#define AR6320_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS       0x00007800
 
 #define AR6320_SCRATCH_3_ADDRESS                        0x0028
 #define AR6320_TARG_DRAM_START                          0x00400000
@@ -107,7 +96,6 @@
 #define AR6320_SI_CS_RX_CNT_MASK                        0x000000f0
 #define AR6320_SI_CS_TX_CNT_LSB                         0
 #define AR6320_SI_CS_TX_CNT_MASK                        0x0000000f
-#define AR6320_CE_COUNT                                 8
 #define AR6320_SR_WR_INDEX_ADDRESS                      0x003c
 #define AR6320_DST_WATERMARK_ADDRESS                    0x0050
 #define AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB             14
@@ -118,6 +106,46 @@
 #define AR6320_RX_MPDU_START_0_SEQ_NUM_MASK             0x0fff0000
 #define AR6320_RX_MPDU_START_2_TID_LSB                  28
 #define AR6320_RX_MPDU_START_2_TID_MASK                 0xf0000000
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+#define AR6320_SOC_PCIE_BASE_ADDRESS                    0x00038000
+#define AR6320_CE_WRAPPER_BASE_ADDRESS                  0x00034000
+#define AR6320_CE0_BASE_ADDRESS                         0x00034400
+#define AR6320_CE1_BASE_ADDRESS                         0x00034800
+#define AR6320_CE2_BASE_ADDRESS                         0x00034c00
+#define AR6320_CE3_BASE_ADDRESS                         0x00035000
+#define AR6320_CE4_BASE_ADDRESS                         0x00035400
+#define AR6320_CE5_BASE_ADDRESS                         0x00035800
+#define AR6320_CE6_BASE_ADDRESS                         0x00035c00
+#define AR6320_CE7_BASE_ADDRESS                         0x00036000
+#define AR6320_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS       0x00007800
+#define AR6320_CE_COUNT                                 8
+#define AR6320_CE_CTRL1_ADDRESS                         0x0010
+#define AR6320_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
+#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
+#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
+#define AR6320_CE_CTRL1_DMAX_LENGTH_LSB                 0
+#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
+#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
+#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
+#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
+#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
+#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  5
+#define AR6320_PCIE_SOC_WAKE_RESET                      0x00000000
+#define AR6320_PCIE_SOC_WAKE_ADDRESS                    0x0004
+#define AR6320_PCIE_SOC_WAKE_V_MASK                     0x00000001
+#define AR6320_MUX_ID_MASK                              0x0000
+#define AR6320_TRANSACTION_ID_MASK                      0x3fff
+#define AR6320_PCIE_LOCAL_BASE_ADDRESS                  0x80000
+#define AR6320_FW_IND_HELPER                            4
+#define AR6320_PCIE_INTR_ENABLE_ADDRESS                 0x0008
+#define AR6320_PCIE_INTR_CLR_ADDRESS                    0x0014
+#define AR6320_PCIE_INTR_FIRMWARE_MASK                  0x00000400
+#define AR6320_PCIE_INTR_CE0_MASK                       0x00000800
+#define AR6320_PCIE_INTR_CE_MASK_ALL                    0x0007f800
+#define AR6320_PCIE_INTR_CAUSE_ADDRESS                  0x000c
+#define AR6320_SOC_RESET_CONTROL_CE_RST_MASK            0x00000001
+#endif
 #define AR6320_RX_MPDU_START_2_PN_47_32_LSB             0
 #define AR6320_RX_MPDU_START_2_PN_47_32_MASK            0x0000ffff
 #define AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK            0x000000ff
@@ -157,13 +185,10 @@
 #define AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK      0x00000010
 #define AR6320_HOST_IS_ADDRESS                          0x0030
 #define AR6320_HOST_IS_COPY_COMPLETE_MASK               0x00000001
-#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
 #define AR6320_HOST_IE_ADDRESS                          0x002c
 #define AR6320_HOST_IE_COPY_COMPLETE_MASK               0x00000001
 #define AR6320_SR_BA_ADDRESS                            0x0000
 #define AR6320_SR_SIZE_ADDRESS                          0x0004
-#define AR6320_CE_CTRL1_ADDRESS                         0x0010
-#define AR6320_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
 #define AR6320_DR_BA_ADDRESS                            0x0008
 #define AR6320_DR_SIZE_ADDRESS                          0x000c
 #define AR6320_MISC_IE_ADDRESS                          0x0034
@@ -177,41 +202,18 @@
 #define AR6320_SRC_WATERMARK_HIGH_LSB                   0
 #define AR6320_DST_WATERMARK_LOW_LSB                    16
 #define AR6320_DST_WATERMARK_HIGH_LSB                   0
-#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
-#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
-#define AR6320_CE_CTRL1_DMAX_LENGTH_LSB                 0
-#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
-#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
-#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
-#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
-#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
-#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  5
 #define AR6320_SOC_GLOBAL_RESET_ADDRESS                 0x0008
 #define AR6320_RTC_STATE_ADDRESS                        0x0000
 #define AR6320_RTC_STATE_COLD_RESET_MASK                0x00002000
-#define AR6320_PCIE_SOC_WAKE_RESET                      0x00000000
-#define AR6320_PCIE_SOC_WAKE_ADDRESS                    0x0004
-#define AR6320_PCIE_SOC_WAKE_V_MASK                     0x00000001
 #define AR6320_RTC_STATE_V_MASK                         0x00000007
 #define AR6320_RTC_STATE_V_LSB                          0
 #define AR6320_RTC_STATE_V_ON                           3
-#define AR6320_MUX_ID_MASK                              0x0000
-#define AR6320_TRANSACTION_ID_MASK                      0x3fff
-#define AR6320_PCIE_LOCAL_BASE_ADDRESS                  0x80000
 #define AR6320_FW_IND_EVENT_PENDING                     1
 #define AR6320_FW_IND_INITIALIZED                       2
-#define AR6320_FW_IND_HELPER                            4
-#define AR6320_PCIE_INTR_ENABLE_ADDRESS                 0x0008
-#define AR6320_PCIE_INTR_CLR_ADDRESS                    0x0014
-#define AR6320_PCIE_INTR_FIRMWARE_MASK                  0x00000400
-#define AR6320_PCIE_INTR_CE0_MASK                       0x00000800
-#define AR6320_PCIE_INTR_CE_MASK_ALL                    0x0007f800
-#define AR6320_PCIE_INTR_CAUSE_ADDRESS                  0x000c
 #define AR6320_CPU_INTR_ADDRESS                         0x0010
 #define AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS            0x00000050
 #define AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK        0x00000004
 #define AR6320_SOC_RESET_CONTROL_ADDRESS                0x00000000
-#define AR6320_SOC_RESET_CONTROL_CE_RST_MASK            0x00000001
 #define AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK      0x00000040
 #define AR6320_CORE_CTRL_ADDRESS                        0x0000
 #define AR6320_CORE_CTRL_CPU_INTR_MASK                  0x00002000
@@ -224,8 +226,8 @@
 #define AR6320_SOC_CHIP_ID_VERSION_LSB                  18
 #define AR6320_SOC_CHIP_ID_REVISION_MASK                0x00000f00
 #define AR6320_SOC_CHIP_ID_REVISION_LSB                 8
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 #define AR6320_SOC_POWER_REG_OFFSET                     0x0000010c
-
 /* Copy Engine Debug */
 #define AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET              0x0000010c
 #define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB             3
@@ -312,9 +314,9 @@
 #define AR6320_SOC_CPU_CLOCK_STANDARD_LSB               0
 #define AR6320_SOC_CPU_CLOCK_STANDARD_MASK              0x00000003
 /* PLL end */
-
 #define AR6320_PCIE_INTR_CE_MASK(n) \
 	(AR6320_PCIE_INTR_CE0_MASK << (n))
+#endif
 #define AR6320_DRAM_BASE_ADDRESS          AR6320_TARG_DRAM_START
 #define AR6320_FW_INDICATOR_ADDRESS \
 	(AR6320_SOC_CORE_BASE_ADDRESS + AR6320_SCRATCH_3_ADDRESS)
@@ -380,6 +382,8 @@
 #define AR6320_WINDOW_DATA_ADDRESS                     0x0874
 #define AR6320_WINDOW_READ_ADDR_ADDRESS                0x087c
 #define AR6320_WINDOW_WRITE_ADDR_ADDRESS               0x0878
+#define AR6320_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f
+#define AR6320_HOST_INT_STATUS_MBOX_DATA_LSB 0
 
 struct targetdef_s ar6320_targetdef = {
 	.d_RTC_SOC_BASE_ADDRESS = AR6320_RTC_SOC_BASE_ADDRESS,
@@ -456,14 +460,8 @@ struct targetdef_s ar6320_targetdef = {
 	.d_DRAM_BASE_ADDRESS = AR6320_DRAM_BASE_ADDRESS,
 	.d_SOC_CORE_BASE_ADDRESS = AR6320_SOC_CORE_BASE_ADDRESS,
 	.d_CORE_CTRL_ADDRESS = AR6320_CORE_CTRL_ADDRESS,
-	.d_CE_COUNT = AR6320_CE_COUNT,
 	.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
 	.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
-	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
-	.d_PCIE_INTR_ENABLE_ADDRESS = AR6320_PCIE_INTR_ENABLE_ADDRESS,
-	.d_PCIE_INTR_CLR_ADDRESS = AR6320_PCIE_INTR_CLR_ADDRESS,
-	.d_PCIE_INTR_FIRMWARE_MASK = AR6320_PCIE_INTR_FIRMWARE_MASK,
-	.d_PCIE_INTR_CE_MASK_ALL = AR6320_PCIE_INTR_CE_MASK_ALL,
 	.d_CORE_CTRL_CPU_INTR_MASK = AR6320_CORE_CTRL_CPU_INTR_MASK,
 	.d_SR_WR_INDEX_ADDRESS = AR6320_SR_WR_INDEX_ADDRESS,
 	.d_DST_WATERMARK_ADDRESS = AR6320_DST_WATERMARK_ADDRESS,
@@ -521,6 +519,13 @@ struct targetdef_s ar6320_targetdef = {
 		AR6320_RX_ATTENTION_0_MSDU_DONE_MASK,
 	.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
 		AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+	.d_CE_COUNT = AR6320_CE_COUNT,
+	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
+	.d_PCIE_INTR_ENABLE_ADDRESS = AR6320_PCIE_INTR_ENABLE_ADDRESS,
+	.d_PCIE_INTR_CLR_ADDRESS = AR6320_PCIE_INTR_CLR_ADDRESS,
+	.d_PCIE_INTR_FIRMWARE_MASK = AR6320_PCIE_INTR_FIRMWARE_MASK,
+	.d_PCIE_INTR_CE_MASK_ALL = AR6320_PCIE_INTR_CE_MASK_ALL,
     /* PLL start */
 	.d_EFUSE_OFFSET = AR6320_EFUSE_OFFSET,
 	.d_EFUSE_XTAL_SEL_MSB = AR6320_EFUSE_XTAL_SEL_MSB,
@@ -601,13 +606,6 @@ struct targetdef_s ar6320_targetdef = {
 		AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS,
 	.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
 		AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
-	/* chip id start */
-	.d_SOC_CHIP_ID_ADDRESS = AR6320_SOC_CHIP_ID_ADDRESS,
-	.d_SOC_CHIP_ID_VERSION_MASK = AR6320_SOC_CHIP_ID_VERSION_MASK,
-	.d_SOC_CHIP_ID_VERSION_LSB = AR6320_SOC_CHIP_ID_VERSION_LSB,
-	.d_SOC_CHIP_ID_REVISION_MASK = AR6320_SOC_CHIP_ID_REVISION_MASK,
-	.d_SOC_CHIP_ID_REVISION_LSB = AR6320_SOC_CHIP_ID_REVISION_LSB,
-	/* chip id end */
 
 	.d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET,
 	.d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB,
@@ -635,7 +633,14 @@ struct targetdef_s ar6320_targetdef = {
 	.d_AMBA_DEBUG_BUS_SEL_MSB = AR6320_AMBA_DEBUG_BUS_SEL_MSB,
 	.d_AMBA_DEBUG_BUS_SEL_LSB = AR6320_AMBA_DEBUG_BUS_SEL_LSB,
 	.d_AMBA_DEBUG_BUS_SEL_MASK = AR6320_AMBA_DEBUG_BUS_SEL_MASK,
-
+#endif
+	/* chip id start */
+	.d_SOC_CHIP_ID_ADDRESS = AR6320_SOC_CHIP_ID_ADDRESS,
+	.d_SOC_CHIP_ID_VERSION_MASK = AR6320_SOC_CHIP_ID_VERSION_MASK,
+	.d_SOC_CHIP_ID_VERSION_LSB = AR6320_SOC_CHIP_ID_VERSION_LSB,
+	.d_SOC_CHIP_ID_REVISION_MASK = AR6320_SOC_CHIP_ID_REVISION_MASK,
+	.d_SOC_CHIP_ID_REVISION_LSB = AR6320_SOC_CHIP_ID_REVISION_LSB,
+	/* chip id end */
 };
 
 struct hostdef_s ar6320_hostdef = {
@@ -695,34 +700,36 @@ struct hostdef_s ar6320_hostdef = {
 	.d_SOC_GLOBAL_RESET_ADDRESS = AR6320_SOC_GLOBAL_RESET_ADDRESS,
 	.d_RTC_STATE_ADDRESS = AR6320_RTC_STATE_ADDRESS,
 	.d_RTC_STATE_COLD_RESET_MASK = AR6320_RTC_STATE_COLD_RESET_MASK,
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 	.d_PCIE_LOCAL_BASE_ADDRESS = AR6320_PCIE_LOCAL_BASE_ADDRESS,
 	.d_PCIE_SOC_WAKE_RESET = AR6320_PCIE_SOC_WAKE_RESET,
 	.d_PCIE_SOC_WAKE_ADDRESS = AR6320_PCIE_SOC_WAKE_ADDRESS,
 	.d_PCIE_SOC_WAKE_V_MASK = AR6320_PCIE_SOC_WAKE_V_MASK,
+	.d_MUX_ID_MASK = AR6320_MUX_ID_MASK,
+	.d_TRANSACTION_ID_MASK = AR6320_TRANSACTION_ID_MASK,
+	.d_FW_IND_HELPER = AR6320_FW_IND_HELPER,
+	.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
+	.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
+	.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
+	.d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS,
+	.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
+	.d_HOST_CE_COUNT = 8,
+	.d_ENABLE_MSI = 0,
+#endif
 	.d_RTC_STATE_V_MASK = AR6320_RTC_STATE_V_MASK,
 	.d_RTC_STATE_V_LSB = AR6320_RTC_STATE_V_LSB,
 	.d_FW_IND_EVENT_PENDING = AR6320_FW_IND_EVENT_PENDING,
 	.d_FW_IND_INITIALIZED = AR6320_FW_IND_INITIALIZED,
-	.d_FW_IND_HELPER = AR6320_FW_IND_HELPER,
 	.d_RTC_STATE_V_ON = AR6320_RTC_STATE_V_ON,
-	.d_MUX_ID_MASK = AR6320_MUX_ID_MASK,
-	.d_TRANSACTION_ID_MASK = AR6320_TRANSACTION_ID_MASK,
 #if defined(SDIO_3_0)
 	.d_HOST_INT_STATUS_MBOX_DATA_MASK =
 		AR6320_HOST_INT_STATUS_MBOX_DATA_MASK,
 	.d_HOST_INT_STATUS_MBOX_DATA_LSB =
 		AR6320_HOST_INT_STATUS_MBOX_DATA_LSB,
 #endif
-	.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
-	.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
-	.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
-	.d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS,
-	.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
-	.d_HOST_CE_COUNT = 8,
-	.d_ENABLE_MSI = 0,
 };
 
-
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 struct ce_reg_def ar6320_ce_targetdef = {
 	/* copy_engine.c  */
 	.d_DST_WR_INDEX_ADDRESS = AR6320_DST_WR_INDEX_ADDRESS,
@@ -792,5 +799,5 @@ struct ce_reg_def ar6320_ce_targetdef = {
 	.d_CE1_BASE_ADDRESS = AR6320_CE1_BASE_ADDRESS,
 
 };
-
+#endif
 #endif

+ 74 - 67
hif/src/ar6320v2def.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -43,17 +43,7 @@
 #define AR6320V2_EFUSE_BASE_ADDRESS                       0x00024000
 #define AR6320V2_FPGA_REG_BASE_ADDRESS                    0x00039000
 #define AR6320V2_WLAN_UART2_BASE_ADDRESS                  0x00054c00
-#define AR6320V2_CE_WRAPPER_BASE_ADDRESS                  0x00034000
-#define AR6320V2_CE0_BASE_ADDRESS                         0x00034400
-#define AR6320V2_CE1_BASE_ADDRESS                         0x00034800
-#define AR6320V2_CE2_BASE_ADDRESS                         0x00034c00
-#define AR6320V2_CE3_BASE_ADDRESS                         0x00035000
-#define AR6320V2_CE4_BASE_ADDRESS                         0x00035400
-#define AR6320V2_CE5_BASE_ADDRESS                         0x00035800
-#define AR6320V2_CE6_BASE_ADDRESS                         0x00035c00
-#define AR6320V2_CE7_BASE_ADDRESS                         0x00036000
 #define AR6320V2_DBI_BASE_ADDRESS                         0x0003c000
-#define AR6320V2_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS       0x00007800
 
 #define AR6320V2_SCRATCH_3_ADDRESS                        0x0028
 #define AR6320V2_TARG_DRAM_START                          0x00400000
@@ -156,13 +146,10 @@
 #define AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK      0x00000010
 #define AR6320V2_HOST_IS_ADDRESS                          0x0030
 #define AR6320V2_HOST_IS_COPY_COMPLETE_MASK               0x00000001
-#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
 #define AR6320V2_HOST_IE_ADDRESS                          0x002c
 #define AR6320V2_HOST_IE_COPY_COMPLETE_MASK               0x00000001
 #define AR6320V2_SR_BA_ADDRESS                            0x0000
 #define AR6320V2_SR_SIZE_ADDRESS                          0x0004
-#define AR6320V2_CE_CTRL1_ADDRESS                         0x0010
-#define AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
 #define AR6320V2_DR_BA_ADDRESS                            0x0008
 #define AR6320V2_DR_SIZE_ADDRESS                          0x000c
 #define AR6320V2_MISC_IE_ADDRESS                          0x0034
@@ -176,41 +163,18 @@
 #define AR6320V2_SRC_WATERMARK_HIGH_LSB                   0
 #define AR6320V2_DST_WATERMARK_LOW_LSB                    16
 #define AR6320V2_DST_WATERMARK_HIGH_LSB                   0
-#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
-#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
-#define AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB                 0
-#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
-#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
-#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
-#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
-#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
-#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  5
 #define AR6320V2_SOC_GLOBAL_RESET_ADDRESS                 0x0008
 #define AR6320V2_RTC_STATE_ADDRESS                        0x0000
 #define AR6320V2_RTC_STATE_COLD_RESET_MASK                0x00002000
-#define AR6320V2_PCIE_SOC_WAKE_RESET                      0x00000000
-#define AR6320V2_PCIE_SOC_WAKE_ADDRESS                    0x0004
-#define AR6320V2_PCIE_SOC_WAKE_V_MASK                     0x00000001
 #define AR6320V2_RTC_STATE_V_MASK                         0x00000007
 #define AR6320V2_RTC_STATE_V_LSB                          0
 #define AR6320V2_RTC_STATE_V_ON                           3
-#define AR6320V2_MUX_ID_MASK                              0x0000
-#define AR6320V2_TRANSACTION_ID_MASK                      0x3fff
-#define AR6320V2_PCIE_LOCAL_BASE_ADDRESS                  0x80000
 #define AR6320V2_FW_IND_EVENT_PENDING                     1
 #define AR6320V2_FW_IND_INITIALIZED                       2
-#define AR6320V2_FW_IND_HELPER                            4
-#define AR6320V2_PCIE_INTR_ENABLE_ADDRESS                 0x0008
-#define AR6320V2_PCIE_INTR_CLR_ADDRESS                    0x0014
-#define AR6320V2_PCIE_INTR_FIRMWARE_MASK                  0x00000400
-#define AR6320V2_PCIE_INTR_CE0_MASK                       0x00000800
-#define AR6320V2_PCIE_INTR_CE_MASK_ALL                    0x0007f800
-#define AR6320V2_PCIE_INTR_CAUSE_ADDRESS                  0x000c
 #define AR6320V2_CPU_INTR_ADDRESS                         0x0010
 #define AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS            0x00000050
 #define AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK        0x00000004
 #define AR6320V2_SOC_RESET_CONTROL_ADDRESS                0x00000000
-#define AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK            0x00000001
 #define AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK      0x00000040
 #define AR6320V2_CORE_CTRL_ADDRESS                        0x0000
 #define AR6320V2_CORE_CTRL_CPU_INTR_MASK                  0x00002000
@@ -223,8 +187,44 @@
 #define AR6320V2_SOC_CHIP_ID_VERSION_LSB                  18
 #define AR6320V2_SOC_CHIP_ID_REVISION_MASK                0x00000f00
 #define AR6320V2_SOC_CHIP_ID_REVISION_LSB                 8
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+#define AR6320V2_CE_WRAPPER_BASE_ADDRESS                  0x00034000
+#define AR6320V2_CE0_BASE_ADDRESS                         0x00034400
+#define AR6320V2_CE1_BASE_ADDRESS                         0x00034800
+#define AR6320V2_CE2_BASE_ADDRESS                         0x00034c00
+#define AR6320V2_CE3_BASE_ADDRESS                         0x00035000
+#define AR6320V2_CE4_BASE_ADDRESS                         0x00035400
+#define AR6320V2_CE5_BASE_ADDRESS                         0x00035800
+#define AR6320V2_CE6_BASE_ADDRESS                         0x00035c00
+#define AR6320V2_CE7_BASE_ADDRESS                         0x00036000
+#define AR6320V2_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS       0x00007800
+#define AR6320V2_CE_CTRL1_ADDRESS                         0x0010
+#define AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
+#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
+#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
+#define AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB                 0
+#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
+#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
+#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
+#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
+#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020
+#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  5
+#define AR6320V2_PCIE_SOC_WAKE_RESET                      0x00000000
+#define AR6320V2_PCIE_SOC_WAKE_ADDRESS                    0x0004
+#define AR6320V2_PCIE_SOC_WAKE_V_MASK                     0x00000001
+#define AR6320V2_MUX_ID_MASK                              0x0000
+#define AR6320V2_TRANSACTION_ID_MASK                      0x3fff
+#define AR6320V2_PCIE_LOCAL_BASE_ADDRESS                  0x80000
+#define AR6320V2_FW_IND_HELPER                            4
+#define AR6320V2_PCIE_INTR_ENABLE_ADDRESS                 0x0008
+#define AR6320V2_PCIE_INTR_CLR_ADDRESS                    0x0014
+#define AR6320V2_PCIE_INTR_FIRMWARE_MASK                  0x00000400
+#define AR6320V2_PCIE_INTR_CE0_MASK                       0x00000800
+#define AR6320V2_PCIE_INTR_CE_MASK_ALL                    0x0007f800
+#define AR6320V2_PCIE_INTR_CAUSE_ADDRESS                  0x000c
+#define AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK            0x00000001
 #define AR6320V2_SOC_POWER_REG_OFFSET                     0x0000010c
-
 /* Copy Engine Debug */
 #define AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET              0x0000010c
 #define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB             3
@@ -314,6 +314,7 @@
 
 #define AR6320V2_PCIE_INTR_CE_MASK(n) \
 	(AR6320V2_PCIE_INTR_CE0_MASK << (n))
+#endif
 #define AR6320V2_DRAM_BASE_ADDRESS            AR6320V2_TARG_DRAM_START
 #define AR6320V2_FW_INDICATOR_ADDRESS \
 	(AR6320V2_SOC_CORE_BASE_ADDRESS + AR6320V2_SCRATCH_3_ADDRESS)
@@ -382,6 +383,8 @@
 #define AR6320V2_WINDOW_DATA_ADDRESS                     0x0874
 #define AR6320V2_WINDOW_READ_ADDR_ADDRESS                0x087c
 #define AR6320V2_WINDOW_WRITE_ADDR_ADDRESS               0x0878
+#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f
+#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB 0
 
 struct targetdef_s ar6320v2_targetdef = {
 	.d_RTC_SOC_BASE_ADDRESS = AR6320V2_RTC_SOC_BASE_ADDRESS,
@@ -461,14 +464,8 @@ struct targetdef_s ar6320v2_targetdef = {
 	.d_DRAM_BASE_ADDRESS = AR6320V2_DRAM_BASE_ADDRESS,
 	.d_SOC_CORE_BASE_ADDRESS = AR6320V2_SOC_CORE_BASE_ADDRESS,
 	.d_CORE_CTRL_ADDRESS = AR6320V2_CORE_CTRL_ADDRESS,
-	.d_CE_COUNT = AR6320V2_CE_COUNT,
 	.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
 	.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
-	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
-	.d_PCIE_INTR_ENABLE_ADDRESS = AR6320V2_PCIE_INTR_ENABLE_ADDRESS,
-	.d_PCIE_INTR_CLR_ADDRESS = AR6320V2_PCIE_INTR_CLR_ADDRESS,
-	.d_PCIE_INTR_FIRMWARE_MASK = AR6320V2_PCIE_INTR_FIRMWARE_MASK,
-	.d_PCIE_INTR_CE_MASK_ALL = AR6320V2_PCIE_INTR_CE_MASK_ALL,
 	.d_CORE_CTRL_CPU_INTR_MASK = AR6320V2_CORE_CTRL_CPU_INTR_MASK,
 	.d_SR_WR_INDEX_ADDRESS = AR6320V2_SR_WR_INDEX_ADDRESS,
 	.d_DST_WATERMARK_ADDRESS = AR6320V2_DST_WATERMARK_ADDRESS,
@@ -532,6 +529,13 @@ struct targetdef_s ar6320v2_targetdef = {
 		AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK,
 	.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
 		AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+	.d_CE_COUNT = AR6320V2_CE_COUNT,
+	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
+	.d_PCIE_INTR_ENABLE_ADDRESS = AR6320V2_PCIE_INTR_ENABLE_ADDRESS,
+	.d_PCIE_INTR_CLR_ADDRESS = AR6320V2_PCIE_INTR_CLR_ADDRESS,
+	.d_PCIE_INTR_FIRMWARE_MASK = AR6320V2_PCIE_INTR_FIRMWARE_MASK,
+	.d_PCIE_INTR_CE_MASK_ALL = AR6320V2_PCIE_INTR_CE_MASK_ALL,
 	/* PLL start */
 	.d_EFUSE_OFFSET = AR6320V2_EFUSE_OFFSET,
 	.d_EFUSE_XTAL_SEL_MSB = AR6320V2_EFUSE_XTAL_SEL_MSB,
@@ -609,21 +613,6 @@ struct targetdef_s ar6320v2_targetdef = {
 		AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB,
 	.d_SOC_RESET_CONTROL_CE_RST_MASK =
 		AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK,
-	.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
-		AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
-	.d_CPU_INTR_ADDRESS = AR6320V2_CPU_INTR_ADDRESS,
-	.d_SOC_LF_TIMER_CONTROL0_ADDRESS =
-		AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS,
-	.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
-		AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
-	/* chip id start */
-	.d_SOC_CHIP_ID_ADDRESS = AR6320V2_SOC_CHIP_ID_ADDRESS,
-	.d_SOC_CHIP_ID_VERSION_MASK = AR6320V2_SOC_CHIP_ID_VERSION_MASK,
-	.d_SOC_CHIP_ID_VERSION_LSB = AR6320V2_SOC_CHIP_ID_VERSION_LSB,
-	.d_SOC_CHIP_ID_REVISION_MASK = AR6320V2_SOC_CHIP_ID_REVISION_MASK,
-	.d_SOC_CHIP_ID_REVISION_LSB = AR6320V2_SOC_CHIP_ID_REVISION_LSB,
-	/* chip id end */
-
 	.d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET,
 	.d_WLAN_DEBUG_INPUT_SEL_SRC_MSB =
 		AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB,
@@ -652,6 +641,21 @@ struct targetdef_s ar6320v2_targetdef = {
 	.d_AMBA_DEBUG_BUS_SEL_MSB = AR6320V2_AMBA_DEBUG_BUS_SEL_MSB,
 	.d_AMBA_DEBUG_BUS_SEL_LSB = AR6320V2_AMBA_DEBUG_BUS_SEL_LSB,
 	.d_AMBA_DEBUG_BUS_SEL_MASK = AR6320V2_AMBA_DEBUG_BUS_SEL_MASK,
+#endif
+	.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
+		AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
+	.d_CPU_INTR_ADDRESS = AR6320V2_CPU_INTR_ADDRESS,
+	.d_SOC_LF_TIMER_CONTROL0_ADDRESS =
+		AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS,
+	.d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK =
+		AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK,
+	/* chip id start */
+	.d_SOC_CHIP_ID_ADDRESS = AR6320V2_SOC_CHIP_ID_ADDRESS,
+	.d_SOC_CHIP_ID_VERSION_MASK = AR6320V2_SOC_CHIP_ID_VERSION_MASK,
+	.d_SOC_CHIP_ID_VERSION_LSB = AR6320V2_SOC_CHIP_ID_VERSION_LSB,
+	.d_SOC_CHIP_ID_REVISION_MASK = AR6320V2_SOC_CHIP_ID_REVISION_MASK,
+	.d_SOC_CHIP_ID_REVISION_LSB = AR6320V2_SOC_CHIP_ID_REVISION_LSB,
+	/* chip id end */
 };
 
 struct hostdef_s ar6320v2_hostdef = {
@@ -714,24 +718,25 @@ struct hostdef_s ar6320v2_hostdef = {
 	.d_SOC_GLOBAL_RESET_ADDRESS = AR6320V2_SOC_GLOBAL_RESET_ADDRESS,
 	.d_RTC_STATE_ADDRESS = AR6320V2_RTC_STATE_ADDRESS,
 	.d_RTC_STATE_COLD_RESET_MASK = AR6320V2_RTC_STATE_COLD_RESET_MASK,
-	.d_PCIE_LOCAL_BASE_ADDRESS = AR6320V2_PCIE_LOCAL_BASE_ADDRESS,
-	.d_PCIE_SOC_WAKE_RESET = AR6320V2_PCIE_SOC_WAKE_RESET,
-	.d_PCIE_SOC_WAKE_ADDRESS = AR6320V2_PCIE_SOC_WAKE_ADDRESS,
-	.d_PCIE_SOC_WAKE_V_MASK = AR6320V2_PCIE_SOC_WAKE_V_MASK,
 	.d_RTC_STATE_V_MASK = AR6320V2_RTC_STATE_V_MASK,
 	.d_RTC_STATE_V_LSB = AR6320V2_RTC_STATE_V_LSB,
 	.d_FW_IND_EVENT_PENDING = AR6320V2_FW_IND_EVENT_PENDING,
 	.d_FW_IND_INITIALIZED = AR6320V2_FW_IND_INITIALIZED,
-	.d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER,
 	.d_RTC_STATE_V_ON = AR6320V2_RTC_STATE_V_ON,
-	.d_MUX_ID_MASK = AR6320V2_MUX_ID_MASK,
-	.d_TRANSACTION_ID_MASK = AR6320V2_TRANSACTION_ID_MASK,
 #if defined(SDIO_3_0)
 	.d_HOST_INT_STATUS_MBOX_DATA_MASK =
 		AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK,
 	.d_HOST_INT_STATUS_MBOX_DATA_LSB =
 		AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB,
 #endif
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+	.d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER,
+	.d_MUX_ID_MASK = AR6320V2_MUX_ID_MASK,
+	.d_TRANSACTION_ID_MASK = AR6320V2_TRANSACTION_ID_MASK,
+	.d_PCIE_LOCAL_BASE_ADDRESS = AR6320V2_PCIE_LOCAL_BASE_ADDRESS,
+	.d_PCIE_SOC_WAKE_RESET = AR6320V2_PCIE_SOC_WAKE_RESET,
+	.d_PCIE_SOC_WAKE_ADDRESS = AR6320V2_PCIE_SOC_WAKE_ADDRESS,
+	.d_PCIE_SOC_WAKE_V_MASK = AR6320V2_PCIE_SOC_WAKE_V_MASK,
 	.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
 	.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
 	.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
@@ -739,8 +744,10 @@ struct hostdef_s ar6320v2_hostdef = {
 	.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
 	.d_HOST_CE_COUNT = 8,
 	.d_ENABLE_MSI = 0,
+#endif
 };
 
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 struct ce_reg_def ar6320v2_ce_targetdef = {
 	/* copy_engine.c  */
 	.d_DST_WR_INDEX_ADDRESS = AR6320V2_DST_WR_INDEX_ADDRESS,
@@ -811,5 +818,5 @@ struct ce_reg_def ar6320v2_ce_targetdef = {
 	.d_CE1_BASE_ADDRESS = AR6320V2_CE1_BASE_ADDRESS,
 
 };
-
+#endif
 #endif

+ 51 - 43
hif/src/ar9888def.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -43,6 +43,7 @@
 #define AR9888_EFUSE_BASE_ADDRESS                       0x00030000
 #define AR9888_FPGA_REG_BASE_ADDRESS                    0x00039000
 #define AR9888_WLAN_UART2_BASE_ADDRESS                  0x00054c00
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 #define AR9888_CE_WRAPPER_BASE_ADDRESS                  0x00057000
 #define AR9888_CE0_BASE_ADDRESS                         0x00057400
 #define AR9888_CE1_BASE_ADDRESS                         0x00057800
@@ -52,9 +53,35 @@
 #define AR9888_CE5_BASE_ADDRESS                         0x00058800
 #define AR9888_CE6_BASE_ADDRESS                         0x00058c00
 #define AR9888_CE7_BASE_ADDRESS                         0x00059000
-#define AR9888_DBI_BASE_ADDRESS                         0x00060000
 #define AR9888_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS       0x0006c000
-
+#define AR9888_CE_CTRL1_ADDRESS                         0x0010
+#define AR9888_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
+#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
+#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
+#define AR9888_CE_CTRL1_DMAX_LENGTH_LSB                 0
+#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
+#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
+#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
+#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
+#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000004
+#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  2
+#define AR9888_PCIE_SOC_WAKE_RESET                      0x00000000
+#define AR9888_PCIE_SOC_WAKE_ADDRESS                    0x0004
+#define AR9888_PCIE_SOC_WAKE_V_MASK                     0x00000001
+#define AR9888_PCIE_INTR_ENABLE_ADDRESS                 0x0008
+#define AR9888_PCIE_INTR_CLR_ADDRESS                    0x0014
+#define AR9888_PCIE_INTR_FIRMWARE_MASK                  0x00000400
+#define AR9888_PCIE_INTR_CE0_MASK                       0x00000800
+#define AR9888_PCIE_INTR_CE_MASK_ALL                    0x0007f800
+#define AR9888_PCIE_INTR_CAUSE_ADDRESS                  0x000c
+#define AR9888_MUX_ID_MASK                              0x0000
+#define AR9888_TRANSACTION_ID_MASK                      0x3fff
+#define AR9888_PCIE_LOCAL_BASE_ADDRESS                  0x80000
+#define AR9888_SOC_RESET_CONTROL_CE_RST_MASK            0x00040000
+#define AR9888_PCIE_INTR_CE_MASK(n) (AR9888_PCIE_INTR_CE0_MASK << (n))
+#endif
+#define AR9888_DBI_BASE_ADDRESS                         0x00060000
 #define AR9888_SCRATCH_3_ADDRESS                        0x0030
 #define AR9888_TARG_DRAM_START                          0x00400000
 #define AR9888_SOC_SYSTEM_SLEEP_OFFSET                  0x000000c4
@@ -156,13 +183,10 @@
 #define AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK      0x00000010
 #define AR9888_HOST_IS_ADDRESS                          0x0030
 #define AR9888_HOST_IS_COPY_COMPLETE_MASK               0x00000001
-#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS     0x0000
 #define AR9888_HOST_IE_ADDRESS                          0x002c
 #define AR9888_HOST_IE_COPY_COMPLETE_MASK               0x00000001
 #define AR9888_SR_BA_ADDRESS                            0x0000
 #define AR9888_SR_SIZE_ADDRESS                          0x0004
-#define AR9888_CE_CTRL1_ADDRESS                         0x0010
-#define AR9888_CE_CTRL1_DMAX_LENGTH_MASK                0x0000ffff
 #define AR9888_DR_BA_ADDRESS                            0x0008
 #define AR9888_DR_SIZE_ADDRESS                          0x000c
 #define AR9888_MISC_IE_ADDRESS                          0x0034
@@ -176,40 +200,19 @@
 #define AR9888_SRC_WATERMARK_HIGH_LSB                   0
 #define AR9888_DST_WATERMARK_LOW_LSB                    16
 #define AR9888_DST_WATERMARK_HIGH_LSB                   0
-#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
-#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB  8
-#define AR9888_CE_CTRL1_DMAX_LENGTH_LSB                 0
-#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK      0x00010000
-#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK      0x00020000
-#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB       16
-#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB       17
-#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000004
-#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB  2
 #define AR9888_SOC_GLOBAL_RESET_ADDRESS                 0x0008
 #define AR9888_RTC_STATE_ADDRESS                        0x0000
 #define AR9888_RTC_STATE_COLD_RESET_MASK                0x00000400
-#define AR9888_PCIE_SOC_WAKE_RESET                      0x00000000
-#define AR9888_PCIE_SOC_WAKE_ADDRESS                    0x0004
-#define AR9888_PCIE_SOC_WAKE_V_MASK                     0x00000001
+
 #define AR9888_RTC_STATE_V_MASK                         0x00000007
 #define AR9888_RTC_STATE_V_LSB                          0
 #define AR9888_RTC_STATE_V_ON                           3
-#define AR9888_MUX_ID_MASK                              0x0000
-#define AR9888_TRANSACTION_ID_MASK                      0x3fff
-#define AR9888_PCIE_LOCAL_BASE_ADDRESS                  0x80000
 #define AR9888_FW_IND_EVENT_PENDING                     1
 #define AR9888_FW_IND_INITIALIZED                       2
-#define AR9888_PCIE_INTR_ENABLE_ADDRESS                 0x0008
-#define AR9888_PCIE_INTR_CLR_ADDRESS                    0x0014
-#define AR9888_PCIE_INTR_FIRMWARE_MASK                  0x00000400
-#define AR9888_PCIE_INTR_CE0_MASK                       0x00000800
-#define AR9888_PCIE_INTR_CE_MASK_ALL                    0x0007f800
-#define AR9888_PCIE_INTR_CAUSE_ADDRESS                  0x000c
 #define AR9888_CPU_INTR_ADDRESS                         0x0010
 #define AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS            0x00000050
 #define AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK        0x00000004
 #define AR9888_SOC_RESET_CONTROL_ADDRESS                0x00000000
-#define AR9888_SOC_RESET_CONTROL_CE_RST_MASK            0x00040000
 #define AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK      0x00000040
 #define AR9888_CORE_CTRL_ADDRESS                        0x0000
 #define AR9888_CORE_CTRL_CPU_INTR_MASK                  0x00002000
@@ -218,7 +221,6 @@
 #define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB             0
 #define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK            0
 
-#define AR9888_PCIE_INTR_CE_MASK(n) (AR9888_PCIE_INTR_CE0_MASK << (n))
 #define AR9888_FW_EVENT_PENDING_ADDRESS \
 	(AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS)
 #define AR9888_DRAM_BASE_ADDRESS AR9888_TARG_DRAM_START
@@ -287,6 +289,8 @@
 #define AR9888_WINDOW_DATA_ADDRESS                     MISSING
 #define AR9888_WINDOW_READ_ADDR_ADDRESS                MISSING
 #define AR9888_WINDOW_WRITE_ADDR_ADDRESS               MISSING
+#define AR9888_HOST_INT_STATUS_MBOX_DATA_MASK          0x0f
+#define AR9888_HOST_INT_STATUS_MBOX_DATA_LSB           0
 
 struct targetdef_s ar9888_targetdef = {
 	.d_RTC_SOC_BASE_ADDRESS = AR9888_RTC_SOC_BASE_ADDRESS,
@@ -363,14 +367,8 @@ struct targetdef_s ar9888_targetdef = {
 	.d_DRAM_BASE_ADDRESS = AR9888_DRAM_BASE_ADDRESS,
 	.d_SOC_CORE_BASE_ADDRESS = AR9888_SOC_CORE_BASE_ADDRESS,
 	.d_CORE_CTRL_ADDRESS = AR9888_CORE_CTRL_ADDRESS,
-	.d_CE_COUNT = AR9888_CE_COUNT,
 	.d_MSI_NUM_REQUEST = MSI_NUM_REQUEST,
 	.d_MSI_ASSIGN_FW = MSI_ASSIGN_FW,
-	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
-	.d_PCIE_INTR_ENABLE_ADDRESS = AR9888_PCIE_INTR_ENABLE_ADDRESS,
-	.d_PCIE_INTR_CLR_ADDRESS = AR9888_PCIE_INTR_CLR_ADDRESS,
-	.d_PCIE_INTR_FIRMWARE_MASK = AR9888_PCIE_INTR_FIRMWARE_MASK,
-	.d_PCIE_INTR_CE_MASK_ALL = AR9888_PCIE_INTR_CE_MASK_ALL,
 	.d_CORE_CTRL_CPU_INTR_MASK = AR9888_CORE_CTRL_CPU_INTR_MASK,
 	.d_SR_WR_INDEX_ADDRESS = AR9888_SR_WR_INDEX_ADDRESS,
 	.d_DST_WATERMARK_ADDRESS = AR9888_DST_WATERMARK_ADDRESS,
@@ -424,7 +422,13 @@ struct targetdef_s ar9888_targetdef = {
 		AR9888_RX_ATTENTION_0_MSDU_DONE_MASK,
 	.d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK =
 		AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK,
-
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+	.d_CE_COUNT = AR9888_CE_COUNT,
+	.d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL,
+	.d_PCIE_INTR_ENABLE_ADDRESS = AR9888_PCIE_INTR_ENABLE_ADDRESS,
+	.d_PCIE_INTR_CLR_ADDRESS = AR9888_PCIE_INTR_CLR_ADDRESS,
+	.d_PCIE_INTR_FIRMWARE_MASK = AR9888_PCIE_INTR_FIRMWARE_MASK,
+	.d_PCIE_INTR_CE_MASK_ALL = AR9888_PCIE_INTR_CE_MASK_ALL,
 	.d_PCIE_INTR_CAUSE_ADDRESS = AR9888_PCIE_INTR_CAUSE_ADDRESS,
 	.d_SOC_RESET_CONTROL_ADDRESS = AR9888_SOC_RESET_CONTROL_ADDRESS,
 	.d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK =
@@ -433,6 +437,7 @@ struct targetdef_s ar9888_targetdef = {
 		AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB,
 	.d_SOC_RESET_CONTROL_CE_RST_MASK =
 		AR9888_SOC_RESET_CONTROL_CE_RST_MASK,
+#endif
 	.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK =
 		AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK,
 	.d_CPU_INTR_ADDRESS = AR9888_CPU_INTR_ADDRESS,
@@ -499,23 +504,24 @@ struct hostdef_s ar9888_hostdef = {
 	.d_SOC_GLOBAL_RESET_ADDRESS = AR9888_SOC_GLOBAL_RESET_ADDRESS,
 	.d_RTC_STATE_ADDRESS = AR9888_RTC_STATE_ADDRESS,
 	.d_RTC_STATE_COLD_RESET_MASK = AR9888_RTC_STATE_COLD_RESET_MASK,
-	.d_PCIE_LOCAL_BASE_ADDRESS = AR9888_PCIE_LOCAL_BASE_ADDRESS,
-	.d_PCIE_SOC_WAKE_RESET = AR9888_PCIE_SOC_WAKE_RESET,
-	.d_PCIE_SOC_WAKE_ADDRESS = AR9888_PCIE_SOC_WAKE_ADDRESS,
-	.d_PCIE_SOC_WAKE_V_MASK = AR9888_PCIE_SOC_WAKE_V_MASK,
 	.d_RTC_STATE_V_MASK = AR9888_RTC_STATE_V_MASK,
 	.d_RTC_STATE_V_LSB = AR9888_RTC_STATE_V_LSB,
 	.d_FW_IND_EVENT_PENDING = AR9888_FW_IND_EVENT_PENDING,
 	.d_FW_IND_INITIALIZED = AR9888_FW_IND_INITIALIZED,
 	.d_RTC_STATE_V_ON = AR9888_RTC_STATE_V_ON,
-	.d_MUX_ID_MASK = AR9888_MUX_ID_MASK,
-	.d_TRANSACTION_ID_MASK = AR9888_TRANSACTION_ID_MASK,
 #if defined(SDIO_3_0)
 	.d_HOST_INT_STATUS_MBOX_DATA_MASK =
 		AR9888_HOST_INT_STATUS_MBOX_DATA_MASK,
 	.d_HOST_INT_STATUS_MBOX_DATA_LSB =
 		AR9888_HOST_INT_STATUS_MBOX_DATA_LSB,
 #endif
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+	.d_MUX_ID_MASK = AR9888_MUX_ID_MASK,
+	.d_TRANSACTION_ID_MASK = AR9888_TRANSACTION_ID_MASK,
+	.d_PCIE_LOCAL_BASE_ADDRESS = AR9888_PCIE_LOCAL_BASE_ADDRESS,
+	.d_PCIE_SOC_WAKE_RESET = AR9888_PCIE_SOC_WAKE_RESET,
+	.d_PCIE_SOC_WAKE_ADDRESS = AR9888_PCIE_SOC_WAKE_ADDRESS,
+	.d_PCIE_SOC_WAKE_V_MASK = AR9888_PCIE_SOC_WAKE_V_MASK,
 	.d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS,
 	.d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK,
 	.d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS,
@@ -523,9 +529,10 @@ struct hostdef_s ar9888_hostdef = {
 	.d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS,
 	.d_HOST_CE_COUNT = 8,
 	.d_ENABLE_MSI = 0,
+#endif
 };
 
-
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
 struct ce_reg_def ar9888_ce_targetdef = {
 	/* copy_engine.c  */
 	.d_DST_WR_INDEX_ADDRESS = AR9888_DST_WR_INDEX_ADDRESS,
@@ -588,3 +595,4 @@ struct ce_reg_def ar9888_ce_targetdef = {
 
 };
 #endif
+#endif

+ 8 - 1
hif/src/hif_debug.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -39,4 +39,11 @@
 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, ## args)
 #define HIF_DBG(args ...) \
 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, ## args)
+
+#define HIF_ENTER(fmt, ...) QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, \
+		"Enter: %s "fmt, __func__, ## __VA_ARGS__)
+
+#define HIF_EXIT(fmt, ...) QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, \
+		"Exit: %s "fmt, __func__, ## __VA_ARGS__)
+
 #endif /* __HIF_DEBUG_H__ */

+ 1 - 1
hif/src/hif_hw_version.h

@@ -38,7 +38,7 @@
 #define AR6320_REV3_VERSION             0x5020000
 #define AR6320_REV3_2_VERSION           0x5030000
 #define AR6320_DEV_VERSION              0x1000000
-
+#define QCA9377_REV1_1_VERSION          0x5020001
 
 struct qwlan_hw {
 	u32 id;

+ 1 - 1
hif/src/hif_io32.h

@@ -30,6 +30,7 @@
 
 #include <linux/io.h>
 #include "hif.h"
+#include "hif_main.h"
 
 #define hif_read32_mb(addr)         ioread32((void __iomem *)addr)
 #define hif_write32_mb(addr, value) \
@@ -80,7 +81,6 @@
 #ifdef HIF_PCI
 #include "hif_io32_pci.h"
 #endif
-
 #ifdef HIF_SNOC
 #include "hif_io32_snoc.h"
 #endif /* HIF_PCI */

+ 4 - 0
hif/src/hif_main.h

@@ -94,6 +94,7 @@
 
 #define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn)
 #define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn)
+#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn)
 #define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn)
 #define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn)
 
@@ -162,7 +163,9 @@ A_target_id_t hif_get_target_id(struct hif_softc *scn);
 void hif_dump_pipe_debug_count(struct hif_softc *scn);
 
 bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count);
+void hif_shutdown_device(struct hif_opaque_softc *hif_ctx);
 int hif_bus_configure(struct hif_softc *scn);
+void hif_cancel_deferred_target_sleep(struct hif_softc *scn);
 int hif_config_ce(struct hif_softc *scn);
 void hif_unconfig_ce(struct hif_softc *scn);
 void hif_ce_prepare_config(struct hif_softc *scn);
@@ -195,6 +198,7 @@ struct hif_driver_state_callbacks *hif_get_callbacks_handle(struct hif_softc *sc
 bool hif_is_driver_unloading(struct hif_softc *scn);
 bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
 bool hif_is_recovery_in_progress(struct hif_softc *scn);
+void hif_wlan_disable(struct hif_softc *scn);
 int hif_target_sleep_state_adjust(struct hif_softc *scn,
 					 bool sleep_ok,
 					 bool wait_for_it);

+ 527 - 0
hif/src/sdio/hif_bmi_reg_access.c

@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+#define ATH_DEBUG_BMI  ATH_DEBUG_MAKE_MODULE_MASK(0)
+#include "hif.h"
+#include "bmi.h"
+#include "htc_api.h"
+#include "if_sdio.h"
+#include "regtable_sdio.h"
+
+#define BMI_COMMUNICATION_TIMEOUT       100000
+
+static bool pending_events_func_check;
+static uint32_t command_credits;
+static uint32_t *p_bmi_cmd_credits = &command_credits;
+/* BMI Access routines */
+
+/**
+ * hif_bmi_buffer_send - call to send bmi buffer
+ * @device: hif context
+ * @buffer: buffer
+ * @length: length
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+static QDF_STATUS
+hif_bmi_buffer_send(struct hif_sdio_dev *device, char *buffer, uint32_t length)
+{
+	QDF_STATUS status;
+	uint32_t timeout;
+	uint32_t address;
+	uint32_t mbox_address[HTC_MAILBOX_NUM_MAX];
+
+	hif_configure_device(device, HIF_DEVICE_GET_MBOX_ADDR,
+			     &mbox_address[0], sizeof(mbox_address));
+
+	*p_bmi_cmd_credits = 0;
+	timeout = BMI_COMMUNICATION_TIMEOUT;
+
+	while (timeout-- && !(*p_bmi_cmd_credits)) {
+		/* Read the counter register to get the command credits */
+		address =
+		      COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+		/* hit the credit counter with a 4-byte access, the first
+		 * byte read will hit the counter and cause
+		 * a decrement, while the remaining 3 bytes have no effect.
+		 * The rationale behind this is to make all HIF accesses
+		 * 4-byte aligned */
+		status =
+			hif_read_write(device, address,
+				       (uint8_t *) p_bmi_cmd_credits, 4,
+				       HIF_RD_SYNC_BYTE_INC, NULL);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			  ("%s:Unable to decrement the credit count register\n",
+			  __func__));
+			return QDF_STATUS_E_FAILURE;
+		}
+		/* the counter is only 8 bits, ignore anything in the
+		 * upper 3 bytes */
+		(*p_bmi_cmd_credits) &= 0xFF;
+	}
+
+	if (*p_bmi_cmd_credits) {
+		address = mbox_address[ENDPOINT1];
+		status = hif_read_write(device, address, buffer, length,
+					HIF_WR_SYNC_BYTE_INC, NULL);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			  ("%s:Unable to send the BMI data to the device\n",
+			  __func__));
+			return QDF_STATUS_E_FAILURE;
+		}
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s:BMI Communication timeout - hif_bmi_buffer_send\n",
+			__func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return status;
+}
+
+#if defined(SDIO_3_0)
+
+static QDF_STATUS
+hif_bmi_read_write(struct hif_sdio_dev *device,
+		   char *buffer, uint32_t length)
+{
+	QDF_STATUS status;
+
+	status = hif_read_write(device, HOST_INT_STATUS_ADDRESS,
+				buffer, length,
+				HIF_RD_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:Unable to read int status reg\n",
+				 __func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+	*buffer = (HOST_INT_STATUS_MBOX_DATA_GET(*buffer) & (1 << ENDPOINT1));
+	return status;
+}
+#else
+
+static QDF_STATUS
+hif_bmi_read_write(struct hif_sdio_dev *device,
+		   char *buffer, uint32_t length)
+{
+	QDF_STATUS status;
+	status = hif_read_write(device, RX_LOOKAHEAD_VALID_ADDRESS,
+				buffer, length,
+				HIF_RD_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:Unable to read rx lookahead reg\n",
+				 __func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+	*buffer &= (1 << ENDPOINT1);
+	return status;
+}
+#endif
+
+/**
+ * hif_bmi_buffer_receive - call when bmi buffer is received
+ * @device: hif context
+ * @buffer: buffer
+ * @length: length
+ * @want_timeout: timeout is needed or not
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+static QDF_STATUS
+hif_bmi_buffer_receive(struct hif_sdio_dev *device,
+		       char *buffer, uint32_t length, bool want_timeout)
+{
+	QDF_STATUS status;
+	uint32_t address;
+	uint32_t mbox_address[HTC_MAILBOX_NUM_MAX];
+	struct _HIF_PENDING_EVENTS_INFO hif_pending_events;
+	static HIF_PENDING_EVENTS_FUNC get_pending_events_func;
+
+	if (!pending_events_func_check) {
+		/* see if the HIF layer implements an alternative
+		 * function to get pending events
+		 * do this only once! */
+		hif_configure_device(device,
+				     HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
+				     &get_pending_events_func,
+				     sizeof(get_pending_events_func));
+		pending_events_func_check = true;
+	}
+
+	hif_configure_device(device, HIF_DEVICE_GET_MBOX_ADDR,
+			     &mbox_address[0], sizeof(mbox_address));
+
+	/*
+	 * During normal bootup, small reads may be required.
+	 * Rather than issue an HIF Read and then wait as the Target
+	 * adds successive bytes to the FIFO, we wait here until
+	 * we know that response data is available.
+	 *
+	 * This allows us to cleanly timeout on an unexpected
+	 * Target failure rather than risk problems at the HIF level.  In
+	 * particular, this avoids SDIO timeouts and possibly garbage
+	 * data on some host controllers.  And on an interconnect
+	 * such as Compact Flash (as well as some SDIO masters) which
+	 * does not provide any indication on data timeout, it avoids
+	 * a potential hang or garbage response.
+	 *
+	 * Synchronization is more difficult for reads larger than the
+	 * size of the MBOX FIFO (128B), because the Target is unable
+	 * to push the 129th byte of data until AFTER the Host posts an
+	 * HIF Read and removes some FIFO data.  So for large reads the
+	 * Host proceeds to post an HIF Read BEFORE all the data is
+	 * actually available to read.  Fortunately, large BMI reads do
+	 * not occur in practice -- they're supported for debug/development.
+	 *
+	 * So Host/Target BMI synchronization is divided into these cases:
+	 *  CASE 1: length < 4
+	 *        Should not happen
+	 *
+	 *  CASE 2: 4 <= length <= 128
+	 *        Wait for first 4 bytes to be in FIFO
+	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
+	 *        a BMI command credit, which indicates that the ENTIRE
+	 *        response is available in the FIFO
+	 *
+	 *  CASE 3: length > 128
+	 *        Wait for the first 4 bytes to be in FIFO
+	 *
+	 * For most uses, a small timeout should be sufficient and we will
+	 * usually see a response quickly; but there may be some unusual
+	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+	 * For now, we use an unbounded busy loop while waiting for
+	 * BMI_EXECUTE.
+	 *
+	 * If BMI_EXECUTE ever needs to support longer-latency execution,
+	 * especially in production, this code needs to be enhanced to sleep
+	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
+	 * a function of Host processor speed.
+	 */
+	if (length >= 4) {      /* NB: Currently, always true */
+		/*
+		 * NB: word_available is declared static for esoteric reasons
+		 * having to do with protection on some OSes.
+		 */
+		static uint32_t word_available;
+		uint32_t timeout;
+
+		word_available = 0;
+		timeout = BMI_COMMUNICATION_TIMEOUT;
+		while ((!want_timeout || timeout--) && !word_available) {
+
+			if (get_pending_events_func != NULL) {
+				status = get_pending_events_func(device,
+							&hif_pending_events,
+							NULL);
+				if (status != QDF_STATUS_SUCCESS) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					  ("%s:Failed to get pending events\n",
+					  __func__));
+					break;
+				}
+
+				if (hif_pending_events.available_recv_bytes >=
+							sizeof(uint32_t)) {
+					word_available = 1;
+				}
+				continue;
+			}
+			status = hif_bmi_read_write(device,
+					(uint8_t *) &word_available,
+					sizeof(word_available));
+			if (status != QDF_STATUS_SUCCESS)
+				return QDF_STATUS_E_FAILURE;
+		}
+
+		if (!word_available) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:BMI Communication timeout FIFO empty\n",
+				__func__));
+			return QDF_STATUS_E_FAILURE;
+		}
+	}
+
+	address = mbox_address[ENDPOINT1];
+	status = hif_read_write(device, address, buffer, length,
+				HIF_RD_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s:Unable to read the BMI data from the device\n",
+			__func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_reg_based_get_target_info - to retrieve target info
+ * @hif_ctx: hif context
+ * @targ_info: bmi target info
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_reg_based_get_target_info(struct hif_opaque_softc *hif_ctx,
+					 struct bmi_target_info
+					 *targ_info) {
+	QDF_STATUS status;
+	uint32_t cid;
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *device = scn->hif_handle;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+			("BMI Get Target Info: Enter (device: 0x%p)\n",
+			device));
+	cid = BMI_GET_TARGET_INFO;
+	status = hif_bmi_buffer_send(device, (char *) &cid, sizeof(cid));
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:Unable to write to the device\n",
+				 __func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	status = hif_bmi_buffer_receive(device,
+					(char *) &targ_info->target_ver,
+					sizeof(targ_info->target_ver), true);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s:Unable to read Target Version from the device\n",
+			 __func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (targ_info->target_ver == TARGET_VERSION_SENTINAL) {
+		/* Determine how many bytes are in the Target's targ_info */
+		status = hif_bmi_buffer_receive(device,
+						(char *) &targ_info->
+						target_info_byte_count,
+						sizeof(targ_info->
+							target_info_byte_count),
+						true);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s:Unable to read target Info\n",
+					 __func__));
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		/*
+		 * The Target's targ_info doesn't match the Host's targ_info.
+		 * We need to do some backwards compatibility work to make this
+		 * OK.*/
+		QDF_ASSERT(targ_info->target_info_byte_count ==
+			 sizeof(*targ_info));
+		/* Read the remainder of the targ_info */
+		status = hif_bmi_buffer_receive(device,
+					    ((char *) targ_info) +
+					    sizeof(targ_info->
+						   target_info_byte_count),
+					    sizeof(*targ_info) -
+					    sizeof(targ_info->
+						   target_info_byte_count),
+					    true);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:Unable to read Target Info (%d bytes)\n",
+				__func__, targ_info->target_info_byte_count));
+			return QDF_STATUS_E_FAILURE;
+		}
+	} else {
+		/*
+		 * Target must be an AR6001 whose firmware does not
+		 * support BMI_GET_TARGET_INFO.  Construct the data
+		 * that it would have sent.
+		 */
+		targ_info->target_info_byte_count = sizeof(*targ_info);
+		targ_info->target_type = TARGET_TYPE_AR6001;
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
+			("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n",
+			 targ_info->target_ver,
+			 targ_info->target_type));
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_exchange_bmi_msg - API to handle HIF-specific BMI message exchanges
+ * @hif_ctx: hif context
+ * @bmi_cmd_da: bmi cmd
+ * @bmi_rsp_da: bmi rsp
+ * @send_message: send message
+ * @length: length
+ * @response_message: response message
+ * @response_length: response length
+ * @timeout_ms: timeout in ms
+ *
+ * This API is synchronous
+ * and only allowed to be called from a context that can block (sleep)
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
+				qdf_dma_addr_t bmi_cmd_da,
+				qdf_dma_addr_t bmi_rsp_da,
+				uint8_t *send_message,
+				uint32_t length,
+				uint8_t *response_message,
+				uint32_t *response_length,
+				uint32_t timeout_ms) {
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *device = scn->hif_handle;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	if (device == NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s:Null device argument\n",
+			__func__));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	status = hif_bmi_buffer_send(device, send_message, length);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s:Unable to Send Message to device\n",
+				 __func__));
+		return status;
+	}
+
+	if (response_message != NULL) {
+		status = hif_bmi_buffer_receive(device, response_message,
+						*response_length,
+						timeout_ms ? true : false);
+		if (QDF_IS_STATUS_ERROR(status)) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s:Unable to read response\n",
+					 __func__));
+			return status;
+		}
+	}
+
+	return status;
+}
+
+/**
+ * hif_bmi_raw_write - API to handle bmi raw buffer
+ * @device: hif context
+ * @buffer: buffer
+ * @length: length
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+
+QDF_STATUS
+hif_bmi_raw_write(struct hif_sdio_dev *device, char *buffer,
+	      uint32_t length) {
+	return hif_bmi_buffer_send(device, buffer, length);
+}
+
+/**
+ * hif_bmi_raw_read - call when bmi buffer is received
+ * @device: hif context
+ * @buffer: buffer
+ * @length: length
+ * @want_timeout: timeout is needed or not
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS
+hif_bmi_raw_read(struct hif_sdio_dev *device, char *buffer,
+	     uint32_t length, bool want_timeout)
+{
+	return hif_bmi_buffer_receive(device, buffer, length,
+				  want_timeout);
+}
+
+#ifdef BRINGUP_DEBUG
+#define SDIO_SCRATCH_1_ADDRESS 0x864
+/*Functions used for debugging*/
+/**
+ * hif_bmi_write_scratch_register - API to write scratch register
+ * @device: hif context
+ * @buffer: buffer
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_bmi_write_scratch_register(struct hif_sdio_dev *device,
+				    uint32_t buffer) {
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS,
+				(uint8_t *) &buffer, 4,
+				HIF_WR_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: Unable to write to 0x%x\n",
+				 __func__, SDIO_SCRATCH_1_ADDRESS));
+		return QDF_STATUS_E_FAILURE;
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: wrote 0x%x to 0x%x\n", __func__,
+				 buffer, SDIO_SCRATCH_1_ADDRESS));
+	}
+
+	return status;
+}
+
+/**
+ * hif_bmi_read_scratch_register - API to read from scratch register
+ * @device: hif context
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_bmi_read_scratch_register(struct hif_sdio_dev *device)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	uint32_t buffer = 0;
+
+	status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS,
+				(uint8_t *) &buffer, 4,
+				HIF_RD_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: Unable to read from 0x%x\n",
+				 __func__, SDIO_SCRATCH_1_ADDRESS));
+		return QDF_STATUS_E_FAILURE;
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: read 0x%x from 0x%x\n", __func__,
+				 buffer, SDIO_SCRATCH_1_ADDRESS));
+	}
+
+	return status;
+}
+#endif

+ 330 - 0
hif/src/sdio/hif_diag_reg_access.c

@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+
+#include "targaddrs.h"
+#include "hif.h"
+#include "if_sdio.h"
+#include "regtable_sdio.h"
+
+#define CPU_DBG_SEL_ADDRESS                      0x00000483
+#define CPU_DBG_ADDRESS                          0x00000484
+#define WORD_NON_ALIGNMENT_MASK                  0x03
+
/**
 * hif_ar6000_set_address_window_register - set the window address register
 *                                          (using 4-byte register access).
 * @hif_device: hif context
 * @register_addr: window register to write (read or write window)
 * @addr: target address to load into the window register
 *
 * This mitigates host interconnect issues with non-4byte aligned bus requests,
 * some interconnects use bus adapters that impose strict limitations.
 * Since diag window access is not intended for performance critical operations,
 * the 4byte mode should be satisfactory as it generates 4X the bus activity.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
static
QDF_STATUS hif_ar6000_set_address_window_register(
			struct hif_sdio_dev *hif_device,
			uint32_t register_addr,
			uint32_t addr)
{
	QDF_STATUS status;
	/* NOTE(review): 'address' is static, presumably to keep the SDIO
	 * transfer buffer off the stack (some host controllers cannot DMA
	 * from stack memory); this makes the function non-reentrant —
	 * TODO confirm the DMA constraint before changing it. */
	static uint32_t address;

	address = addr;
	/* AR6320: just write the 4-byte address to the window register */
	status = hif_read_write(hif_device,
				register_addr,
				(char *) (&address),
				4, HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("Cannot write 0x%x to window reg: 0x%X\n",
			 addr, register_addr));
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
+
/**
 * hif_diag_read_access - Read from the AR6000 through its diagnostic window.
 * @hif_ctx: hif context
 * @address: target address to read; must be 4-byte aligned
 * @data: filled with the 32-bit word read from the target
 *
 * No cooperation from the Target is required for this.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address,
				uint32_t *data)
{
	QDF_STATUS status;
	/* NOTE(review): static keeps the SDIO transfer buffer off the
	 * stack (presumably a host-controller DMA constraint) at the
	 * cost of reentrancy — TODO confirm. */
	static uint32_t readvalue;
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;

	/* diag window accesses are word sized */
	if (address & WORD_NON_ALIGNMENT_MASK) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("[%s]addr is not 4 bytes align.addr[0x%08x]\n",
			 __func__, address));
		return QDF_STATUS_E_FAILURE;
	}

	/* set window register to start read cycle */
	status = hif_ar6000_set_address_window_register(hif_device,
						WINDOW_READ_ADDR_ADDRESS,
						address);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* read the data latched in the window data register */
	status = hif_read_write(hif_device,
				WINDOW_DATA_ADDRESS,
				(char *) &readvalue,
				sizeof(uint32_t), HIF_RD_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("Cannot read from WINDOW_DATA_ADDRESS\n"));
		return status;
	}

	*data = readvalue;
	return status;
}
+
/**
 * hif_diag_write_access - Write to the AR6000 through its diagnostic window.
 * @hif_ctx: hif context
 * @address: target address to write; must be 4-byte aligned
 * @data: 32-bit word to write
 *
 * No cooperation from the Target is required for this.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data)
{
	QDF_STATUS status;
	/* NOTE(review): static keeps the SDIO transfer buffer off the
	 * stack (presumably a host-controller DMA constraint) at the
	 * cost of reentrancy — TODO confirm. */
	static uint32_t write_value;
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;

	/* diag window accesses are word sized */
	if (address & WORD_NON_ALIGNMENT_MASK) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("[%s]addr is not 4 bytes align.addr[0x%08x]\n",
			 __func__, address));
		return QDF_STATUS_E_FAILURE;
	}

	write_value = data;

	/* set write data first; the write is triggered by the window
	 * register update below */
	status = hif_read_write(hif_device,
				WINDOW_DATA_ADDRESS,
				(char *) &write_value,
				sizeof(uint32_t), HIF_WR_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("Cannot write 0x%x to WINDOW_DATA_ADDRESS\n",
			 data));
		return status;
	}

	/* set window register, which starts the write cycle */
	return hif_ar6000_set_address_window_register(hif_device,
						  WINDOW_WRITE_ADDR_ADDRESS,
						  address);
}
+
+/**
+ * hif_diag_write_mem - Write a block data to the AR6000 through its diagnostic window.
+ * @scn: hif context
+ * @address: address
+ * @data: data
+ * @nbytes: nbytes
+ *
+ * This function may take some time.
+ * No cooperation from the Target is required for this.
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
+			      uint8_t *data, int nbytes)
+{
+	QDF_STATUS status;
+	int32_t i;
+	uint32_t tmp_data;
+
+	if ((address & WORD_NON_ALIGNMENT_MASK) ||
+				(nbytes & WORD_NON_ALIGNMENT_MASK)) {
+		AR_DEBUG_PRINTF(ATH_LOG_ERR,
+			("[%s]addr or length is not 4 bytes"
+			 " align.addr[0x%08x] len[0x%08x]\n",
+			 __func__, address, nbytes));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (i = 0; i < nbytes; i += 4) {
+		tmp_data =
+			data[i] | (data[i + 1] << 8) | (data[i + 2] << 16) |
+			(data[i + 3] << 24);
+		status = hif_diag_write_access(scn, address + i, tmp_data);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_LOG_ERR,
+				("Diag Write mem failed.addr[0x%08x]"
+				 " value[0x%08x]\n",
+				 address + i, tmp_data));
+			return status;
+		}
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_diag_read_mem - Read a block data to the AR6000 through its diagnostic window.
+ * @scn: hif context
+ * @data: data
+ * @nbytes: nbytes
+ *
+ * This function may take some time.
+ * No cooperation from the Target is required for this.
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn,
+			     uint32_t address, uint8_t *data,
+			     int nbytes)
+{
+	QDF_STATUS status;
+	int32_t i;
+	uint32_t tmp_data;
+
+	if ((address & WORD_NON_ALIGNMENT_MASK) ||
+					(nbytes & WORD_NON_ALIGNMENT_MASK)) {
+		AR_DEBUG_PRINTF(ATH_LOG_ERR,
+			("[%s]addr or length is not 4 bytes"
+			" align.addr[0x%08x] len[0x%08x]\n",
+			 __func__, address, nbytes));
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (i = 0; i < nbytes; i += 4) {
+		status = hif_diag_read_access(scn, address + i, &tmp_data);
+		if (status != QDF_STATUS_SUCCESS) {
+			AR_DEBUG_PRINTF(ATH_LOG_ERR,
+					("Diag Write mem failed.addr[0x%08x]"
+					 " value[0x%08x]\n",
+					 address + i, tmp_data));
+			return status;
+		}
+		data[i] = tmp_data & 0xff;
+		data[i + 1] = tmp_data >> 8 & 0xff;
+		data[i + 2] = tmp_data >> 16 & 0xff;
+		data[i + 3] = tmp_data >> 24 & 0xff;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_ar6k_read_target_register - call to read target register values
+ * @hif_device: hif context
+ * @regsel: register selection
+ * @regval: reg value
+ *
+ * Return: QDF_STATUS_SUCCESS for success.
+ */
+QDF_STATUS hif_ar6k_read_target_register(struct hif_sdio_dev *hif_device,
+					 int regsel, uint32_t *regval)
+{
+	QDF_STATUS status;
+	char vals[4];
+	char register_selection[4];
+
+	register_selection[0] = regsel & 0xff;
+	register_selection[1] = regsel & 0xff;
+	register_selection[2] = regsel & 0xff;
+	register_selection[3] = regsel & 0xff;
+	status = hif_read_write(hif_device, CPU_DBG_SEL_ADDRESS,
+				register_selection, 4,
+				HIF_WR_SYNC_BYTE_FIX, NULL);
+
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_LOG_ERR,
+			("Cannot write CPU_DBG_SEL (%d)\n", regsel));
+		return status;
+	}
+
+	status = hif_read_write(hif_device,
+				CPU_DBG_ADDRESS,
+				(char *) vals,
+				sizeof(vals), HIF_RD_SYNC_BYTE_INC, NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_LOG_ERR,
+				("Cannot read from CPU_DBG_ADDRESS\n"));
+		return status;
+	}
+
+	*regval = vals[0] << 0 | vals[1] << 8 |
+			vals[2] << 16 | vals[3] << 24;
+
+	return status;
+}
+
+/**
+ * hif_ar6k_fetch_target_regs - call to fetch target reg values
+ * @hif_device: hif context
+ * @targregs: target regs
+ *
+ * Return: None
+ */
+void hif_ar6k_fetch_target_regs(struct hif_sdio_dev *hif_device,
+		 uint32_t *targregs)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < AR6003_FETCH_TARG_REGS_COUNT; i++) {
+		val = 0xffffffff;
+		hif_ar6k_read_target_register(hif_device, i, &val);
+		targregs[i] = val;
+	}
+}

+ 241 - 0
hif/src/sdio/hif_sdio.c

@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_timer.h>
+#include <qdf_time.h>
+#include <qdf_lock.h>
+#include <qdf_mem.h>
+#include <qdf_util.h>
+#include <qdf_defer.h>
+#include <qdf_atomic.h>
+#include <qdf_nbuf.h>
+#include <athdefs.h>
+#include "qdf_net_types.h"
+#include "a_types.h"
+#include "athdefs.h"
+#include "a_osapi.h"
+#include <hif.h>
+#include <htc_services.h>
+#include <a_debug.h>
+#include "hif_sdio_dev.h"
+#include "if_sdio.h"
+#include "regtable_sdio.h"
+
+#define ATH_MODULE_NAME hif_sdio
+
+/**
+ * hif_start() - start hif bus interface.
+ * @hif_ctx: HIF context
+ *
+ * Enables hif device interrupts
+ *
+ * Return: int
+ */
+uint32_t hif_start(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);
+
+	HIF_ENTER();
+	hif_dev_enable_interrupts(htc_sdio_device);
+	HIF_EXIT();
+	return QDF_STATUS_SUCCESS;
+}
+
/**
 * hif_flush_surprise_remove() - remove hif bus interface.
 * @hif_ctx: HIF context
 *
 * Intentionally a no-op for the SDIO bus: this implementation has
 * nothing to flush on surprise removal.
 *
 * Return: none
 */
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{

}
+
+/**
+ * hif_sdio_stop() - stop hif bus interface.
+ * @hif_ctx: HIF context
+ *
+ * Disable hif device interrupts and destroy hif context
+ *
+ * Return: none
+ */
+void hif_sdio_stop(struct hif_softc *hif_ctx)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);
+
+	HIF_ENTER();
+	if (htc_sdio_device != NULL) {
+		hif_dev_disable_interrupts(htc_sdio_device);
+		hif_dev_destroy(htc_sdio_device);
+	}
+	HIF_EXIT();
+}
+
+/**
+ * hif_send_head() - send data on hif bus interface.
+ * @hif_ctx: HIF context
+ *
+ * send tx data on a given pipe id
+ *
+ * Return: int
+ */
+QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
+		uint32_t transfer_id, uint32_t nbytes, qdf_nbuf_t buf,
+		uint32_t data_attr)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);
+
+	return hif_dev_send_buffer(htc_sdio_device,
+				transfer_id, pipe,
+				nbytes, buf);
+}
+
+/**
+ * hif_map_service_to_pipe() - maps ul/dl pipe to service id.
+ * @hif_ctx: HIF hdl
+ * @ServiceId: sevice index
+ * @ULPipe: uplink pipe id
+ * @DLPipe: down-linklink pipe id
+ * @ul_is_polled: if ul is polling based
+ * @ul_is_polled: if dl is polling based
+ *
+ * Return: int
+ */
+int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl,
+			    uint16_t service_id, uint8_t *ul_pipe,
+			    uint8_t *dl_pipe, int *ul_is_polled,
+			    int *dl_is_polled)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_hdl);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);
+
+	return hif_dev_map_service_to_pipe(htc_sdio_device,
+					   service_id, ul_pipe, dl_pipe,
+					   hif_device->swap_mailbox);
+}
+
/**
 * hif_get_default_pipe() - get the default (control service) pipe ids.
 * @scn: HIF context
 * @ul_pipe: uplink pipe id (out)
 * @dl_pipe: downlink pipe id (out)
 *
 * Resolves the ul/dl pipes assigned to HTC_CTRL_RSVD_SVC. The polling
 * flags are passed as NULL since SDIO does not use pipe polling.
 *
 * Return: none
 */
void hif_get_default_pipe(struct hif_opaque_softc *scn, uint8_t *ul_pipe,
			  uint8_t *dl_pipe)
{
	hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
				ul_pipe, dl_pipe, NULL, NULL);
}
+
+/**
+ * hif_post_init() - create hif device after probe.
+ * @hif_ctx: HIF context
+ * @target: HIF target
+ * @callbacks: htc callbacks
+ *
+ *
+ * Return: int
+ */
+void hif_post_init(struct hif_opaque_softc *hif_ctx, void *target,
+		   struct hif_msg_callbacks *callbacks)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);
+
+	if (htc_sdio_device == NULL)
+		htc_sdio_device = hif_dev_create(hif_device, callbacks, target);
+
+	if (htc_sdio_device)
+		hif_dev_setup(htc_sdio_device);
+
+	return;
+}
+
/**
 * hif_get_free_queue_number() - report free tx slots for a pipe.
 * @hif_ctx: HIF context
 * @pipe: pipe id
 *
 * SDIO uses credit based flow control at the HTC layer,
 * so transmit resource checks are bypassed: the pipe always
 * reports one available slot.
 *
 * Return: 1
 */
uint16_t hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx,
				   uint8_t pipe)
{
	return 1;
}
+
/**
 * hif_send_complete_check() - check tx complete on a given pipe.
 * @hif_ctx: HIF context
 * @pipe: pipe id
 * @force: whether to poll for completion now
 *
 * Intentionally a no-op for SDIO in this implementation: completions
 * are not polled from here.
 *
 * Return: none
 */
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
				int force)
{

}
+
/**
 * hif_set_bundle_mode() - set bundling mode.
 * @hif_ctx: HIF context
 * @enabled: enable/disable bundling
 * @rx_bundle_cnt: bundling count
 *
 * Not implemented yet for SDIO: the parameters are ignored.
 *
 * Return: none
 */
void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			uint64_t rx_bundle_cnt)
{

}

+ 130 - 0
hif/src/sdio/hif_sdio_common.h

@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+
+#ifndef _HIF_SDIO_COMMON_H_
+#define _HIF_SDIO_COMMON_H_
+
+/* SDIO manufacturer ID and Codes */
+#define MANUFACTURER_ID_AR6002_BASE        0x200
+#define MANUFACTURER_ID_AR6003_BASE        0x300
+#define MANUFACTURER_ID_AR6004_BASE        0x400
+#define MANUFACTURER_ID_AR6320_BASE        0x500
+#define MANUFACTURER_ID_QCA9377_BASE       0x700
+#define MANUFACTURER_ID_AR6K_BASE_MASK     0xFF00
+#define MANUFACTURER_ID_AR6K_REV_MASK      0x00FF
+#define FUNCTION_CLASS                     0x0
+#define MANUFACTURER_CODE                  0x271
+
+    /* Mailbox address in SDIO address space */
+#if defined(SDIO_3_0)
+#define HIF_MBOX_BASE_ADDR                 0x1000
+#define HIF_MBOX_DUMMY_WIDTH               0x800
+#else
+#define HIF_MBOX_BASE_ADDR                 0x800
+#define HIF_MBOX_DUMMY_WIDTH               0
+#endif
+
+#define HIF_MBOX_WIDTH                     0x800
+
+#define HIF_MBOX_START_ADDR(mbox)               \
+	(HIF_MBOX_BASE_ADDR + mbox * (HIF_MBOX_WIDTH + HIF_MBOX_DUMMY_WIDTH))
+
+#define HIF_MBOX_END_ADDR(mbox)                 \
+	(HIF_MBOX_START_ADDR(mbox) + HIF_MBOX_WIDTH - 1)
+
+    /* extended MBOX address for larger MBOX writes to MBOX 0*/
+#if defined(SDIO_3_0)
+#define HIF_MBOX0_EXTENDED_BASE_ADDR       0x5000
+#else
+#define HIF_MBOX0_EXTENDED_BASE_ADDR       0x2800
+#endif
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6002    (6*1024)
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6003    (18*1024)
+
+    /* version 1 of the chip has only a 12K extended mbox range */
+#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1  0x4000
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1      (12*1024)
+
+#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004     0x2800
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6004         (18*1024)
+
+
+#if defined(SDIO_3_0)
+#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320     0x5000
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6320             (36*1024)
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0    (56*1024)
+#define HIF_MBOX1_EXTENDED_WIDTH_AR6320             (36*1024)
+#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320        (2*1024)
+#else
+#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320     0x2800
+#define HIF_MBOX0_EXTENDED_WIDTH_AR6320             (24*1024)
+#define HIF_MBOX1_EXTENDED_WIDTH_AR6320             (24*1024)
+#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320        0
+#endif
+
+
+    /* GMBOX addresses */
+#define HIF_GMBOX_BASE_ADDR                0x7000
+#define HIF_GMBOX_WIDTH                    0x4000
+
+/* default SDIO I/O block size: 128 bytes on backports kernels, 256 otherwise */
+#if defined(WITH_BACKPORTS)
+#define HIF_DEFAULT_IO_BLOCK_SIZE          128
+#else
+#define HIF_DEFAULT_IO_BLOCK_SIZE          256
+#endif
+
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
+#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+/* In SDIO 2.0, asynchronous interrupt is not in SPEC
+ * requirement, but AR6003 support it, so the register
+ * is placed in vendor specific field 0xF0(bit0)
+ * In SDIO 3.0, the register is defined in SPEC, and its
+ * address is 0x16(bit1) */
+/* interrupt mode register of AR6003 */
+#define CCCR_SDIO_IRQ_MODE_REG_AR6003         0xF0
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003   (1 << 0)
+    /* interrupt mode register of AR6320 */
+#define CCCR_SDIO_IRQ_MODE_REG_AR6320           0x16
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320     (1 << 1)
+
+#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
+#define CCCR_SDIO_ASYNC_INT_DELAY_LSB           0x06
+#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0
+
+/* Vendor Specific Driver Strength Settings */
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xf2
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK   0x0e
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08
+
+#endif /* _HIF_SDIO_COMMON_H_ */

+ 525 - 0
hif/src/sdio/hif_sdio_dev.c

@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_timer.h>
+#include <qdf_time.h>
+#include <qdf_lock.h>
+#include <qdf_mem.h>
+#include <qdf_util.h>
+#include <qdf_defer.h>
+#include <qdf_atomic.h>
+#include <qdf_nbuf.h>
+#include <athdefs.h>
+#include <qdf_net_types.h>
+#include <a_types.h>
+#include <athdefs.h>
+#include <a_osapi.h>
+#include <hif.h>
+#include <htc_services.h>
+#include "hif_sdio_internal.h"
+#include "if_sdio.h"
+#include "regtable_sdio.h"
+
+/* under HL SDIO, with Interface Memory support, we have
+ * the following reasons to support 2 mboxs:
+ * a) we need place different buffers in different
+ * mempool, for example, data using Interface Memory,
+ * desc and other using DRAM, they need different SDIO
+ * mbox channels.
+ * b) currently, tx mempool in LL case is seperated from
+ * main mempool, the structure (descs at the beginning
+ * of every pool buffer) is different, because they only
+ * need store tx desc from host. To align with LL case,
+ * we also need 2 mbox support just as PCIe LL cases.
+ */
+
+#define INVALID_MAILBOX_NUMBER 0xFF
+/**
+ * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
+ * @pdev: sdio device context
+ * @pipeid: pipe index
+ *
+ *
+ * Return: mailbox index
+ */
+uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
+			uint8_t pipeid)
+{
+	/* TODO: temp version, should not hardcoded here, will be
+	 * updated after HIF design */
+	if (2 == pipeid || 3 == pipeid)
+		return 1;
+	else if (0 == pipeid || 1 == pipeid)
+		return 0;
+	else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s: pipeid=%d,should not happen\n",
+			 __func__, pipeid));
+		qdf_assert(0);
+		return INVALID_MAILBOX_NUMBER;
+	}
+}
+
+/**
+ * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
+ * @pdev: sdio device
+ * @mboxIndex: mailbox index
+ * @upload: boolean to decide mailbox index
+ *
+ * Disable hif device interrupts and destroy hif context
+ *
+ * Return: none
+ */
+uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
+			uint8_t mbox_index,
+				     bool upload)
+{
+	/* TODO: temp version, should not hardcoded here, will be
+	 * updated after HIF design */
+	if (mbox_index == 0) {
+		return upload ? 1 : 0;
+	} else if (mbox_index == 1) {
+		return upload ? 3 : 2;
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			("%s:--------------------mboxIndex=%d,upload=%d,"
+			 " should not happen\n",
+			__func__, mbox_index, upload));
+		qdf_assert(0);
+		return 0xff;
+	}
+}
+
+/**
+ * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
+ * @pDev: sdio device context
+ * @ServiceId: sevice index
+ * @ULPipe: uplink pipe id
+ * @DLPipe: down-linklink pipe id
+ * @SwapMapping: mailbox swap mapping
+ *
+ * Return: int
+ */
+QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev,
+				     uint16_t service_id,
+				     uint8_t *ul_pipe, uint8_t *dl_pipe,
+				     bool swap_mapping)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	switch (service_id) {
+	case HTT_DATA_MSG_SVC:
+		if (swap_mapping) {
+			*ul_pipe = 1;
+			*dl_pipe = 0;
+		} else {
+			*ul_pipe = 3;
+			*dl_pipe = 2;
+		}
+		break;
+
+	case HTC_CTRL_RSVD_SVC:
+	case HTC_RAW_STREAMS_SVC:
+		*ul_pipe = 1;
+		*dl_pipe = 0;
+		break;
+
+	case WMI_DATA_BE_SVC:
+	case WMI_DATA_BK_SVC:
+	case WMI_DATA_VI_SVC:
+	case WMI_DATA_VO_SVC:
+		*ul_pipe = 1;
+		*dl_pipe = 0;
+		break;
+
+	case WMI_CONTROL_SVC:
+		if (swap_mapping) {
+			*ul_pipe = 3;
+			*dl_pipe = 2;
+		} else {
+			*ul_pipe = 1;
+			*dl_pipe = 0;
+		}
+		break;
+
+	default:
+		status = !QDF_STATUS_SUCCESS;
+		break;
+	}
+	return status;
+}
+
/**
 * hif_dev_alloc_rx_buffer() - allocate rx buffer.
 * @pdev: sdio device context
 *
 * Allocates a single netbuf sized for an HTC_PACKET header plus
 * HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET bytes of payload.
 * The HTC_PACKET bookkeeping structure is carved out of the head of the
 * netbuf and the data pointer advanced past it, so both share one
 * allocation.
 *
 * Return: htc buffer pointer, or NULL if the netbuf allocation failed
 */
HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev)
{
	HTC_PACKET *packet;
	qdf_nbuf_t netbuf;
	uint32_t bufsize = 0, headsize = 0;

	bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET;
	headsize = sizeof(HTC_PACKET);
	netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false);
	if (netbuf == NULL) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)Allocate netbuf failed\n", __func__));
		return NULL;
	}
	/* HTC_PACKET lives at the current head of the netbuf ... */
	packet = (HTC_PACKET *) qdf_nbuf_data(netbuf);
	/* ... and the rx data area starts immediately after it */
	qdf_nbuf_reserve(netbuf, headsize);

	SET_HTC_PACKET_INFO_RX_REFILL(packet,
				      pdev,
				      qdf_nbuf_data(netbuf),
				      bufsize, ENDPOINT_0);
	SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf);
	return packet;
}
+
+/**
+ * hif_dev_create() - create hif device after probe.
+ * @scn: HIF context
+ * @callbacks: htc callbacks
+ * @target: HIF target
+ *
+ *
+ * Return: int
+ */
+struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
+			struct hif_msg_callbacks *callbacks, void *target)
+{
+
+	QDF_STATUS status;
+	struct hif_sdio_device *pdev;
+
+	pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device));
+	if (!pdev) {
+		A_ASSERT(false);
+		return NULL;
+	}
+
+	qdf_mem_zero(pdev, sizeof(struct hif_sdio_device));
+	qdf_spinlock_create(&pdev->Lock);
+	qdf_spinlock_create(&pdev->TxLock);
+	qdf_spinlock_create(&pdev->RxLock);
+
+	pdev->HIFDevice = hif_device;
+	pdev->pTarget = target;
+	status = hif_configure_device(hif_device,
+				      HIF_DEVICE_SET_HTC_CONTEXT,
+				      (void *)pdev, sizeof(pdev));
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
+				 __func__));
+	}
+
+	A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks));
+
+	return pdev;
+}
+
+/**
+ * hif_dev_destroy() - destroy hif device.
+ * @pDev: sdio device context
+ *
+ *
+ * Return: none
+ */
+void hif_dev_destroy(struct hif_sdio_device *pdev)
+{
+	QDF_STATUS status;
+
+	status = hif_configure_device(pdev->HIFDevice,
+				      HIF_DEVICE_SET_HTC_CONTEXT,
+				      (void *)NULL, 0);
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
+				 __func__));
+	}
+	qdf_mem_free(pdev);
+}
+
/**
 * hif_dev_from_hif() - get sdio device from hif device.
 * @hif_device: hif device context
 *
 * Retrieves the HTC-level context previously stored on the HIF device
 * by hif_dev_create() via HIF_DEVICE_SET_HTC_CONTEXT.
 *
 * Return: hif sdio device context, or NULL if none was stored
 */
struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device)
{
	struct hif_sdio_device *pdev = NULL;
	QDF_STATUS status;
	/* NOTE(review): the length passed is sizeof the pointed-to struct
	 * although &pdev is a pointer-sized slot — presumably the opcode
	 * ignores the length; confirm against hif_configure_device() */
	status = hif_configure_device(hif_device,
				HIF_DEVICE_GET_HTC_CONTEXT,
				(void **)&pdev, sizeof(struct hif_sdio_device));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HTC_SDIO_CONTEXT is NULL!!!\n",
				 __func__));
	}
	return pdev;
}
+
/**
 * hif_dev_disable_interrupts() - disable hif device interrupts.
 * @pdev: sdio device context
 *
 * Zeroes the shadow copy of the target interrupt-enable registers under
 * the device lock, writes the cleared set to the target synchronously,
 * then masks the host controller interrupt.
 *
 * Return: QDF_STATUS of the register write
 */
QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev)
{
	struct MBOX_IRQ_ENABLE_REGISTERS regs;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	HIF_ENTER();

	LOCK_HIF_DEV(pdev);
	/* Disable all interrupts */
	pdev->IrqEnableRegisters.int_status_enable = 0;
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0;
	pdev->IrqEnableRegisters.error_status_enable = 0;
	pdev->IrqEnableRegisters.counter_int_status_enable = 0;
	/* copy into our temp area so the bus write runs outside the lock */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters, sizeof(pdev->IrqEnableRegisters));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("Failed to update interrupt control registers err: %d",
			 status));
	}

	/* To Do mask the host controller interrupts */
	hif_mask_interrupt(pdev->HIFDevice);
	HIF_EXIT("status :%d", status);
	return status;
}
+
/**
 * hif_dev_enable_interrupts() - enables hif device interrupts.
 * @pdev: sdio device context
 *
 * Builds the full interrupt-enable register set in the shadow copy
 * (error/CPU/counter status plus both mailbox data interrupts), then
 * writes it to the target synchronously.
 *
 * Return: QDF_STATUS of the register write
 */
QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	struct MBOX_IRQ_ENABLE_REGISTERS regs;
	HIF_ENTER();

	/* for good measure, make sure interrupt are disabled
	 * before unmasking at the HIF layer.
	 * The rationale here is that between device insertion
	 * (where we clear the interrupts the first time)
	 * and when HTC is finally ready to handle interrupts,
	 * other software can perform target "soft" resets.
	 * The AR6K interrupt enables reset back to an "enabled"
	 * state when this happens. */
	hif_dev_disable_interrupts(pdev);

	/* Unmask the host controller interrupts */
	hif_un_mask_interrupt(pdev->HIFDevice);

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt */
	pdev->IrqEnableRegisters.int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
			INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

		/* enable 2 mboxs INT */
	pdev->IrqEnableRegisters.int_status_enable |=
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
			INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 * */
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register */
	pdev->IrqEnableRegisters.error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors) */
	pdev->IrqEnableRegisters.counter_int_status_enable =
	   (COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >>
		24;

	/* copy into our temp area so the bus write runs outside the lock */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters,
		 sizeof(struct MBOX_IRQ_ENABLE_REGISTERS));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
		  ("Failed to update interrupt control registers err: %d\n",
				 status));

	}
	HIF_EXIT();
	return status;
}
+
/**
 * hif_dev_setup() - set up sdio device.
 * @pdev: sdio device context
 *
 * Queries the HIF layer for mailbox addresses and block sizes, picks
 * the interrupt processing mode (sync-only vs async+sync), registers
 * the read/write-completion and DSR handlers with the HIF layer, and
 * leaves device interrupts disabled until hif_start().
 *
 * Return: QDF_STATUS of hif_attach_htc()
 */
QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint32_t blocksizes[MAILBOX_COUNT];
	HTC_CALLBACKS htc_callbacks;
	struct hif_sdio_dev *hif_device = pdev->HIFDevice;

	HIF_ENTER();

	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_GET_MBOX_ADDR failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("(%s)HIF_DEVICE_GET_MBOX_BLOCK_SIZE failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
	pdev->BlockMask = pdev->BlockSize - 1;
	/* block size must be a power of two for the mask to be valid */
	A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0);

	/* assume we can process HIF interrupt events asynchronously */
	pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

	/* see if the HIF layer overrides this assumption */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_IRQ_PROC_MODE,
			     &pdev->HifIRQProcessingMode,
			     sizeof(pdev->HifIRQProcessingMode));

	switch (pdev->HifIRQProcessingMode) {
	case HIF_DEVICE_IRQ_SYNC_ONLY:
		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
			("HIF Interrupt processing is SYNC ONLY\n"));
		/* see if HIF layer wants HTC to yield */
		hif_configure_device(hif_device,
				     HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
				     &pdev->HifIRQYieldParams,
				     sizeof(pdev->HifIRQYieldParams));

		if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) {
			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
				("HIF req of DSR yield per %d RECV packets\n",
				 pdev->HifIRQYieldParams.
				 recv_packet_yield_count));
			pdev->DSRCanYield = true;
		}
		break;
	case HIF_DEVICE_IRQ_ASYNC_SYNC:
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("HIF Interrupt processing is ASYNC and SYNC\n"));
		break;
	default:
		A_ASSERT(false);
		break;
	}

	pdev->HifMaskUmaskRecvEvent = NULL;

	/* see if the HIF layer implements the mask/unmask recv
	 * events function  */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
			     &pdev->HifMaskUmaskRecvEvent,
			     sizeof(pdev->HifMaskUmaskRecvEvent));

	/* NOTE(review): this status is overwritten by hif_attach_htc()
	 * below without being checked */
	status = hif_dev_disable_interrupts(pdev);

	qdf_mem_zero(&htc_callbacks, sizeof(HTC_CALLBACKS));
	/* the device layer handles these */
	htc_callbacks.rwCompletionHandler = hif_dev_rw_completion_handler;
	htc_callbacks.dsrHandler = hif_dev_dsr_handler;
	htc_callbacks.context = pdev;
	status = hif_attach_htc(pdev->HIFDevice, &htc_callbacks);

	HIF_EXIT();
	return status;
}

+ 63 - 0
hif/src/sdio/hif_sdio_dev.h

@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef HIF_SDIO_DEV_H_
+#define HIF_SDIO_DEV_H_
+
+#include "qdf_net_types.h"
+#include "a_types.h"
+#include "athdefs.h"
+#include "a_osapi.h"
+#include <hif.h>
+#include "athstartpack.h"
+#include "hif_internal.h"
+
+/* Look up the struct hif_sdio_device associated with a low-level
+ * struct hif_sdio_dev handle */
+struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device);
+
+/* Allocate a struct hif_sdio_device bound to @hif_device, registering
+ * @callbacks for message delivery; @target is an opaque HTC context */
+struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
+				struct hif_msg_callbacks *callbacks,
+				void *target);
+
+/* Tear down a device instance previously returned by hif_dev_create() */
+void hif_dev_destroy(struct hif_sdio_device *htc_sdio_device);
+
+/* Query mailbox/block-size parameters and attach to HTC; must be called
+ * before any send/receive traffic */
+QDF_STATUS hif_dev_setup(struct hif_sdio_device *htc_sdio_device);
+
+/* Enable target-side mailbox interrupts */
+QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *htc_sdio_device);
+
+/* Disable target-side mailbox interrupts */
+QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *htc_sdio_device);
+
+/* Send @nbytes of @buf on @pipe; @transfer_id tags the completion */
+QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *htc_sdio_device,
+			     unsigned int transfer_id, uint8_t pipe,
+			     unsigned int nbytes, qdf_nbuf_t buf);
+
+/* Resolve @service_id to its upload/download pipe numbers; when
+ * @swap_mapping is set the alternate mailbox mapping is used */
+QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev,
+				       uint16_t service_id,
+				       uint8_t *ul_pipe,
+				       uint8_t *dl_pipe,
+				       bool swap_mapping);
+
+#endif /* HIF_SDIO_DEV_H_ */

+ 156 - 0
hif/src/sdio/hif_sdio_internal.h

@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HIF_SDIO_INTERNAL_H_
+#define _HIF_SDIO_INTERNAL_H_
+
+#include "a_debug.h"
+#include "hif_sdio_dev.h"
+#include "htc_packet.h"
+#include "htc_api.h"
+#include "hif_internal.h"
+
+#define HIF_SDIO_RX_BUFFER_SIZE            1792
+#define HIF_SDIO_RX_DATA_OFFSET            64
+
+/* TODO: print output level and mask control */
+#define ATH_DEBUG_IRQ  ATH_DEBUG_MAKE_MODULE_MASK(4)
+#define ATH_DEBUG_XMIT ATH_DEBUG_MAKE_MODULE_MASK(5)
+#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(6)
+
+#define ATH_DEBUG_MAX_MASK 32
+
+#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK |   \
+			    INT_STATUS_ENABLE_CPU_MASK   |   \
+			    INT_STATUS_ENABLE_COUNTER_MASK)
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT        2000 /* in ms */
+#define HTC_TARGET_DEBUG_INTR_MASK         0x01
+#define HTC_TARGET_CREDIT_INTR_MASK        0xF0
+
+#define MAILBOX_COUNT 4
+#define MAILBOX_FOR_BLOCK_SIZE 1
+#define MAILBOX_USED_COUNT 2
+#if defined(SDIO_3_0)
+#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 2
+#else
+#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 1
+#endif
+#define AR6K_TARGET_DEBUG_INTR_MASK     0x01
+
+/* Snapshot of the mailbox interrupt-processing registers, read from the
+ * target in one burst; layout must match target register order */
+PREPACK struct MBOX_IRQ_PROC_REGISTERS {
+	uint8_t host_int_status;
+	uint8_t cpu_int_status;
+	uint8_t error_int_status;
+	uint8_t counter_int_status;
+	uint8_t mbox_frame;
+	uint8_t rx_lookahead_valid;
+	uint8_t host_int_status2;
+	uint8_t gmbox_rx_avail;
+	uint32_t rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * MAILBOX_COUNT];
+	uint32_t int_status_enable;
+} POSTPACK;
+
+/* Shadow of the interrupt-enable registers written to the target */
+PREPACK struct MBOX_IRQ_ENABLE_REGISTERS {
+	uint8_t int_status_enable;
+	uint8_t cpu_int_status_enable;
+	uint8_t error_status_enable;
+	uint8_t counter_int_status_enable;
+} POSTPACK;
+
+#define TOTAL_CREDIT_COUNTER_CNT 4
+
+/* Credit counter registers read back from the target */
+PREPACK struct MBOX_COUNTER_REGISTERS {
+	uint32_t counter[TOTAL_CREDIT_COUNTER_CNT];
+} POSTPACK;
+
+#define SDIO_NUM_DATA_RX_BUFFERS  64
+#define SDIO_DATA_RX_SIZE         1664
+
+/* Per-instance state of the HIF SDIO mailbox layer */
+struct hif_sdio_device {
+	struct hif_sdio_dev *HIFDevice;
+	qdf_spinlock_t Lock;
+	qdf_spinlock_t TxLock;
+	qdf_spinlock_t RxLock;
+	struct MBOX_IRQ_PROC_REGISTERS IrqProcRegisters;
+	struct MBOX_IRQ_ENABLE_REGISTERS IrqEnableRegisters;
+	struct MBOX_COUNTER_REGISTERS MailBoxCounterRegisters;
+	struct hif_msg_callbacks hif_callbacks;
+	struct hif_device_mbox_info MailBoxInfo;
+	uint32_t BlockSize;
+	uint32_t BlockMask;
+	enum hif_device_irq_mode HifIRQProcessingMode;
+	struct hif_device_irq_yield_params HifIRQYieldParams;
+	bool DSRCanYield;
+	HIF_MASK_UNMASK_RECV_EVENT HifMaskUmaskRecvEvent;
+	int CurrentDSRRecvCount;
+	int RecheckIRQStatusCnt;
+	uint32_t RecvStateFlags;
+	void *pTarget;
+};
+
+/* Lock helpers. Note: no trailing semicolon in the expansion -- callers
+ * supply their own, and an embedded semicolon would break use inside an
+ * unbraced if/else (classic function-like-macro hazard). */
+#define LOCK_HIF_DEV(device)    qdf_spin_lock(&(device)->Lock)
+#define UNLOCK_HIF_DEV(device)  qdf_spin_unlock(&(device)->Lock)
+#define LOCK_HIF_DEV_RX(t)      qdf_spin_lock(&(t)->RxLock)
+#define UNLOCK_HIF_DEV_RX(t)    qdf_spin_unlock(&(t)->RxLock)
+#define LOCK_HIF_DEV_TX(t)      qdf_spin_lock(&(t)->TxLock)
+#define UNLOCK_HIF_DEV_TX(t)    qdf_spin_unlock(&(t)->TxLock)
+
+/* Round a transfer length up to the SDIO block size (BlockMask is
+ * BlockSize - 1, BlockSize is a power of two) */
+#define DEV_CALC_RECV_PADDED_LEN(pDev, length) \
+		(((length) + (pDev)->BlockMask) & (~((pDev)->BlockMask)))
+#define DEV_CALC_SEND_PADDED_LEN(pDev, length) \
+		DEV_CALC_RECV_PADDED_LEN(pDev, length)
+#define DEV_IS_LEN_BLOCK_ALIGNED(pDev, length) \
+		(((length) % (pDev)->BlockSize) == 0)
+
+#define HTC_RECV_WAIT_BUFFERS        (1 << 0)
+#define HTC_OP_STATE_STOPPING        (1 << 0)
+
+/* HTCRxFlags values carried in HTC_PACKET.PktInfo.AsRx */
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD      (1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR           (1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE        (1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE            (1 << 3)
+#define HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK     (1 << 4)
+
+#define IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pDev) \
+		((pDev)->HifIRQProcessingMode != HIF_DEVICE_IRQ_SYNC_ONLY)
+
+/* hif_sdio_dev.c */
+HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pDev);
+
+uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pDev,
+			uint8_t pipeid);
+uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pDev,
+				     uint8_t mboxIndex,
+				     bool upload);
+
+/* hif_sdio_recv.c */
+QDF_STATUS hif_dev_rw_completion_handler(void *context, QDF_STATUS status);
+QDF_STATUS hif_dev_dsr_handler(void *context);
+
+#endif /* _HIF_SDIO_INTERNAL_H_ */

+ 1419 - 0
hif/src/sdio/hif_sdio_recv.c

@@ -0,0 +1,1419 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#define ATH_MODULE_NAME hif
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_timer.h>
+#include <qdf_time.h>
+#include <qdf_lock.h>
+#include <qdf_mem.h>
+#include <qdf_util.h>
+#include <qdf_defer.h>
+#include <qdf_atomic.h>
+#include <qdf_nbuf.h>
+#include <athdefs.h>
+#include <qdf_net_types.h>
+#include <a_types.h>
+#include <athdefs.h>
+#include <a_osapi.h>
+#include <hif.h>
+#include <htc_services.h>
+#include "hif_sdio_internal.h"
+#include <htc_internal.h>
+#include "regtable_sdio.h"
+#include "if_sdio.h"
+
+/**
+ * hif_dev_dump_registers() - dump cached mailbox register snapshots to the
+ * debug log; any of the three register-set pointers may be NULL to skip
+ * that section.
+ * @pdev: HIF SDIO device instance (used only for gmbox_address)
+ * @irq_proc_regs: interrupt processing register snapshot, or NULL
+ * @irq_enable_regs: interrupt enable register shadow, or NULL
+ * @mailbox_counter_registers: credit counter snapshot, or NULL
+ */
+static void hif_dev_dump_registers(struct hif_sdio_device *pdev,
+				struct MBOX_IRQ_PROC_REGISTERS *irq_proc_regs,
+				struct MBOX_IRQ_ENABLE_REGISTERS *
+				irq_enable_regs,
+				struct MBOX_COUNTER_REGISTERS *
+				mailbox_counter_registers)
+{
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("RegTable->"));
+
+	if (irq_proc_regs != NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("HostIntStatus: 0x%x ",
+				 irq_proc_regs->host_int_status));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("CPUIntStatus: 0x%x ",
+				 irq_proc_regs->cpu_int_status));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("ErrorIntStatus: 0x%x ",
+				 irq_proc_regs->error_int_status));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("CounterIntStatus: 0x%x ",
+				 irq_proc_regs->counter_int_status));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("MboxFrame: 0x%x ",
+				 irq_proc_regs->mbox_frame));
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\nRegTable->"));
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("RxLKAValid: 0x%x ",
+				 irq_proc_regs->rx_lookahead_valid));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("RxLKA0: 0x%x",
+				 irq_proc_regs->rx_lookahead[0]));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("RxLKA1: 0x%x ",
+				 irq_proc_regs->rx_lookahead[1]));
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("RxLKA2: 0x%x ",
+				 irq_proc_regs->rx_lookahead[2]));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("RxLKA3: 0x%x",
+				 irq_proc_regs->rx_lookahead[3]));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\nRegTable->"));
+
+		if (pdev->MailBoxInfo.gmbox_address != 0) {
+			/* if the target supports GMBOX hardware,
+			 * dump some additional state */
+			AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+					("GMBOX-HostIntStatus2:  0x%x ",
+					 irq_proc_regs->host_int_status2));
+			AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+					("GMBOX-RX-Avail: 0x%x ",
+					 irq_proc_regs->gmbox_rx_avail));
+		}
+	}
+
+	if (irq_enable_regs != NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("Int Status Enable:         0x%x\n",
+				 irq_enable_regs->int_status_enable));
+		AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("Counter Int Status Enable: 0x%x\n",
+				 irq_enable_regs->counter_int_status_enable));
+	}
+
+	if (mailbox_counter_registers != NULL) {
+		int i;
+		/* use the array's declared bound rather than magic 4 */
+		for (i = 0; i < TOTAL_CREDIT_COUNTER_CNT; i++) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+					("Counter[%d]:               0x%x\n", i,
+					 mailbox_counter_registers->
+								counter[i]));
+		}
+	}
+	AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+			("<------------------------------->\n"));
+}
+
+/*
+ * Allocate one HTC RX packet per message described by look_aheads[] (plus
+ * any extra messages a bundle-count header announces) and queue them on
+ * @queue with Endpoint/ActualLength/ExpectedHdr pre-filled for the fetch.
+ * Returns QDF_STATUS_SUCCESS even when the buffer pool runs dry (the
+ * HTC_RECV_WAIT_BUFFERS flag is set instead); on protocol errors the
+ * partially built queue is drained before returning the error.
+ */
+static
+QDF_STATUS hif_dev_alloc_and_prepare_rx_packets(struct hif_sdio_device *pdev,
+						uint32_t look_aheads[],
+						int messages,
+						HTC_PACKET_QUEUE *queue)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	HTC_PACKET *packet;
+	HTC_FRAME_HDR *hdr;
+	int i, j;
+	int num_messages;
+	int full_length;
+	bool no_recycle;
+
+	/* lock RX while we assemble the packet buffers */
+	LOCK_HIF_DEV_RX(pdev);
+
+	for (i = 0; i < messages; i++) {
+
+		/* each lookahead word is the first 4 bytes of a pending
+		 * HTC frame header */
+		hdr = (HTC_FRAME_HDR *) &look_aheads[i];
+		if (hdr->EndpointID >= ENDPOINT_MAX) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("Invalid Endpoint in look-ahead: %d\n",
+					 hdr->EndpointID));
+			/* invalid endpoint */
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+
+		if (hdr->PayloadLen > HTC_MAX_PAYLOAD_LENGTH) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("Payload length %d exceeds max HTC : %d !\n",
+				 hdr->PayloadLen,
+				 (uint32_t) HTC_MAX_PAYLOAD_LENGTH));
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+
+		if ((hdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) == 0) {
+			/* HTC header only indicates 1 message to fetch */
+			num_messages = 1;
+		} else {
+			/* HTC header indicates that every packet to follow
+			 * has the same padded length so that it can
+			 * be optimally fetched as a full bundle */
+			num_messages =
+				(hdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK)
+				>> HTC_FLAGS_RECV_BUNDLE_CNT_SHIFT;
+			/* the count doesn't include the starter frame, just
+			 * a count of frames to follow */
+			num_messages++;
+			/* A_ASSERT(numMessages <= target->MaxMsgPerBundle); */
+			AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+				("HTC header indicates :%d messages can be"
+				 " fetched as a bundle\n",
+				 num_messages));
+		}
+
+		/* block-aligned length of header + payload for the fetch */
+		full_length =
+			DEV_CALC_RECV_PADDED_LEN(pdev,
+						 hdr->PayloadLen +
+						 sizeof(HTC_FRAME_HDR));
+
+		/* get packet buffers for each message, if there was a
+		 * bundle detected in the header,
+		 * use pHdr as a template to fetch all packets in the bundle */
+		for (j = 0; j < num_messages; j++) {
+
+			/* reset flag, any packets allocated using the
+			 * RecvAlloc() API cannot be recycled on cleanup,
+			 * they must be explicitly returned */
+			no_recycle = false;
+			packet = hif_dev_alloc_rx_buffer(pdev);
+
+			if (packet == NULL) {
+				/* No error, simply need to mark that
+				 * we are waiting for buffers. */
+				pdev->RecvStateFlags |= HTC_RECV_WAIT_BUFFERS;
+				/* pDev->EpWaitingForBuffers = pEndpoint->Id; */
+				status = QDF_STATUS_E_RESOURCES;
+				break;
+			}
+			/* AR_DEBUG_ASSERT(pPacket->Endpoint ==
+					   pEndpoint->Id); */
+			/* clear flags */
+			packet->PktInfo.AsRx.HTCRxFlags = 0;
+			packet->PktInfo.AsRx.IndicationFlags = 0;
+			packet->Status = QDF_STATUS_SUCCESS;
+
+			/* NOTE(review): no_recycle is always false here, so
+			 * the HTC_RX_PKT_NO_RECYCLE branch below is currently
+			 * unreachable; kept for a future RecvAlloc() path */
+			if (no_recycle)
+				/* flag that these packets cannot be recycled,
+				 * they have to be returned to the user */
+				packet->PktInfo.AsRx.HTCRxFlags |=
+					HTC_RX_PKT_NO_RECYCLE;
+			/* add packet to queue (also incase we need to
+			 * cleanup down below)  */
+			HTC_PACKET_ENQUEUE(queue, packet);
+
+			/*
+			   if (HTC_STOPPING(target)) {
+			   status = QDF_STATUS_E_CANCELED;
+			   break;
+			   }
+			 */
+
+			/* make sure  message can fit in the endpoint buffer */
+			if ((uint32_t) full_length > packet->BufferLength) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("Payload Length Error : header reports payload"
+				 " of: %d (%d) endpoint buffer size: %d\n",
+					 hdr->PayloadLen, full_length,
+					 packet->BufferLength));
+				status = QDF_STATUS_E_PROTO;
+				break;
+			}
+
+			if (j > 0) {
+				/* for messages fetched in a bundle the expected
+				 * lookahead is unknown as we are only using the
+				 * lookahead of the first packet as a template
+				 * of what to expect for lengths */
+				packet->PktInfo.AsRx.HTCRxFlags |=
+					HTC_RX_PKT_REFRESH_HDR;
+				/* set it to something invalid */
+				packet->PktInfo.AsRx.ExpectedHdr = 0xFFFFFFFF;
+			} else {
+				packet->PktInfo.AsRx.ExpectedHdr =
+					look_aheads[i];
+			}
+			/* set the amount of data to fetch */
+			packet->ActualLength =
+				hdr->PayloadLen + HTC_HDR_LENGTH;
+			packet->Endpoint = hdr->EndpointID;
+			packet->Completion = NULL;
+		}
+
+		if (QDF_IS_STATUS_ERROR(status)) {
+			if (QDF_STATUS_E_RESOURCES == status) {
+				/* this is actually okay */
+				status = QDF_STATUS_SUCCESS;
+			}
+			break;
+		}
+
+	}
+
+	UNLOCK_HIF_DEV_RX(pdev);
+
+	if (QDF_IS_STATUS_ERROR(status)) {
+		/* NOTE(review): packets are only dequeued here, not freed --
+		 * presumably ownership returns to the RX buffer pool managed
+		 * by hif_dev_alloc_rx_buffer(); confirm against that code */
+		while (!HTC_QUEUE_EMPTY(queue))
+			packet = htc_packet_dequeue(queue);
+	}
+
+	return status;
+}
+
+/*
+ * Fetch one HTC packet from the given mailbox into packet->pBuffer.
+ * The transfer length is padded up to the SDIO block size; the mode is
+ * synchronous when packet->Completion is NULL, asynchronous otherwise
+ * (completion is delivered via the read/write completion handler with
+ * @packet as context).  Fails with QDF_STATUS_E_INVAL when the padded
+ * length does not fit in the packet buffer.
+ */
+static inline QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
+				   HTC_PACKET *packet,
+				   uint32_t recv_length, uint8_t mbox_index)
+{
+	uint32_t padded_length;
+	QDF_STATUS status;
+	/* no completion callback means the caller wants a blocking read */
+	bool sync = (packet->Completion == NULL) ? true : false;
+
+	/* adjust the length to be a multiple of block size if appropriate */
+	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);
+
+	if (padded_length > packet->BufferLength) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("DevRecvPacket, Not enough space for"
+				 " padlen:%d recvlen:%d bufferlen:%d\n",
+				 padded_length, recv_length,
+				 packet->BufferLength));
+		if (packet->Completion != NULL) {
+			/* async caller: report the failure through the
+			 * completion path, not the return value */
+			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
+			return QDF_STATUS_SUCCESS;
+		}
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* mailbox index is saved in Endpoint member */
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+			("hif_dev_recv_packet (0x%lX : hdr:0x%X) Len:%d,"
+			 " Padded Length: %d Mbox:0x%X\n",
+			 (unsigned long)packet,
+			 packet->PktInfo.AsRx.ExpectedHdr, recv_length,
+			 padded_length,
+			 pdev->MailBoxInfo.mbox_addresses[mbox_index]));
+	status = hif_read_write(pdev->HIFDevice,
+				pdev->MailBoxInfo.mbox_addresses[mbox_index],
+				packet->pBuffer, padded_length,
+				(sync ? HIF_RD_SYNC_BLOCK_FIX :
+							HIF_RD_ASYNC_BLOCK_FIX),
+				sync ? NULL : packet);
+	/* NOTE(review): in the async case this print reads pBuffer before
+	 * the transfer has necessarily completed -- debug output only */
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("EP%d, Seq:%d\n",
+					 ((HTC_FRAME_HDR *) packet->pBuffer)->
+					 EndpointID,
+					 ((HTC_FRAME_HDR *) packet->pBuffer)->
+					 ControlBytes1));
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+			("hif_dev_recv_packet (0x%lX : hdr:0x%X) Failed\n",
+			 (unsigned long)packet,
+			 packet->PktInfo.AsRx.ExpectedHdr));
+	}
+	if (sync) {
+		packet->Status = status;
+		if (status == QDF_STATUS_SUCCESS) {
+			HTC_FRAME_HDR *hdr =
+				(HTC_FRAME_HDR *) packet->pBuffer;
+			AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+				("hif_dev_recv_packet "
+				 "EP:%d,Len:%d,Flag:%d,CB:0x%02X,0x%02X\n",
+				 hdr->EndpointID, hdr->PayloadLen,
+				 hdr->Flags, hdr->ControlBytes0,
+				 hdr->ControlBytes1));
+		}
+	}
+
+	return status;
+}
+
+/**
+ * hif_dev_process_trailer() - walk the HTC trailer records appended to a
+ * received message and extract lookahead reports.
+ * @pdev: HIF SDIO device instance
+ * @buffer: start of the trailer area
+ * @length: trailer length in bytes
+ * @next_look_aheads: out array for extracted lookahead words, may be NULL
+ * @num_look_aheads: out count of valid entries, may be NULL
+ * @from_endpoint: endpoint the message arrived on (debug output only)
+ *
+ * Credit records are skipped here (HTC proper consumes them).
+ *
+ * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_PROTO on a malformed trailer
+ */
+static inline QDF_STATUS hif_dev_process_trailer(struct hif_sdio_device *pdev,
+				       uint8_t *buffer, int length,
+				       uint32_t *next_look_aheads,
+				       int *num_look_aheads,
+				       HTC_ENDPOINT_ID from_endpoint)
+{
+	HTC_RECORD_HDR *record;
+	uint8_t *record_buf;
+	HTC_LOOKAHEAD_REPORT *look_ahead;
+	uint8_t *orig_buffer;
+	int orig_length;
+	QDF_STATUS status;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+			("+htc_process_trailer (length:%d)\n", length));
+
+	if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV))
+		AR_DEBUG_PRINTBUF(buffer, length, "Recv Trailer");
+
+	orig_buffer = buffer;
+	orig_length = length;
+	status = QDF_STATUS_SUCCESS;
+
+	while (length > 0) {
+
+		if (length < sizeof(HTC_RECORD_HDR)) {
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+		/* these are byte aligned structs */
+		record = (HTC_RECORD_HDR *) buffer;
+		length -= sizeof(HTC_RECORD_HDR);
+		buffer += sizeof(HTC_RECORD_HDR);
+
+		if (record->Length > length) {
+			/* no room left in buffer for record */
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				(" invalid record len: %d (id:%d) buffer has:"
+				 "%d bytes left\n",
+				 record->Length, record->RecordID,
+				 length));
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+		/* start of record follows the header */
+		record_buf = buffer;
+
+		switch (record->RecordID) {
+		case HTC_RECORD_CREDITS:
+			/* Process in HTC, ignore here */
+			break;
+		case HTC_RECORD_LOOKAHEAD:
+			AR_DEBUG_ASSERT(record->Length >=
+					sizeof(HTC_LOOKAHEAD_REPORT));
+			look_ahead = (HTC_LOOKAHEAD_REPORT *) record_buf;
+			/* PreValid must be the bitwise complement of
+			 * PostValid for the report to be trustworthy */
+			if ((look_ahead->PreValid ==
+			     ((~look_ahead->PostValid) & 0xFF))
+			    && (next_look_aheads != NULL)) {
+
+				AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+					(" look_ahead Report (pre valid:0x%X,"
+					" post valid:0x%X) %d %d\n",
+					 look_ahead->PreValid,
+					 look_ahead->PostValid,
+					 from_endpoint,
+					 look_ahead->LookAhead0));
+				/* look ahead bytes are valid, copy them over */
+				((uint8_t *) (&next_look_aheads[0]))[0] =
+					look_ahead->LookAhead0;
+				((uint8_t *) (&next_look_aheads[0]))[1] =
+					look_ahead->LookAhead1;
+				((uint8_t *) (&next_look_aheads[0]))[2] =
+					look_ahead->LookAhead2;
+				((uint8_t *) (&next_look_aheads[0]))[3] =
+					look_ahead->LookAhead3;
+
+				if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+					debug_dump_bytes((uint8_t *)
+							 next_look_aheads, 4,
+							 "Next Look Ahead");
+				}
+				/* just one normal lookahead */
+				if (num_look_aheads != NULL)
+					*num_look_aheads = 1;
+			}
+			break;
+		case HTC_RECORD_LOOKAHEAD_BUNDLE:
+			AR_DEBUG_ASSERT(record->Length >=
+					sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT));
+			if (record->Length >=
+			    sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT)
+			    && (next_look_aheads != NULL)) {
+				HTC_BUNDLED_LOOKAHEAD_REPORT
+				*pBundledLookAheadRpt;
+				int i;
+
+				pBundledLookAheadRpt =
+				(HTC_BUNDLED_LOOKAHEAD_REPORT *) record_buf;
+
+				if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+					debug_dump_bytes(record_buf,
+							 record->Length,
+							 "Bundle look_ahead");
+				}
+
+				if ((record->Length /
+				     (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT)))
+				    > HTC_MAX_MSG_PER_BUNDLE) {
+					/* this should never happen, the target
+					 * restricts the number of messages per
+					 * bundle configured by the host */
+					A_ASSERT(false);
+					status = QDF_STATUS_E_PROTO;
+					break;
+				}
+				for (i = 0;
+				     i <
+				     (int)(record->Length /
+					   (sizeof
+					    (HTC_BUNDLED_LOOKAHEAD_REPORT)));
+				     i++) {
+					((uint8_t *)(&next_look_aheads[i]))[0] =
+					   pBundledLookAheadRpt->LookAhead0;
+					((uint8_t *)(&next_look_aheads[i]))[1] =
+					   pBundledLookAheadRpt->LookAhead1;
+					((uint8_t *)(&next_look_aheads[i]))[2] =
+					   pBundledLookAheadRpt->LookAhead2;
+					((uint8_t *)(&next_look_aheads[i]))[3] =
+					   pBundledLookAheadRpt->LookAhead3;
+					pBundledLookAheadRpt++;
+				}
+
+				/* guard the deref like the single-lookahead
+				 * case above; callers may pass a NULL count
+				 * pointer with a non-NULL array */
+				if (num_look_aheads != NULL)
+					*num_look_aheads = i;
+			}
+			break;
+		default:
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				(" HIF unhandled record: id:%d length:%d\n",
+				 record->RecordID, record->Length));
+			break;
+		}
+
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+
+		/* advance buffer past this record for next time around */
+		buffer += record->Length;
+		length -= record->Length;
+	}
+
+	if (QDF_IS_STATUS_ERROR(status))
+		debug_dump_bytes(orig_buffer, orig_length,
+				  "BAD Recv Trailer");
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-htc_process_trailer\n"));
+	return status;
+
+}
+
+/* process a received message (i.e. strip off header,
+ * process any trailer data).
+ * note : locks must be released when this function is called */
+/* process a received message (i.e. strip off header,
+ * process any trailer data).
+ * note : locks must be released when this function is called
+ *
+ * Validates the received HTC frame header against the lookahead that
+ * triggered the fetch (refreshing it for bundled packets marked
+ * HTC_RX_PKT_REFRESH_HDR), then hands any trailer bytes to
+ * hif_dev_process_trailer() to extract the next lookahead(s).
+ * Returns QDF_STATUS_E_PROTO on any header/trailer inconsistency. */
+static QDF_STATUS hif_dev_process_recv_header(struct hif_sdio_device *pdev,
+				    HTC_PACKET *packet,
+				    uint32_t *next_look_aheads,
+				    int *num_look_aheads)
+{
+	uint8_t temp;
+	uint8_t *buf;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	uint16_t payloadLen;
+	uint32_t look_ahead, actual_length;
+
+	buf = packet->pBuffer;
+	actual_length = packet->ActualLength;
+
+	if (num_look_aheads != NULL)
+		*num_look_aheads = 0;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessRecvHeader\n"));
+
+	if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV))
+		AR_DEBUG_PRINTBUF(buf, packet->ActualLength, "HTC Recv PKT");
+
+	do {
+		/* note, we cannot assume the alignment of pBuffer,
+		 * so we use the safe macros to
+		 * retrieve 16 bit fields */
+		payloadLen = HTC_GET_FIELD(buf, HTC_FRAME_HDR,
+					PAYLOADLEN);
+
+		/* byte-wise copy: the first 4 bytes of the frame are the
+		 * lookahead word the target advertised for this message */
+		((uint8_t *) &look_ahead)[0] = buf[0];
+		((uint8_t *) &look_ahead)[1] = buf[1];
+		((uint8_t *) &look_ahead)[2] = buf[2];
+		((uint8_t *) &look_ahead)[3] = buf[3];
+
+		if (packet->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_REFRESH_HDR) {
+			/* refresh expected hdr, since this was unknown
+			 * at the time we grabbed the packets
+			 * as part of a bundle */
+			packet->PktInfo.AsRx.ExpectedHdr = look_ahead;
+			/* refresh actual length since we now have the
+			 * real header */
+			packet->ActualLength = payloadLen + HTC_HDR_LENGTH;
+
+			/* validate the actual header that was refreshed  */
+			if (packet->ActualLength > packet->BufferLength) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("Invalid  HDR payload length (%d)"
+					 " in bundled RECV (hdr: 0x%X)\n",
+						 payloadLen, look_ahead));
+				/* limit this to max buffer just to print out
+				 * some of the buffer */
+				packet->ActualLength =
+					min(packet->ActualLength,
+					    packet->BufferLength);
+				status = QDF_STATUS_E_PROTO;
+				break;
+			}
+
+			if (packet->Endpoint
+			    != HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID)) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("Refreshed HDR endpoint (%d) does not "
+					 " match expected endpoint (%d)\n",
+					 HTC_GET_FIELD(buf,
+							       HTC_FRAME_HDR,
+							       ENDPOINTID),
+						 packet->Endpoint));
+				status = QDF_STATUS_E_PROTO;
+				break;
+			}
+		}
+
+		if (look_ahead != packet->PktInfo.AsRx.ExpectedHdr) {
+			/* somehow the lookahead that gave us the full read
+			 * length did not reflect the actual header
+			 * in the pending message */
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+			   ("hif_dev_process_recv_header, lookahead mismatch!"
+			    " (pPkt:0x%lX flags:0x%X), 0x%08X != 0x%08X\n",
+				 (unsigned long)packet,
+				 packet->PktInfo.AsRx.HTCRxFlags,
+				 look_ahead,
+				 packet->PktInfo.AsRx.ExpectedHdr));
+#ifdef ATH_DEBUG_MODULE
+			debug_dump_bytes((uint8_t *) &packet->PktInfo.AsRx.
+				 ExpectedHdr, 4,
+				 "Expected Message look_ahead");
+			debug_dump_bytes(buf, sizeof(HTC_FRAME_HDR),
+				 "Current Frame Header");
+#ifdef HTC_CAPTURE_LAST_FRAME
+			/* NOTE(review): `target` is not declared in this
+			 * function; this legacy block will not compile if
+			 * both ATH_DEBUG_MODULE and HTC_CAPTURE_LAST_FRAME
+			 * are defined -- confirm before enabling */
+			debug_dump_bytes((uint8_t *) &target->LastFrameHdr,
+				 sizeof(HTC_FRAME_HDR),
+				 "Last Frame Header");
+			if (target->LastTrailerLength != 0)
+				debug_dump_bytes(target->LastTrailer,
+					 target->LastTrailerLength,
+					 "Last trailer");
+#endif
+#endif
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+
+		/* get flags */
+		temp = HTC_GET_FIELD(buf, HTC_FRAME_HDR, FLAGS);
+
+		if (temp & HTC_FLAGS_RECV_TRAILER) {
+			/* this packet has a trailer */
+
+			/* extract the trailer length in control byte 0 */
+			temp =
+				HTC_GET_FIELD(buf, HTC_FRAME_HDR,
+					CONTROLBYTES0);
+
+			if ((temp < sizeof(HTC_RECORD_HDR))
+			    || (temp > payloadLen)) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("hif_dev_process_recv_header, invalid header"
+				 "(payloadlength should be :%d, CB[0] is:%d)\n",
+					 payloadLen, temp));
+				status = QDF_STATUS_E_PROTO;
+				break;
+			}
+
+			if (packet->PktInfo.AsRx.
+			    HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+				/* this packet was fetched as part of an HTC
+				 * bundle as the lookahead is not valid.
+				 * Next packet may have already been fetched as
+				 * part of the bundle */
+				next_look_aheads = NULL;
+				num_look_aheads = NULL;
+			}
+
+			/* process trailer data that follows HDR and
+			 * application payload */
+			status = hif_dev_process_trailer(pdev,
+						 (buf + HTC_HDR_LENGTH +
+						  payloadLen - temp), temp,
+						 next_look_aheads,
+						 num_look_aheads,
+						 packet->Endpoint);
+
+			if (QDF_IS_STATUS_ERROR(status))
+				break;
+		}
+	} while (false);
+
+	if (QDF_IS_STATUS_ERROR(status)) {
+		/* dump the whole packet */
+		debug_dump_bytes(buf, packet->ActualLength,
+			 "BAD HTC Recv PKT");
+	} else {
+		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) {
+			if (packet->ActualLength > 0) {
+				AR_DEBUG_PRINTBUF(packet->pBuffer,
+						  packet->ActualLength,
+						  "HTC - Application Msg");
+			}
+		}
+	}
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+			("-hif_dev_process_recv_header\n"));
+	return status;
+}
+
+/**
+ * hif_dev_issue_recv_packet_bundle() - fetch several queued RX packets in a
+ * single bundled SDIO read, then scatter the data back into each packet's
+ * own buffer.
+ * @pdev: HIF SDIO device instance
+ * @recv_pkt_queue: prepared packets to fetch; consumed from the head
+ * @sync_completion_queue: fetched packets are moved here
+ * @mail_box_index: mailbox to read from
+ * @num_packets_fetched: set to the bundle packet count on success
+ * @partial_bundle: caller hint that more packets remain beyond this bundle
+ *
+ * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM when no bundle
+ * buffer is available; the hif_read_write() status otherwise.
+ */
+static QDF_STATUS hif_dev_issue_recv_packet_bundle(struct hif_sdio_device *pdev,
+					 HTC_PACKET_QUEUE *recv_pkt_queue,
+					 HTC_PACKET_QUEUE *
+					 sync_completion_queue,
+					 uint8_t mail_box_index,
+					 int *num_packets_fetched,
+					 bool partial_bundle)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	int i, total_length = 0;
+	unsigned char *bundle_buffer = NULL;
+	HTC_PACKET *packet, *packet_rx_bundle;
+	HTC_TARGET *target = NULL;
+	uint32_t padded_length;
+
+	int bundleSpaceRemaining = 0;
+	target = (HTC_TARGET *) pdev->pTarget;
+
+	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) - HTC_MAX_MSG_PER_BUNDLE) >
+	    0) {
+		partial_bundle = true;
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s, partial bundle detected num: %d, %d\n",
+				 __func__,
+				 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
+				 HTC_MAX_MSG_PER_BUNDLE));
+	}
+
+	bundleSpaceRemaining =
+		HTC_MAX_MSG_PER_BUNDLE * target->TargetCreditSize;
+	packet_rx_bundle = allocate_htc_bundle_packet(target);
+	if (packet_rx_bundle == NULL) {
+		/* bundle buffer pool exhausted; fail instead of
+		 * dereferencing NULL below */
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s, allocate_htc_bundle_packet failed\n",
+				 __func__));
+		return QDF_STATUS_E_NOMEM;
+	}
+	bundle_buffer = packet_rx_bundle->pBuffer;
+
+	for (i = 0;
+	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE;
+	     i++) {
+		packet = htc_packet_dequeue(recv_pkt_queue);
+		A_ASSERT(packet != NULL);
+		padded_length =
+			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
+
+		/* compare in unsigned space: the original signed form
+		 * (bundleSpaceRemaining - padded_length) < 0 was promoted
+		 * to unsigned by the int/uint32_t mix and could never be
+		 * true, defeating the bundle-buffer overflow check */
+		if (padded_length > (uint32_t) bundleSpaceRemaining) {
+			/* exceeds what we can transfer, put the packet back */
+			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
+			break;
+		}
+		bundleSpaceRemaining -= padded_length;
+
+		if (partial_bundle ||
+			HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
+			/* lookahead in this packet cannot describe frames
+			 * that were fetched alongside it */
+			packet->PktInfo.AsRx.HTCRxFlags |=
+				HTC_RX_PKT_IGNORE_LOOKAHEAD;
+		}
+		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+		HTC_PACKET_ENQUEUE(sync_completion_queue, packet);
+
+		total_length += padded_length;
+	}
+#ifdef DEBUG_BUNDLE
+	qdf_print("Recv bundle count %d, length %d.\n",
+		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue), total_length);
+#endif
+
+	/* one blocking read pulls the whole bundle into the staging buffer */
+	status = hif_read_write(pdev->HIFDevice,
+				pdev->MailBoxInfo.
+				mbox_addresses[(int)mail_box_index],
+				bundle_buffer, total_length,
+				HIF_RD_SYNC_BLOCK_FIX, NULL);
+
+	if (status != QDF_STATUS_SUCCESS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s, hif_send Failed status:%d\n",
+				 __func__, status));
+	} else {
+		unsigned char *buffer = bundle_buffer;
+		*num_packets_fetched = i;
+		/* scatter the staged data back into each packet buffer,
+		 * in the same order the packets were bundled */
+		HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(sync_completion_queue,
+						      packet) {
+			padded_length =
+				DEV_CALC_RECV_PADDED_LEN(pdev,
+							 packet->ActualLength);
+			A_MEMCPY(packet->pBuffer, buffer, padded_length);
+			buffer += padded_length;
+		} HTC_PACKET_QUEUE_ITERATE_END;
+	}
+	/* free bundle space under Sync mode */
+	free_htc_bundle_packet(target, packet_rx_bundle);
+	return status;
+}
+
+/**
+ * hif_dev_recv_message_pending_handler() - drain pending messages from a
+ * mailbox, using bundled reads where possible
+ * @pdev: hif sdio device context
+ * @mail_box_index: mailbox to drain
+ * @msg_look_aheads: lookahead words reported by the IRQ status registers
+ * @num_look_aheads: number of valid entries in @msg_look_aheads
+ * @async_proc: out - true when packets are fetched via async I/O and the
+ *              caller must not ack the interrupt yet (may be NULL)
+ * @num_pkts_fetched: out - total packets pulled this call (may be NULL)
+ *
+ * Loops while processed packet headers keep yielding fresh lookaheads.
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
+				      uint8_t mail_box_index,
+				      uint32_t msg_look_aheads[],
+				      int num_look_aheads,
+				      bool *async_proc,
+				      int *num_pkts_fetched)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	HTC_PACKET *packet;
+	bool asyncProc = false;
+	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE];
+	int pkts_fetched;
+	HTC_PACKET_QUEUE recv_pkt_queue, sync_completed_pkts_queue;
+	bool partial_bundle;
+	HTC_ENDPOINT_ID id;
+	int total_fetched = 0;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
+			("+HTCRecvMessagePendingHandler NumLookAheads: %d\n",
+			 num_look_aheads));
+
+	if (num_pkts_fetched != NULL)
+		*num_pkts_fetched = 0;
+
+	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
+		/* We use async mode to get the packets if the
+		 * device layer supports it. The device layer
+		 * interfaces with HIF in which HIF may have
+		 * restrictions on how interrupts are processed */
+		asyncProc = true;
+	}
+
+	if (async_proc != NULL)
+		/* indicate to caller how we decided to process this */
+		*async_proc = asyncProc;
+	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE) {
+		A_ASSERT(false);
+		return QDF_STATUS_E_PROTO;
+	}
+	A_MEMCPY(look_aheads, msg_look_aheads,
+		(sizeof(uint32_t)) * num_look_aheads);
+	while (true) {
+
+		/* reset packets queues */
+		INIT_HTC_PACKET_QUEUE(&recv_pkt_queue);
+		INIT_HTC_PACKET_QUEUE(&sync_completed_pkts_queue);
+		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE) {
+			status = QDF_STATUS_E_PROTO;
+			A_ASSERT(false);
+			break;
+		}
+
+		/* first lookahead sets the expected endpoint IDs for
+		 * all packets in a bundle */
+		id = ((HTC_FRAME_HDR *) &look_aheads[0])->EndpointID;
+
+		if (id >= ENDPOINT_MAX) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("MsgPend, Invalid Endpoint in lookahead: %d\n",
+				 id));
+			status = QDF_STATUS_E_PROTO;
+			break;
+		}
+		/* try to allocate as many HTC RX packets indicated
+		 * by the lookaheads these packets are stored
+		 * in the recvPkt queue */
+		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
+							      look_aheads,
+							      num_look_aheads,
+							      &recv_pkt_queue);
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue);
+
+		/* we've got packet buffers for all we can currently fetch,
+		 * this count is not valid anymore  */
+		num_look_aheads = 0;
+		partial_bundle = false;
+
+		/* now go fetch the list of HTC packets */
+		while (!HTC_QUEUE_EMPTY(&recv_pkt_queue)) {
+
+			pkts_fetched = 0;
+			if ((HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) > 1)) {
+				/* there are enough packets to attempt a bundle
+				 * transfer and recv bundling is allowed  */
+				status = hif_dev_issue_recv_packet_bundle(pdev,
+						  &recv_pkt_queue,
+						  asyncProc ?
+						  NULL :
+						  &sync_completed_pkts_queue,
+						  mail_box_index,
+						  &pkts_fetched,
+						  partial_bundle);
+				if (QDF_IS_STATUS_ERROR(status))
+					break;
+
+				if (HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) !=
+					0) {
+					/* we couldn't fetch all packets at one,
+					 * time this creates a broken
+					 * bundle  */
+					partial_bundle = true;
+				}
+			}
+
+			/* see if the previous operation fetched any
+			 * packets using bundling */
+			if (0 == pkts_fetched) {
+				/* dequeue one packet */
+				packet = htc_packet_dequeue(&recv_pkt_queue);
+				A_ASSERT(packet != NULL);
+				packet->Completion = NULL;
+
+				if (HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) >
+				    0) {
+					/* lookaheads in all packets except the
+					 * last one in must be ignored */
+					packet->PktInfo.AsRx.HTCRxFlags |=
+						HTC_RX_PKT_IGNORE_LOOKAHEAD;
+				}
+
+				/* go fetch the packet */
+				status =
+					hif_dev_recv_packet(pdev, packet,
+						    packet->ActualLength,
+						    mail_box_index);
+				if (QDF_IS_STATUS_ERROR(status))
+					break;
+				/* sent synchronously, queue this packet for
+				 * synchronous completion */
+				HTC_PACKET_ENQUEUE(&sync_completed_pkts_queue,
+						   packet);
+			}
+		}
+
+		/* synchronous handling */
+		if (pdev->DSRCanYield) {
+			/* for the SYNC case, increment count that tracks
+			 * when the DSR should yield */
+			pdev->CurrentDSRRecvCount++;
+		}
+
+		/* in the sync case, all packet buffers are now filled,
+		 * we can process each packet, check lookahead , then repeat */
+
+		/* unload sync completion queue */
+		while (!HTC_QUEUE_EMPTY(&sync_completed_pkts_queue)) {
+			uint8_t pipeid;
+			qdf_nbuf_t netbuf;
+
+			packet = htc_packet_dequeue(&sync_completed_pkts_queue);
+			A_ASSERT(packet != NULL);
+
+			num_look_aheads = 0;
+			status =
+				hif_dev_process_recv_header(pdev, packet,
+							    look_aheads,
+							    &num_look_aheads);
+			if (QDF_IS_STATUS_ERROR(status))
+				break;
+
+			netbuf = (qdf_nbuf_t) packet->pNetBufContext;
+			/* set data length */
+			qdf_nbuf_put_tail(netbuf, packet->ActualLength);
+
+			if (pdev->hif_callbacks.rxCompletionHandler) {
+				pipeid =
+					hif_dev_map_mail_box_to_pipe(pdev,
+							mail_box_index,
+							true);
+				pdev->hif_callbacks.rxCompletionHandler(pdev->
+								hif_callbacks.
+								Context,
+								netbuf,
+								pipeid);
+			}
+		}
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+
+		if (num_look_aheads == 0) {
+			/* no more look aheads */
+			break;
+		}
+		/* check whether other OS contexts have queued any WMI
+		 * command/data for WLAN. This check is needed only if WLAN
+		 * Tx and Rx happens in same thread context */
+		/* A_CHECK_DRV_TX(); */
+	}
+	if (num_pkts_fetched != NULL)
+		*num_pkts_fetched = total_fetched;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
+	return status;
+}
+
+/**
+ * hif_dev_service_cpu_interrupt() - service fatal interrupts
+ * synchronously
+ *
+ * @pdev: hif sdio device context
+ *
+ * Reads the enabled CPU interrupt sources, clears them with a W1C
+ * write, and forwards a bit0 (firmware assert) interrupt to the HTC
+ * fwEventHandler callback.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
+{
+	QDF_STATUS status;
+	uint8_t cpu_int_status;
+	uint8_t reg_buffer[4];
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("CPU Interrupt\n"));
+	cpu_int_status = pdev->IrqProcRegisters.cpu_int_status
+			 & pdev->IrqEnableRegisters.cpu_int_status_enable;
+	A_ASSERT(cpu_int_status);
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("Valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+			 cpu_int_status));
+
+	/* Clear the interrupt */
+	pdev->IrqProcRegisters.cpu_int_status &= ~cpu_int_status;
+
+	/*set up the register transfer buffer to hit the register
+	 * 4 times , this is done to make the access 4-byte aligned
+	 * to mitigate issues with host bus interconnects that
+	 * restrict bus transfer lengths to be a multiple of 4-bytes
+	 * set W1C value to clear the interrupt, this hits the register
+	 * first */
+	reg_buffer[0] = cpu_int_status;
+	/* the remaining 3 values are set to zero which have no-effect  */
+	reg_buffer[1] = 0;
+	reg_buffer[2] = 0;
+	reg_buffer[3] = 0;
+
+	status = hif_read_write(pdev->HIFDevice,
+				CPU_INT_STATUS_ADDRESS,
+				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
+
+	A_ASSERT(status == QDF_STATUS_SUCCESS);
+
+	/* The Interrupt sent to the Host is generated via bit0
+	 * of CPU INT register.  The error print belongs to the
+	 * missing-handler case, not to every non-bit0 interrupt;
+	 * the pdev null-check was dead code since pdev is already
+	 * dereferenced above. */
+	if (cpu_int_status & 0x1) {
+		if (pdev->hif_callbacks.fwEventHandler)
+			/* It calls into HTC which propagates this
+			 * to ol_target_failure() */
+			pdev->hif_callbacks.fwEventHandler(
+					pdev->hif_callbacks.Context,
+					QDF_STATUS_E_FAILURE);
+		else
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: Unable to call fwEventHandler,"
+					" invalid input arguments\n",
+					 __func__));
+	}
+
+	return status;
+}
+
+/**
+ * hif_dev_service_error_interrupt() - service error interrupts
+ * synchronously
+ *
+ * @pdev: hif sdio device context
+ *
+ * Logs wakeup / rx-underflow / tx-overflow error sources and clears
+ * them with a W1C write to ERROR_INT_STATUS.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
+{
+	QDF_STATUS status;
+	uint8_t error_int_status;
+	uint8_t reg_buffer[4];
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error Interrupt\n"));
+	/* only the low 4 bits carry error sources */
+	error_int_status = pdev->IrqProcRegisters.error_int_status & 0x0F;
+	A_ASSERT(error_int_status);
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("Valid interrupt source in ERROR_INT_STATUS: 0x%x\n",
+			 error_int_status));
+
+	if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) {
+		/* Wakeup */
+		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error : Wakeup\n"));
+	}
+
+	if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) {
+		/* Rx Underflow */
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Rx Underflow\n"));
+	}
+
+	if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) {
+		/* Tx Overflow */
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Tx Overflow\n"));
+	}
+
+	/* Clear the interrupt */
+	pdev->IrqProcRegisters.error_int_status &= ~error_int_status;
+
+	/* set up the register transfer buffer to hit the register
+	 * 4 times , this is done to make the access 4-byte
+	 * aligned to mitigate issues with host bus interconnects that
+	 * restrict bus transfer lengths to be a multiple of 4-bytes */
+
+	/* set W1C value to clear the interrupt */
+	reg_buffer[0] = error_int_status;
+	/* the remaining 3 values are set to zero which have no-effect  */
+	reg_buffer[1] = 0;
+	reg_buffer[2] = 0;
+	reg_buffer[3] = 0;
+
+	status = hif_read_write(pdev->HIFDevice,
+				ERROR_INT_STATUS_ADDRESS,
+				reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
+
+	A_ASSERT(status == QDF_STATUS_SUCCESS);
+	return status;
+}
+
+/**
+ * hif_dev_service_debug_interrupt() - service debug interrupts
+ * synchronously
+ *
+ * @pdev: hif sdio device context
+ *
+ * Acknowledges a target debug (assertion) interrupt by reading the
+ * decrementing counter register; the read value itself is discarded.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
+{
+	uint32_t dummy;
+	QDF_STATUS status;
+
+	/* Send a target failure event to the application */
+	AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Target debug interrupt\n"));
+
+	/* clear the interrupt , the debug error interrupt is
+	 * counter 0 */
+	/* read counter to clear interrupt */
+	status = hif_read_write(pdev->HIFDevice,
+				COUNT_DEC_ADDRESS,
+				(uint8_t *) &dummy,
+				4, HIF_RD_SYNC_BYTE_INC, NULL);
+
+	A_ASSERT(status == QDF_STATUS_SUCCESS);
+	return status;
+}
+
+/**
+ * hif_dev_service_counter_interrupt() - service counter interrupts
+ * synchronously
+ *
+ * @pdev: hif sdio device context
+ *
+ * Only the debug-assertion counter is serviced here; other counter
+ * sources (e.g. GMBOX credit counters) are intentionally ignored.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+static
+QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
+{
+	uint8_t counter_int_status;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
+
+	counter_int_status = pdev->IrqProcRegisters.counter_int_status &
+			     pdev->IrqEnableRegisters.counter_int_status_enable;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
+			 counter_int_status));
+
+	/* Check if the debug interrupt is pending
+	 * NOTE: other modules like GMBOX may use the counter interrupt
+	 * for credit flow control on other counters, we only need to
+	 * check for the debug assertion counter interrupt */
+	if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
+		return hif_dev_service_debug_interrupt(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_dev_process_pending_irqs() - process pending interrupts
+ * synchronously
+ *
+ * @pdev: hif sdio device context
+ * @done: out - set true when there is nothing left to process
+ * @async_processing: out - sync/async processing flag
+ *
+ * Reads the target IRQ status registers, drains any mailboxes that
+ * have a valid lookahead, then services CPU / error / counter
+ * interrupt sources in that order.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+static QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
+					      bool *done,
+					      bool *async_processing)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	uint8_t host_int_status = 0;
+	uint32_t look_ahead[MAILBOX_USED_COUNT];
+	int i;
+
+	qdf_mem_zero(&look_ahead, sizeof(look_ahead));
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("+ProcessPendingIRQs: (dev: 0x%lX)\n",
+			 (unsigned long)pdev));
+
+	/* NOTE: the HIF implementation guarantees that the context
+	 * of this call allows us to perform SYNCHRONOUS I/O,
+	 * that is we can block, sleep or call any API that
+	 * can block or switch thread/task contexts.
+	 * This is a fully schedulable context. */
+	do {
+
+		if (pdev->IrqEnableRegisters.int_status_enable == 0) {
+			/* interrupt enables have been cleared, do not try
+			 * to process any pending interrupts that
+			 * may result in more bus transactions.
+			 * The target may be unresponsive at this point. */
+			break;
+		}
+		status = hif_read_write(pdev->HIFDevice,
+					HOST_INT_STATUS_ADDRESS,
+					(uint8_t *) &pdev->IrqProcRegisters,
+					sizeof(pdev->IrqProcRegisters),
+					HIF_RD_SYNC_BYTE_INC, NULL);
+
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+
+		if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
+			hif_dev_dump_registers(pdev,
+					       &pdev->IrqProcRegisters,
+					       &pdev->IrqEnableRegisters,
+					       &pdev->MailBoxCounterRegisters);
+		}
+
+		/* Update only those registers that are enabled */
+		host_int_status = pdev->IrqProcRegisters.host_int_status
+				  & pdev->IrqEnableRegisters.int_status_enable;
+
+		/* only look at mailbox status if the HIF layer did not
+		 * provide this function, on some HIF interfaces reading
+		 * the RX lookahead is not valid to do */
+		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
+			look_ahead[i] = 0;
+			if (host_int_status & (1 << i)) {
+				/* mask out pending mailbox value, we use
+				 * "lookAhead" as the real flag for
+				 * mailbox processing below */
+				host_int_status &= ~(1 << i);
+				if (pdev->IrqProcRegisters.
+				    rx_lookahead_valid & (1 << i)) {
+					/* mailbox has a message and the
+					 * look ahead is valid */
+					look_ahead[i] =
+						pdev->
+						IrqProcRegisters.rx_lookahead[
+						MAILBOX_LOOKAHEAD_SIZE_IN_WORD *
+						i];
+				}
+			}
+		} /*end of for loop */
+	} while (false);
+
+	do {
+		bool bLookAheadValid = false;
+		/* did the interrupt status fetches succeed? */
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+
+		for (i = 0; i < MAILBOX_USED_COUNT; i++) {
+			if (look_ahead[i] != 0) {
+				bLookAheadValid = true;
+				break;
+			}
+		}
+
+		if ((0 == host_int_status) && !bLookAheadValid) {
+			/* nothing to process, the caller can use this
+			 * to break out of a loop */
+			*done = true;
+			break;
+		}
+
+		if (bLookAheadValid) {
+			for (i = 0; i < MAILBOX_USED_COUNT; i++) {
+				int fetched = 0;
+				if (look_ahead[i] == 0)
+					continue;
+				AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+					("Pending mailbox[%d] message, look_ahead: 0x%X\n",
+					 i, look_ahead[i]));
+				/* Mailbox Interrupt, the HTC layer may issue
+				 * async requests to empty the mailbox...
+				 * When emptying the recv mailbox we use the
+				 * async handler from the completion routine of
+				 * routine of the callers read request.
+				 * This can improve performance by reducing
+				 * the  context switching when we rapidly
+				 * pull packets */
+				status = hif_dev_recv_message_pending_handler(
+							pdev, i,
+							&look_ahead
+							[i], 1,
+							async_processing,
+							&fetched);
+				if (QDF_IS_STATUS_ERROR(status))
+					break;
+
+				if (!fetched) {
+					/* HTC could not pull any messages out
+					 * due to lack of resources force DSR
+					 * handle to ack the interrupt */
+					*async_processing = false;
+					pdev->RecheckIRQStatusCnt = 0;
+				}
+			}
+		}
+
+		/* now handle the rest of them */
+		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			(" Valid interrupt source for OTHER interrupts: 0x%x\n",
+			 host_int_status));
+
+		if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
+			/* CPU Interrupt */
+			status = hif_dev_service_cpu_interrupt(pdev);
+			if (QDF_IS_STATUS_ERROR(status))
+				break;
+		}
+
+		if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
+			/* Error Interrupt */
+			status = hif_dev_service_error_interrupt(pdev);
+			if (QDF_IS_STATUS_ERROR(status))
+				break;
+		}
+
+		if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
+			/* Counter Interrupt */
+			status = hif_dev_service_counter_interrupt(pdev);
+			if (QDF_IS_STATUS_ERROR(status))
+				break;
+		}
+
+	} while (false);
+
+	/* an optimization to bypass reading the IRQ status registers
+	 * unnecessarily which can re-wake the target, if upper layers
+	 * determine that we are in a low-throughput mode, we can
+	 * rely on taking another interrupt rather than re-checking
+	 * the status registers which can re-wake the target.
+	 *
+	 * NOTE : for host interfaces that use the special
+	 * GetPendingEventsFunc, this optimization cannot be used due to
+	 * possible side-effects.  For example, SPI requires the host
+	 * to drain all messages from the mailbox before exiting
+	 * the ISR routine. */
+	if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("Bypassing IRQ Status re-check, forcing done\n"));
+		*done = true;
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
+			 *done, *async_processing, status));
+
+	return status;
+}
+
+/* true when the DSR has pulled enough recv packets to honour the
+ * HIF yield parameters and should return to the caller */
+#define DEV_CHECK_RECV_YIELD(pdev) \
+	((pdev)->CurrentDSRRecvCount >= \
+	 (pdev)->HifIRQYieldParams.recv_packet_yield_count)
+
+/**
+ * hif_dev_dsr_handler() - Synchronous interrupt handler
+ *
+ * @context: hif sdio device context (struct hif_sdio_device *)
+ *
+ * Repeatedly calls hif_dev_process_pending_irqs() until there is
+ * nothing left, async processing takes over, or the yield count is
+ * reached; acks the interrupt when it is safe to do so.
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+QDF_STATUS hif_dev_dsr_handler(void *context)
+{
+	struct hif_sdio_device *pdev = (struct hif_sdio_device *) context;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	bool done = false;
+	bool async_proc = false;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+			("+DevDsrHandler: (dev: 0x%lX)\n",
+			 (unsigned long)pdev));
+
+	/* reset the recv counter that tracks when we need
+	 * to yield from the DSR */
+	pdev->CurrentDSRRecvCount = 0;
+	/* reset counter used to flag a re-scan of IRQ
+	 * status registers on the target */
+	pdev->RecheckIRQStatusCnt = 0;
+
+	while (!done) {
+		status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
+		if (QDF_IS_STATUS_ERROR(status))
+			break;
+
+		if (HIF_DEVICE_IRQ_SYNC_ONLY == pdev->HifIRQProcessingMode) {
+			/* the HIF layer does not allow async IRQ processing,
+			 * override the asyncProc flag */
+			async_proc = false;
+			/* this will cause us to re-enter ProcessPendingIRQ()
+			 * and re-read interrupt status registers.
+			 * This has a nice side effect of blocking us until all
+			 * async read requests are completed. This behavior is
+			 * required as we  do not allow ASYNC processing
+			 * in interrupt handlers (like Windows CE) */
+
+			if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
+				/* ProcessPendingIRQs() pulled enough recv
+				 * messages to satisfy the yield count, stop
+				 * checking for more messages and return */
+				break;
+		}
+
+		if (async_proc) {
+			/* the function does some async I/O for performance,
+			 * we need to exit the ISR immediately, the check below
+			 * will prevent the interrupt from being
+			 * Ack'd while we handle it asynchronously */
+			break;
+		}
+
+	}
+
+	if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
+		/* Ack the interrupt only if :
+		 *  1. we did not get any errors in processing interrupts
+		 *  2. there are no outstanding async processing requests */
+		if (pdev->DSRCanYield) {
+			/* if the DSR can yield do not ACK the interrupt, there
+			 * could be more pending messages. The HIF layer
+			 * must ACK the interrupt on behalf of HTC */
+			AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+				(" Yield in effect (cur RX count: %d)\n",
+				 pdev->CurrentDSRRecvCount));
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+				(" Acking interrupt from DevDsrHandler\n"));
+			hif_ack_interrupt(pdev->HIFDevice);
+		}
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("-DevDsrHandler\n"));
+	return status;
+}

+ 197 - 0
hif/src/sdio/hif_sdio_send.c

@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_timer.h>
+#include <qdf_time.h>
+#include <qdf_lock.h>
+#include <qdf_mem.h>
+#include <qdf_util.h>
+#include <qdf_defer.h>
+#include <qdf_atomic.h>
+#include <qdf_nbuf.h>
+#include <athdefs.h>
+#include <qdf_net_types.h>
+#include <a_types.h>
+#include <athdefs.h>
+#include <a_osapi.h>
+#include <hif.h>
+#include <htc_services.h>
+#include <a_debug.h>
+#include "hif_sdio_internal.h"
+
+/*
+ * Data structure to record required sending context data,
+ * stored in front of the payload (in netbuf headroom or in a
+ * dedicated allocation) and recovered on I/O completion.
+ */
+struct hif_sendContext {
+	bool bNewAlloc;			/* true: context+payload heap-allocated,
+					 * false: pushed onto netbuf headroom */
+	struct hif_sdio_device *pDev;	/* owning hif sdio device */
+	qdf_nbuf_t netbuf;		/* network buffer being transmitted */
+	unsigned int transferID;	/* caller transfer id, echoed to the
+					 * tx completion callback */
+	unsigned int head_data_len;	/* bytes pushed onto the netbuf head,
+					 * popped again on completion */
+};
+
+/**
+ * hif_dev_rw_completion_handler() - Completion routine
+ * for ALL HIF layer async I/O
+ * @context: hif send context (struct hif_sendContext *)
+ * @status: completion status of the I/O request
+ *
+ * Recovers the send context, undoes the head-data push (or frees the
+ * separate allocation), then invokes the HTC tx completion callback.
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS hif_dev_rw_completion_handler(void *context, QDF_STATUS status)
+{
+	struct hif_sendContext *send_context =
+				(struct hif_sendContext *)context;
+	unsigned int transfer_id = send_context->transferID;
+	struct hif_sdio_device *pdev = send_context->pDev;
+	/* capture the netbuf before send_context may be freed below */
+	qdf_nbuf_t buf = send_context->netbuf;
+	/* Fix Me: Do we need toeplitz_hash_result for SDIO */
+	uint32_t toeplitz_hash_result = 0;
+
+	if (send_context->bNewAlloc)
+		qdf_mem_free((void *)send_context);
+	else
+		qdf_nbuf_pull_head(buf, send_context->head_data_len);
+	if (pdev->hif_callbacks.txCompletionHandler)
+		pdev->hif_callbacks.txCompletionHandler(pdev->hif_callbacks.
+					Context, buf,
+					transfer_id, toeplitz_hash_result);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_dev_send_buffer() - send buffer to sdio device
+ * @pdev: hif sdio device context
+ * @transfer_id: transfer id, echoed to the tx completion callback
+ * @pipe: ul/dl pipe
+ * @nbytes: no of bytes to transfer
+ * @buf: pointer to buffer
+ *
+ * Gathers the netbuf fragments into a single padded buffer (either the
+ * netbuf's own head/tailroom or a fresh allocation) and issues an async
+ * block write to the mailbox mapped from @pipe.
+ *
+ * Return: QDF_STATUS_SUCCESS for success and error code for failure
+ */
+QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *pdev,
+			       unsigned int transfer_id,
+			       uint8_t pipe, unsigned int nbytes,
+			       qdf_nbuf_t buf)
+{
+	QDF_STATUS status;
+	uint32_t padded_length;
+	int frag_count = 0, i, head_data_len;
+	struct hif_sendContext *send_context;
+	unsigned char *pData;
+	uint32_t request = HIF_WR_ASYNC_BLOCK_INC;
+	uint8_t mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);
+
+	padded_length = DEV_CALC_SEND_PADDED_LEN(pdev, nbytes);
+	A_ASSERT(padded_length - nbytes < HIF_DUMMY_SPACE_MASK + 1);
+	/*
+	 * two most significant bytes to save dummy data count
+	 * data written into the dummy space will not put into
+	 * the final mbox FIFO.
+	 */
+	request |= ((padded_length - nbytes) << 16);
+
+	frag_count = qdf_nbuf_get_num_frags(buf);
+
+	if (frag_count > 1) {
+		/* header data length should be total sending length
+		 * subtract internal data length of netbuf */
+		head_data_len = sizeof(struct hif_sendContext) +
+			(nbytes - qdf_nbuf_get_frag_len(buf, frag_count - 1));
+	} else {
+		/*
+		 * | hif_sendContext | netbuf->data
+		 */
+		head_data_len = sizeof(struct hif_sendContext);
+	}
+
+	/* Check whether head room is enough to save extra head data */
+	if ((head_data_len <= qdf_nbuf_headroom(buf)) &&
+	    (qdf_nbuf_tailroom(buf) >= (padded_length - nbytes))) {
+		send_context =
+			(struct hif_sendContext *)qdf_nbuf_push_head(buf,
+						     head_data_len);
+		send_context->bNewAlloc = false;
+	} else {
+		send_context =
+			(struct hif_sendContext *)
+			qdf_mem_malloc(sizeof(struct hif_sendContext) +
+				       padded_length);
+		/* allocation can fail; dereferencing NULL here would
+		 * crash on low-memory conditions */
+		if (!send_context)
+			return QDF_STATUS_E_NOMEM;
+		send_context->bNewAlloc = true;
+	}
+
+	send_context->netbuf = buf;
+	send_context->pDev = pdev;
+	send_context->transferID = transfer_id;
+	send_context->head_data_len = head_data_len;
+	/*
+	 * Copy data to head part of netbuf or head of allocated buffer.
+	 * if buffer is new allocated, the last buffer should be copied also.
+	 * It assume last fragment is internal buffer of netbuf
+	 * sometime total length of fragments larger than nbytes
+	 */
+	pData = (unsigned char *)send_context + sizeof(struct hif_sendContext);
+	for (i = 0; i < (send_context->bNewAlloc ? frag_count : frag_count - 1);
+	     i++) {
+		int frag_len = qdf_nbuf_get_frag_len(buf, i);
+		unsigned char *frag_addr = qdf_nbuf_get_frag_vaddr(buf, i);
+		if (frag_len > nbytes)
+			frag_len = nbytes;
+		memcpy(pData, frag_addr, frag_len);
+		pData += frag_len;
+		/* frag_len is capped at nbytes above, so this unsigned
+		 * subtraction cannot wrap */
+		nbytes -= frag_len;
+		if (nbytes <= 0)
+			break;
+	}
+
+	/* Reset pData pointer and send_context out */
+	pData = (unsigned char *)send_context + sizeof(struct hif_sendContext);
+	status = hif_read_write(pdev->HIFDevice,
+				pdev->MailBoxInfo.mbox_prop[mbox_index].
+				extended_address, (char *)pData, padded_length,
+				request, (void *)send_context);
+
+	if (status == QDF_STATUS_E_PENDING)
+		/*
+		 * it will return QDF_STATUS_E_PENDING in native HIF
+		 * implementation, which should be treated as successful
+		 * result here.
+		 */
+		status = QDF_STATUS_SUCCESS;
+	/* release buffer or move back data pointer when failed */
+	if (status != QDF_STATUS_SUCCESS) {
+		if (send_context->bNewAlloc)
+			qdf_mem_free(send_context);
+		else
+			qdf_nbuf_pull_head(buf, head_data_len);
+	}
+
+	return status;
+}

+ 635 - 0
hif/src/sdio/if_sdio.c

@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <osdep.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/if_arp.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/wait.h>
+#include <qdf_mem.h>
+#include "bmi_msg.h"            /* TARGET_TYPE_ */
+#include "if_sdio.h"
+#include <qdf_trace.h>
+#include <cds_api.h>
+#include "regtable_sdio.h"
+#include <hif_debug.h>
+#ifndef REMOVE_PKT_LOG
+#include "ol_txrx_types.h"
+#include "pktlog_ac_api.h"
+#include "pktlog_ac.h"
+#endif
+#include "epping_main.h"
+
+#ifndef ATH_BUS_PM
+#ifdef CONFIG_PM
+#define ATH_BUS_PM
+#endif /* CONFIG_PM */
+#endif /* ATH_BUS_PM */
+
+#ifndef REMOVE_PKT_LOG
+/* packet-log OS dependency hooks, consumed by the pktlog module */
+struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs = NULL;
+#endif
+/* timeout (ms) used when waiting for the probe to flip the load state */
+#define HIF_SDIO_LOAD_TIMEOUT 1000
+
+/* global hif sdio softc, allocated in hif_sdio_probe() */
+struct hif_sdio_softc *scn = NULL;
+/* global hif softc, allocated in hif_sdio_probe() */
+struct hif_softc *ol_sc;
+/* set true by probe, false by remove; observed via sync_wait_queue */
+static atomic_t hif_sdio_load_state;
+/* Wait queue for MC thread */
+wait_queue_head_t sync_wait_queue;
+
+/**
+ * hif_sdio_probe() - configure sdio device
+ * @context: sdio device context
+ * @hif_handle: pointer to hif handle
+ *
+ * Allocates the global softc structures, attaches the target/host
+ * register tables for the detected chip, maps the ramdump region and
+ * signals the waiting MC thread on success.
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_probe(void *context, void *hif_handle)
+{
+	int ret = 0;
+	struct HIF_DEVICE_OS_DEVICE_INFO os_dev_info;
+	struct sdio_func *func = NULL;
+	const struct sdio_device_id *id;
+	/* default so a build with neither CONFIG_AR9888_SUPPORT nor
+	 * CONFIG_AR6320_SUPPORT cannot publish an uninitialized value */
+	uint32_t target_type = TARGET_TYPE_UNKNOWN;
+	HIF_ENTER();
+
+	scn = (struct hif_sdio_softc *)qdf_mem_malloc(sizeof(*scn));
+	if (!scn) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+	qdf_mem_zero(scn, sizeof(*scn));
+
+	scn->hif_handle = hif_handle;
+	hif_configure_device(hif_handle, HIF_DEVICE_GET_OS_DEVICE,
+			     &os_dev_info,
+			     sizeof(os_dev_info));
+
+	scn->aps_osdev.device = os_dev_info.os_dev;
+	scn->aps_osdev.bc.bc_bustype = QDF_BUS_TYPE_SDIO;
+	spin_lock_init(&scn->target_lock);
+	ol_sc = qdf_mem_malloc(sizeof(*ol_sc));
+	if (!ol_sc) {
+		ret = -ENOMEM;
+		goto err_attach;
+	}
+	OS_MEMZERO(ol_sc, sizeof(*ol_sc));
+
+	{
+		/*
+		 * Attach Target register table. This is needed early on
+		 * even before BMI since PCI and HIF initialization
+		 * directly access Target registers.
+		 *
+		 * TBDXXX: targetdef should not be global -- should be stored
+		 * in per-device struct so that we can support multiple
+		 * different Target types with a single Host driver.
+		 * The whole notion of an "hif type" -- (not as in the hif
+		 * module, but generic "Host Interface Type") is bizarre.
+		 * At first, one would expect it to be things like SDIO, USB,
+		 * PCI. But instead, it's an actual platform type.
+		 * Inexplicably, the values used for HIF platform types are
+		 * *different* from the values used for Target Types.
+		 */
+
+#if defined(CONFIG_AR9888_SUPPORT)
+		hif_register_tbl_attach(ol_sc, HIF_TYPE_AR9888);
+		target_register_tbl_attach(ol_sc, TARGET_TYPE_AR9888);
+		target_type = TARGET_TYPE_AR9888;
+#elif defined(CONFIG_AR6320_SUPPORT)
+		id = ((struct hif_sdio_dev *) hif_handle)->id;
+		if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
+				MANUFACTURER_ID_QCA9377_BASE) {
+			hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2);
+			target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2);
+		} else if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) ==
+				MANUFACTURER_ID_AR6320_BASE) {
+			int ar6kid = id->device & MANUFACTURER_ID_AR6K_REV_MASK;
+			if (ar6kid >= 1) {
+				/* v2 or higher silicon */
+				hif_register_tbl_attach(ol_sc,
+					HIF_TYPE_AR6320V2);
+				target_register_tbl_attach(ol_sc,
+					  TARGET_TYPE_AR6320V2);
+			} else {
+				/* legacy v1 silicon */
+				hif_register_tbl_attach(ol_sc,
+					HIF_TYPE_AR6320);
+				target_register_tbl_attach(ol_sc,
+					  TARGET_TYPE_AR6320);
+			}
+		}
+		target_type = TARGET_TYPE_AR6320;
+
+#endif
+	}
+	func = ((struct hif_sdio_dev *) hif_handle)->func;
+	scn->targetdef =  ol_sc->targetdef;
+	scn->hostdef =  ol_sc->hostdef;
+	scn->aps_osdev.bdev = func;
+	ol_sc->bus_type = scn->aps_osdev.bc.bc_bustype;
+	scn->ol_sc = *ol_sc;
+	ol_sc->target_info.target_type = target_type;
+
+#ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM
+	scn->ramdump_base = ioremap(RAMDUMP_ADDR, RAMDUMP_SIZE);
+	scn->ramdump_size = RAMDUMP_SIZE;
+	if (scn->ramdump_base == NULL)
+		scn->ramdump_size = 0;
+#endif
+
+	if (athdiag_procfs_init(scn) != 0) {
+		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
+			  "%s athdiag_procfs_init failed", __func__);
+		/* NOTE(review): mixes a QDF_STATUS value into an int/errno
+		 * return; kept for compatibility with existing callers */
+		ret = QDF_STATUS_E_FAILURE;
+		goto err_attach1;
+	}
+
+	atomic_set(&hif_sdio_load_state, true);
+	wake_up_interruptible(&sync_wait_queue);
+
+	return 0;
+
+err_attach1:
+#ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM
+	/* undo the ioremap() above; it was previously leaked here */
+	if (scn->ramdump_base)
+		iounmap(scn->ramdump_base);
+#endif
+	qdf_mem_free(ol_sc);
+	ol_sc = NULL;
+err_attach:
+	qdf_mem_free(scn);
+	scn = NULL;
+err_alloc:
+	return ret;
+}
+
+/**
+ * ol_ath_sdio_configure() - attach the net device and export the hif handle
+ * @hif_sc: pointer to sdio softc structure
+ * @dev: pointer to net device
+ * @hif_hdl: out - receives the hif handle stored in the softc
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+int
+ol_ath_sdio_configure(void *hif_sc, struct net_device *dev,
+		      hif_handle_t *hif_hdl)
+{
+	struct hif_sdio_softc *softc = (struct hif_sdio_softc *)hif_sc;
+
+	softc->aps_osdev.netdev = dev;
+	*hif_hdl = softc->hif_handle;
+
+	return 0;
+}
+
+/**
+ * hif_sdio_remove() - remove sdio device
+ * @context: sdio device context
+ * @hif_handle: pointer to sdio function
+ *
+ * Tears down the diag procfs entry, unmaps the ramdump region and
+ * releases the global softc allocated by hif_sdio_probe().
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_remove(void *context, void *hif_handle)
+{
+	HIF_ENTER();
+
+	if (!scn) {
+		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
+			  "Global SDIO context is NULL");
+		return A_ERROR;
+	}
+
+	atomic_set(&hif_sdio_load_state, false);
+	athdiag_procfs_remove();
+
+#ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM
+	/* ioremap() in probe may have failed; do not iounmap(NULL) */
+	if (scn->ramdump_base)
+		iounmap(scn->ramdump_base);
+#endif
+
+	/* scn is known non-NULL here; the old redundant check is gone */
+	qdf_mem_free(scn);
+	scn = NULL;
+
+	HIF_EXIT();
+
+	return 0;
+}
+
+/**
+ * hif_sdio_suspend() - sdio suspend routine
+ * @context: sdio device context
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_suspend(void *context)
+{
+	/* No bus-specific suspend work is required for SDIO; succeed */
+	return 0;
+}
+
+/**
+ * hif_sdio_resume() - sdio resume routine
+ * @context: sdio device context
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_resume(void *context)
+{
+	/* No bus-specific resume work is required for SDIO; succeed */
+	return 0;
+}
+
+/**
+ * hif_sdio_power_change() - change power state of sdio bus
+ * @context: sdio device context
+ * @config: power state configuration
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_power_change(void *context, uint32_t config)
+{
+	/* Power-state transitions need no extra handling here; succeed */
+	return 0;
+}
+
+/*
+ * Module glue.
+ */
+#include <linux/version.h>
+static char *version = "HIF (Atheros/multi-bss)";
+static char *dev_info = "ath_hif_sdio";
+
+/**
+ * init_ath_hif_sdio() - initialize hif sdio callbacks
+ * @param: none
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static int init_ath_hif_sdio(void)
+{
+	static int probed;
+	QDF_STATUS status;
+	struct osdrv_callbacks osdrv_callbacks;
+
+	HIF_ENTER();
+
+	/* Guard against double registration before doing any setup work */
+	if (probed)
+		return -ENODEV;
+	probed++;
+
+	qdf_mem_zero(&osdrv_callbacks, sizeof(osdrv_callbacks));
+	osdrv_callbacks.device_inserted_handler = hif_sdio_probe;
+	osdrv_callbacks.device_removed_handler = hif_sdio_remove;
+	osdrv_callbacks.device_suspend_handler = hif_sdio_suspend;
+	osdrv_callbacks.device_resume_handler = hif_sdio_resume;
+	osdrv_callbacks.device_power_change_handler = hif_sdio_power_change;
+
+	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, "%s %d", __func__,
+		  __LINE__);
+	status = hif_init(&osdrv_callbacks);
+	if (status != QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
+			  "%s hif_init failed!", __func__);
+		return -ENODEV;
+	}
+
+	/* Version banner is informational; log at INFO, not ERROR */
+	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO,
+		 "%s: %s\n", dev_info, version);
+
+	return 0;
+}
+
+/**
+ * hif_targ_is_awake(): check if target is awake
+ *
+ * This function returns true if the target is awake
+ *
+ * @scn: struct hif_softc
+ * @mem: mapped mem base
+ *
+ * Return: bool
+ */
+bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
+{
+	/* SDIO targets are treated as always awake; no register poll */
+	return true;
+}
+
+/**
+ * hif_sdio_bus_suspend() - suspend the bus
+ *
+ * This function suspends the bus, but sdio doesn't need to suspend.
+ * Therefore do nothing.
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+int hif_sdio_bus_suspend(struct hif_softc *hif_ctx)
+{
+	struct hif_sdio_softc *sdio_ctx = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *device = sdio_ctx->hif_handle;
+
+	/* Forward the suspend to the underlying sdio function's device */
+	hif_device_suspend(&device->func->dev);
+
+	return 0;
+}
+
+
+/**
+ * hif_sdio_bus_resume() - hif resume API
+ *
+ * This function resumes the bus. but sdio doesn't need to resume.
+ * Therefore do nothing.
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+int hif_sdio_bus_resume(struct hif_softc *hif_ctx)
+{
+	struct hif_sdio_softc *sdio_ctx = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *device = sdio_ctx->hif_handle;
+
+	/* Forward the resume to the underlying sdio function's device */
+	hif_device_resume(&device->func->dev);
+
+	return 0;
+}
+
+/**
+ * hif_enable_power_gating() - enable HW power gating
+ *
+ * Return: n/a
+ */
+void hif_enable_power_gating(void *hif_ctx)
+{
+	/* Intentional no-op: no HW power-gating hook for this bus */
+}
+
+/**
+ * hif_disable_aspm() - hif_disable_aspm
+ *
+ * Return: n/a
+ */
+void hif_disable_aspm(void)
+{
+	/* Intentional no-op: ASPM is a PCIe concept, not applicable here */
+}
+
+/**
+ * hif_sdio_close() - hif_bus_close
+ *
+ * Return: None
+ */
+void hif_sdio_close(struct hif_softc *hif_sc)
+{
+	/* Intentional no-op: nothing allocated in hif_sdio_open to undo */
+}
+
+/**
+ * hif_sdio_open() - hif_bus_open
+ * @hif_sc: hif context
+ * @bus_type: bus type
+ *
+ * Return: QDF status
+ */
+QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc,
+				   enum qdf_bus_type bus_type)
+{
+	/* Record the bus type, then register the SDIO osdrv callbacks */
+	hif_sc->bus_type = bus_type;
+
+	return init_ath_hif_sdio();
+}
+
+/**
+ * hif_get_target_type() - Get the target type
+ *
+ * This function is used to query the target type.
+ *
+ * @ol_sc: ol_softc struct pointer
+ * @dev: device pointer
+ * @bdev: bus dev pointer
+ * @bid: bus id pointer
+ * @hif_type: HIF type such as HIF_TYPE_QCA6180
+ * @target_type: target type such as TARGET_TYPE_QCA6180
+ *
+ * Return: 0 for success
+ */
+int hif_get_target_type(struct hif_softc *ol_sc, struct device *dev,
+	void *bdev, const hif_bus_id *bid, uint32_t *hif_type,
+	uint32_t *target_type)
+{
+	/* NOTE(review): stub — *hif_type and *target_type are left
+	 * unpopulated; callers presumably obtain them from the probe
+	 * path instead — TODO confirm.
+	 */
+	return 0;
+}
+
+/**
+ * hif_get_target_revision() - read and cache the target chip revision
+ * @ol_sc: hif context
+ *
+ * Reads the CHIP_ID register via a diag access and, on success, stores
+ * the decoded revision in ol_sc->target_info.target_revision.
+ *
+ * Return: None
+ */
+void hif_get_target_revision(struct hif_softc *ol_sc)
+{
+	/* redundant same-type cast local removed */
+	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
+	uint32_t chip_id = 0;
+	QDF_STATUS rv;
+
+	rv = hif_diag_read_access(hif_hdl,
+			(CHIP_ID_ADDRESS | RTC_SOC_BASE_ADDRESS), &chip_id);
+	if (rv != QDF_STATUS_SUCCESS) {
+		HIF_ERROR("%s[%d]: get chip id fail\n", __func__, __LINE__);
+	} else {
+		ol_sc->target_info.target_revision =
+			CHIP_ID_REVISION_GET(chip_id);
+	}
+}
+
+/**
+ * hif_sdio_enable_bus() - hif_enable_bus
+ * @hif_sc: hif context
+ * @dev: dev
+ * @bdev: bus dev
+ * @bid: bus id
+ * @type: bus type
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS hif_sdio_enable_bus(struct hif_softc *hif_sc,
+			struct device *dev, void *bdev, const hif_bus_id *bid,
+			enum hif_enable_type type)
+{
+	const struct sdio_device_id *id = (const struct sdio_device_id *)bid;
+	struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_sc);
+
+	init_waitqueue_head(&sync_wait_queue);
+	if (hif_sdio_device_inserted(dev, id)) {
+		/* fixed split literal that rendered as "insertedfailed" */
+		HIF_ERROR("wlan: %s hif_sdio_device_inserted failed",
+			  __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	/* Wait for the probe path to finish populating the global
+	 * ol_sc/scn contexts; a zero return means the wait timed out
+	 * and the globals below must not be dereferenced.
+	 */
+	if (!wait_event_interruptible_timeout(sync_wait_queue,
+			  atomic_read(&hif_sdio_load_state) == true,
+			  HIF_SDIO_LOAD_TIMEOUT)) {
+		HIF_ERROR("wlan: %s hif sdio load timed out", __func__);
+		return QDF_STATUS_E_TIMEOUT;
+	}
+
+	/* Copy the probe-time state into the per-instance contexts */
+	hif_sc->hostdef = ol_sc->hostdef;
+	hif_sc->targetdef = ol_sc->targetdef;
+	hif_sc->bus_type = ol_sc->bus_type;
+	hif_sc->target_info.target_type = ol_sc->target_info.target_type;
+
+	sc->hif_handle = scn->hif_handle;
+	sc->aps_osdev.device = scn->aps_osdev.device;
+	sc->aps_osdev.bc.bc_bustype = scn->aps_osdev.bc.bc_bustype;
+	sc->target_lock = scn->target_lock;
+	sc->targetdef = scn->targetdef;
+	sc->hostdef = scn->hostdef;
+	sc->aps_osdev.bdev = scn->aps_osdev.bdev;
+	sc->ramdump_size = scn->ramdump_size;
+	sc->ramdump_base = scn->ramdump_base;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+
+/**
+ * hif_sdio_disable_bus() - sdio disable bus
+ * @hif_sc: hif softc pointer
+ *
+ * Return: none
+ */
+void hif_sdio_disable_bus(struct hif_softc *hif_sc)
+{
+	struct hif_sdio_softc *sdio_sc = HIF_GET_SDIO_SOFTC(hif_sc);
+
+	/* The bus device recorded at enable time is the sdio function */
+	hif_sdio_device_removed(sdio_sc->aps_osdev.bdev);
+}
+
+/**
+ * hif_sdio_get_config_item - get a device configuration item
+ * @hif_sc: hif context
+ * @opcode: configuration type
+ * @config: configuration value to set
+ * @config_len: configuration length
+ *
+ * Return: QDF_STATUS_SUCCESS for success
+ */
+QDF_STATUS hif_sdio_get_config_item(struct hif_softc *hif_sc,
+		     int opcode, void *config, uint32_t config_len)
+{
+	struct hif_sdio_softc *sdio_sc = HIF_GET_SDIO_SOFTC(hif_sc);
+
+	/* Delegate straight to the lower-layer configuration handler */
+	return hif_configure_device(sdio_sc->hif_handle, opcode, config,
+				    config_len);
+}
+
+/**
+ * hif_sdio_set_mailbox_swap - set mailbox swap
+ * @hif_sc: hif context
+ *
+ * Return: None
+ */
+void hif_sdio_set_mailbox_swap(struct hif_softc *hif_sc)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+
+	/* Instruct the lower SDIO layer to swap mailbox usage */
+	hif_device->swap_mailbox = true;
+}
+
+/**
+ * hif_sdio_claim_device - claim the hif device for the caller's context
+ * @hif_sc: hif context
+ *
+ * Return: None
+ */
+void hif_sdio_claim_device(struct hif_softc *hif_sc)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+
+	/* Record the hif context as the owner of this device */
+	hif_device->claimed_ctx = hif_sc;
+}
+
+/**
+ * hif_sdio_mask_interrupt_call() - disable hif device irq
+ * @scn: pointer to softc structure
+ *
+ * Return: None
+ */
+void hif_sdio_mask_interrupt_call(struct hif_softc *scn)
+{
+	struct hif_sdio_softc *hif_ctx = HIF_GET_SDIO_SOFTC(scn);
+	struct hif_sdio_dev *hif_device = hif_ctx->hif_handle;
+
+	/* Mask the device interrupt at the lower SDIO layer */
+	hif_mask_interrupt(hif_device);
+}
+
+/**
+ * hif_trigger_dump() - trigger various dump cmd
+ * @scn: struct hif_opaque_softc
+ * @cmd_id: dump command id
+ * @start: start/stop dump
+ *
+ * Return: None
+ */
+void hif_trigger_dump(struct hif_opaque_softc *scn, uint8_t cmd_id, bool start)
+{
+	/* Intentional no-op: dump commands are not supported on this bus */
+}
+
+/**
+ * hif_check_fw_reg() - hif_check_fw_reg
+ * @scn: scn
+ * @state:
+ *
+ * Return: int
+ */
+int hif_check_fw_reg(struct hif_opaque_softc *scn)
+{
+	/* Stub: no FW register check performed; always report OK (0) */
+	return 0;
+}
+
+/**
+ * hif_wlan_disable() - call the platform driver to disable wlan
+ * @scn: scn
+ *
+ * Return: void
+ */
+void hif_wlan_disable(struct hif_softc *scn)
+{
+	/* Intentional no-op: no platform-driver disable hook for SDIO */
+}
+
+/**
+ * hif_config_target() - configure hif bus
+ * @hif_hdl: hif handle
+ * @state:
+ *
+ * Return: int
+ */
+int hif_config_target(void *hif_hdl)
+{
+	/* Stub: no extra target configuration needed; always succeed */
+	return 0;
+}

+ 110 - 0
hif/src/sdio/if_sdio.h

@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef __IF_SDIO_H__
+#define __IF_SDIO_H__
+
+#include <linux/version.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <osdep.h>
+#include <ol_if_athvar.h>
+#include <athdefs.h>
+#include "a_osapi.h"
+#include "hif_internal.h"
+
+
+#define AR6320_HEADERS_DEF
+
+#define ATH_DBG_DEFAULT   0
+
+#define RAMDUMP_ADDR     0x8F000000
+#define RAMDUMP_SIZE     0x700000
+
+/* Per-instance SDIO HIF state wrapping the generic hif_softc. */
+struct hif_sdio_softc {
+	/* embedded base hif context; NOTE(review): presumably must stay
+	 * the first member so HIF_GET_SDIO_SOFTC can cast — TODO confirm
+	 */
+	struct hif_softc ol_sc;
+	struct device *dev;
+	struct _NIC_DEV aps_osdev;      /* OS device wrapper (bdev/netdev) */
+	struct tasklet_struct intr_tq;  /* tasklet */
+
+	int irq;
+	/*
+	 * Guard changes to Target HW state and to software
+	 * structures that track hardware state.
+	 */
+	spinlock_t target_lock;
+	void *hif_handle;               /* lower-layer hif_sdio_dev handle */
+	void *ramdump_base;             /* ioremap()ed ramdump region or NULL */
+	unsigned long ramdump_address;
+	unsigned long ramdump_size;
+	struct targetdef_s *targetdef;  /* target register table */
+	struct hostdef_s *hostdef;      /* host register table */
+};
+
+#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT)
+int athdiag_procfs_init(void *scn);
+void athdiag_procfs_remove(void);
+#else
+/* No-op stubs used when procfs diag support is compiled out */
+static inline int athdiag_procfs_init(void *scn)
+{
+	return 0;
+}
+
+static inline void athdiag_procfs_remove(void)
+{
+	return;
+}
+#endif
+
+#ifndef REMOVE_PKT_LOG
+extern int pktlogmod_init(void *context);
+extern void pktlogmod_exit(void *context);
+#endif
+
+#define DMA_MAPPING_ERROR(dev, addr) dma_mapping_error((dev), (addr))
+
+int ath_sdio_probe(void *context, void *hif_handle);
+void ath_sdio_remove(void *context, void *hif_handle);
+int ath_sdio_suspend(void *context);
+int ath_sdio_resume(void *context);
+
+/*These functions are exposed to HDD*/
+void hif_init_qdf_ctx(qdf_device_t qdf_dev, void *ol_sc);
+void hif_deinit_qdf_ctx(void *ol_sc);
+
+int hif_sdio_device_inserted(struct device *dev,
+		const struct sdio_device_id *id);
+void hif_sdio_stop(struct hif_softc *hif_ctx);
+void hif_sdio_shutdown(struct hif_softc *hif_ctx);
+void hif_sdio_device_removed(struct sdio_func *func);
+int hif_device_suspend(struct device *dev);
+int hif_device_resume(struct device *dev);
+void hif_register_tbl_attach(struct hif_softc *scn,
+						u32 hif_type);
+void target_register_tbl_attach(struct hif_softc *scn,
+						u32 target_type);
+#endif /* __IF_SDIO_H__ */

+ 422 - 0
hif/src/sdio/native_sdio/include/hif_internal.h

@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HIF_INTERNAL_H_
+#define _HIF_INTERNAL_H_
+
+#include "athdefs.h"
+#include "a_types.h"
+#include "a_osapi.h"
+#include <qdf_types.h>          /* qdf_device_t, qdf_print */
+#include <qdf_time.h>           /* qdf_system_ticks, etc. */
+#include <qdf_status.h>
+#include <qdf_timer.h>
+#include <qdf_atomic.h>
+#include "hif.h"
+#include "hif_debug.h"
+#include "hif_sdio_common.h"
+#include <linux/scatterlist.h>
+#include "hif_main.h"
+
+#define HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define BUS_REQUEST_MAX_NUM                64
+
+#define SDIO_CLOCK_FREQUENCY_DEFAULT       25000000
+#define SDWLAN_ENABLE_DISABLE_TIMEOUT      20
+#define FLAGS_CARD_ENAB                    0x02
+#define FLAGS_CARD_IRQ_UNMSK               0x04
+
+#define HIF_MBOX_BLOCK_SIZE                HIF_DEFAULT_IO_BLOCK_SIZE
+#define HIF_MBOX0_BLOCK_SIZE               1
+#define HIF_MBOX1_BLOCK_SIZE               HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX2_BLOCK_SIZE               HIF_MBOX_BLOCK_SIZE
+#define HIF_MBOX3_BLOCK_SIZE               HIF_MBOX_BLOCK_SIZE
+
+/*
+ * direction - Direction of transfer (HIF_SDIO_READ/HIF_SDIO_WRITE).
+ */
+#define HIF_SDIO_READ			0x00000001
+#define HIF_SDIO_WRITE			0x00000002
+#define HIF_SDIO_DIR_MASK		(HIF_SDIO_READ | HIF_SDIO_WRITE)
+
+/*
+ * type - An interface may support different kind of rd/wr commands.
+ * For example: SDIO supports CMD52/CMD53s. In case of MSIO it
+ * translates to using different kinds of TPCs. The command type
+ * is thus divided into a basic and an extended command and can
+ * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO.
+ */
+#define HIF_BASIC_IO			0x00000004
+#define HIF_EXTENDED_IO			0x00000008
+#define HIF_TYPE_MASK			(HIF_BASIC_IO | HIF_EXTENDED_IO)
+
+/*
+ * This indicates the whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
+ * implemented using the asynchronous mode allowing the the bus
+ * driver to indicate the completion of operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS		0x00000010
+#define HIF_ASYNCHRONOUS	0x00000020
+#define HIF_EMODE_MASK		(HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/*
+ * An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * AR6000 interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS		0x00000040
+#define HIF_BLOCK_BASIS		0x00000080
+#define HIF_DMODE_MASK		(HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * This indicates if the address has to be incremented on AR6000
+ * after every read/write operation (HIF_FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS			0x00000100
+#define HIF_INCREMENTAL_ADDRESS		0x00000200
+#define HIF_AMODE_MASK				(HIF_FIXED_ADDRESS | \
+							HIF_INCREMENTAL_ADDRESS)
+
+/*
+ * data written into the dummy space will not put into the final mbox FIFO
+ */
+#define HIF_DUMMY_SPACE_MASK			0xFFFF0000
+
+
+#define HIF_WR_ASYNC_BYTE_FIX   \
+		(HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_ASYNC_BYTE_INC   \
+	(HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_INC  \
+	(HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BYTE_FIX    \
+	(HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BYTE_INC    \
+	(HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_INC  \
+	(HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_WR_ASYNC_BLOCK_FIX \
+	(HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_WR_SYNC_BLOCK_FIX  \
+	(HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_SYNC_BYTE_INC    \
+	(HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BYTE_FIX    \
+	(HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_FIX   \
+	(HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_FIX  \
+	(HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+#define HIF_RD_ASYNC_BYTE_INC   \
+	(HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_ASYNC_BLOCK_INC  \
+	(HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_INC  \
+	(HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+#define HIF_RD_SYNC_BLOCK_FIX  \
+	(HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \
+				HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+enum hif_sdio_device_state {
+		HIF_DEVICE_STATE_ON,
+		HIF_DEVICE_STATE_DEEPSLEEP,
+		HIF_DEVICE_STATE_CUTPOWER,
+		HIF_DEVICE_STATE_WOW
+};
+
+/* One queued SDIO read/write request; requests are pre-allocated in
+ * hif_sdio_dev.bus_request[] and recycled via the free list.
+ */
+struct bus_request {
+	struct bus_request *next;       /* link list of available requests */
+	struct bus_request *inusenext;  /* link list of in use requests */
+	struct semaphore sem_req;       /* signalled on request completion */
+	uint32_t address;       /* request data */
+	char *buffer;
+	uint32_t length;
+	uint32_t request;       /* HIF_* direction/type/mode flag bits */
+	void *context;
+	QDF_STATUS status;      /* completion status of the transfer */
+	struct HIF_SCATTER_REQ_PRIV *scatter_req;
+};
+
+/* Per-SDIO-function device state shared between the HIF layer and the
+ * async request task.
+ */
+struct hif_sdio_dev {
+	struct sdio_func *func;         /* underlying MMC/SDIO function */
+	qdf_spinlock_t asynclock;       /* protects the async request list */
+	struct task_struct *async_task; /* task to handle async commands */
+	struct semaphore sem_async;     /* wake up for async task */
+	int async_shutdown;     /* stop the async task */
+	struct completion async_completion;     /* thread completion */
+	struct bus_request *asyncreq;  /* request for async tasklet */
+	struct bus_request *taskreq;   /*  async tasklet data */
+	qdf_spinlock_t lock;            /* protects the free request queue */
+	struct bus_request *bus_request_free_queue;     /* free list */
+	struct bus_request bus_request[BUS_REQUEST_MAX_NUM]; /* bus requests */
+	void *claimed_ctx;              /* owning hif context, if claimed */
+	struct htc_callbacks htc_callbacks;
+	uint8_t *dma_buffer;            /* bounce buffer for unaligned I/O */
+	DL_LIST scatter_req_head; /* scatter request list head */
+	bool scatter_enabled; /* scatter enabled flag */
+	bool is_suspend;
+	bool is_disabled;
+	atomic_t irq_handling;          /* nonzero while the ISR is active */
+	HIF_DEVICE_POWER_CHANGE_TYPE power_config;
+	enum hif_sdio_device_state device_state;
+	const struct sdio_device_id *id;
+	struct mmc_host *host;
+	void *htc_context;
+	bool swap_mailbox;              /* swap mailbox usage when true */
+};
+
+struct HIF_DEVICE_OS_DEVICE_INFO {
+	void *os_dev;
+};
+
+struct hif_mailbox_properties {
+	u_int32_t    extended_address;  /* extended address for larger writes */
+	u_int32_t    extended_size;
+};
+
+struct hif_device_irq_yield_params {
+	int recv_packet_yield_count; /* max number of packets to force DSR
+				   to return */
+};
+
+struct hif_device_mbox_info {
+	u_int32_t mbox_addresses[4]; /*first element for legacy HIFs and
+				  return the address and ARRAY of 32bit words */
+	struct hif_mailbox_properties mbox_prop[4];
+	u_int32_t gmbox_address;
+	u_int32_t gmbox_size;
+	u_int32_t flags;   /* flags to describe mbox behavior or usage */
+};
+
+enum hif_device_irq_mode {
+	HIF_DEVICE_IRQ_SYNC_ONLY,   /* DSR to process all
+				     * interrupts before returning */
+	HIF_DEVICE_IRQ_ASYNC_SYNC,  /* DSR to process interrupts */
+};
+
+struct osdrv_callbacks {
+	void *context;          /* context to pass for all callbacks
+				 * except device_removed_handler
+				 * the device_removed_handler is only
+				 * called if the device is claimed */
+	int (*device_inserted_handler)(void *context, void *hif_handle);
+	int (*device_removed_handler)(void *claimed_ctx,
+				    void *hif_handle);
+	int (*device_suspend_handler)(void *context);
+	int (*device_resume_handler)(void *context);
+	int (*device_wakeup_handler)(void *context);
+	int (*device_power_change_handler)(void *context,
+					HIF_DEVICE_POWER_CHANGE_TYPE
+					config);
+};
+
+/* other interrupts are pending, host
+ * needs to read the to monitor
+ */
+#define HIF_OTHER_EVENTS     (1 << 0)
+/* pending recv packet */
+#define HIF_RECV_MSG_AVAIL   (1 << 1)
+
+struct _HIF_PENDING_EVENTS_INFO {
+	uint32_t events;
+	uint32_t look_ahead;
+	uint32_t available_recv_bytes;
+};
+
+/* hif-sdio pending events handler type, some HIF modules
+ * use special mechanisms to detect packet available and other interrupts
+ */
+typedef int (*HIF_PENDING_EVENTS_FUNC)(struct hif_sdio_dev *device,
+					struct _HIF_PENDING_EVENTS_INFO *
+					events, void *async_context);
+
+#define HIF_MASK_RECV    true
+#define HIF_UNMASK_RECV  false
+/* hif-sdio Handler type to mask receive events */
+typedef int (*HIF_MASK_UNMASK_RECV_EVENT)(struct hif_sdio_dev *device,
+					  bool mask,
+					  void *async_context);
+
+QDF_STATUS hif_configure_device(struct hif_sdio_dev *device,
+			enum hif_device_config_opcode opcode,
+			void *config, uint32_t config_len);
+
+QDF_STATUS hif_init(struct osdrv_callbacks *callbacks);
+
+QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device,
+			  HTC_CALLBACKS *callbacks);
+
+QDF_STATUS hif_read_write(struct hif_sdio_dev *device,
+			uint32_t address,
+			char *buffer,
+			uint32_t length, uint32_t request, void *context);
+
+void hif_ack_interrupt(struct hif_sdio_dev *device);
+
+void hif_mask_interrupt(struct hif_sdio_dev *device);
+
+void hif_un_mask_interrupt(struct hif_sdio_dev *device);
+
+QDF_STATUS hif_wait_for_pending_recv(struct hif_sdio_dev *device);
+
+struct _HIF_SCATTER_ITEM {
+	u_int8_t     *buffer; /* CPU accessible address of buffer */
+	int          length; /* length of transfer to/from this buffer */
+	void         *caller_contexts[2]; /* caller context */
+};
+
+struct _HIF_SCATTER_REQ;
+
+typedef void (*HIF_SCATTER_COMP_CB)(struct _HIF_SCATTER_REQ *);
+
+enum HIF_SCATTER_METHOD {
+	HIF_SCATTER_NONE = 0,
+	HIF_SCATTER_DMA_REAL, /* Real SG support no restrictions */
+	HIF_SCATTER_DMA_BOUNCE, /* Uses SG DMA */
+};
+
+struct _HIF_SCATTER_REQ {
+	DL_LIST             list_link; /* link management */
+	u_int32_t            address; /* address for the read/write operation */
+	u_int32_t            request; /* request flags */
+	u_int32_t            total_length; /* total length of entire transfer */
+	u_int32_t            caller_flags; /* caller specific flags */
+	HIF_SCATTER_COMP_CB  completion_routine; /* completion callback */
+	int                  completion_status; /* status of completion */
+	void                 *context; /* caller context for this request */
+	int                  valid_scatter_entries; /* no of valid entries */
+	/* scatter method handled by HIF */
+	enum HIF_SCATTER_METHOD   scatter_method;
+	void                 *hif_private[4]; /* HIF private area */
+	u_int8_t             *scatter_bounce_buffer; /* bounce buffers */
+	struct _HIF_SCATTER_ITEM    scatter_list[1]; /* start of scatter list */
+};
+
+typedef struct
+_HIF_SCATTER_REQ * (*HIF_ALLOCATE_SCATTER_REQUEST)(struct hif_sdio_dev *device);
+typedef void (*HIF_FREE_SCATTER_REQUEST)(struct hif_sdio_dev *device,
+				struct _HIF_SCATTER_REQ *request);
+typedef QDF_STATUS (*HIF_READWRITE_SCATTER)(struct hif_sdio_dev *device,
+					struct _HIF_SCATTER_REQ *request);
+
+struct HIF_DEVICE_SCATTER_SUPPORT_INFO {
+	/* information returned from HIF layer */
+	HIF_ALLOCATE_SCATTER_REQUEST    allocate_req_func;
+	HIF_FREE_SCATTER_REQUEST        free_req_func;
+	HIF_READWRITE_SCATTER           read_write_scatter_func;
+	int                             max_scatter_entries;
+	int                             max_tx_size_per_scatter_req;
+};
+
+void hif_get_target_revision(struct hif_softc *ol_sc);
+struct HIF_SCATTER_REQ_PRIV;
+
+#define HIF_DMA_BUFFER_SIZE (4 * 1024)
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS  2
+
+struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device);
+void hif_free_bus_request(struct hif_sdio_dev *device,
+			  struct bus_request *busrequest);
+void add_to_async_list(struct hif_sdio_dev *device,
+		       struct bus_request *busrequest);
+void hif_dump_cccr(struct hif_sdio_dev *hif_device);
+
+#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define MAX_SCATTER_REQUESTS             4
+#define MAX_SCATTER_ENTRIES_PER_REQ      16
+#define MAX_SCATTER_REQ_TRANSFER_SIZE    (32*1024)
+
+struct HIF_SCATTER_REQ_PRIV {
+	struct _HIF_SCATTER_REQ *hif_scatter_req;
+	struct hif_sdio_dev *device;     /* this device */
+	struct bus_request *busrequest;
+	/* scatter list for linux */
+	struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ];
+};
+
+#define ATH_DEBUG_SCATTER  ATH_DEBUG_MAKE_MODULE_MASK(0)
+
+QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device,
+		   struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info);
+void cleanup_hif_scatter_resources(struct hif_sdio_dev *device);
+QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device,
+				   struct bus_request *busrequest);
+
+#else                           /* HIF_LINUX_MMC_SCATTER_SUPPORT */
+
+/* Stubs used when scatter-gather support is compiled out: report the
+ * feature as unsupported so callers fall back to non-scatter I/O.
+ */
+static inline QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device,
+				struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device,
+					 struct bus_request *busrequest)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+#define cleanup_hif_scatter_resources(d) { }
+
+#endif /* HIF_LINUX_MMC_SCATTER_SUPPORT */
+
+#endif /* _HIF_INTERNAL_H_ */

+ 2705 - 0
hif/src/sdio/native_sdio/src/hif.c

@@ -0,0 +1,2705 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <qdf_atomic.h>
+#include <cds_utils.h>
+#include <qdf_timer.h>
+#include <cds_api.h>
+#include <qdf_time.h>
+#include "hif_sdio_dev.h"
+#include "if_sdio.h"
+#include "regtable_sdio.h"
+#include "wma_api.h"
+#include "hif_internal.h"
+
+/* by default setup a bounce buffer for the data packets,
+ * if the underlying host controller driver
+ * does not use DMA you may be able to skip this step
+ * and save the memory allocation and transfer time */
+#define HIF_USE_DMA_BOUNCE_BUFFER 1
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+
+#if HIF_USE_DMA_BOUNCE_BUFFER
+/* macro to check if DMA buffer is WORD-aligned and DMA-able.
+ * Most host controllers assume the
+ * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack).
+ * virt_addr_valid check fails on stack memory.
+ */
+#define BUFFER_NEEDS_BOUNCE(buffer)  (((unsigned long)(buffer) & 0x3) || \
+					!virt_addr_valid((buffer)))
+#else
+#define BUFFER_NEEDS_BOUNCE(buffer)   (false)
+#endif
+#define MAX_HIF_DEVICES 2
+#ifdef HIF_MBOX_SLEEP_WAR
+#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
+#define HIF_SLEEP_DISABLE_UPDATE_DELAY 1
+#define HIF_IS_WRITE_REQUEST_MBOX1_TO_3(request) \
+				((request->request & HIF_SDIO_WRITE) && \
+				(request->address >= 0x1000 && \
+				request->address < 0x1FFFF))
+#endif
+
+extern struct hif_sdio_softc *sc;
+
+unsigned int mmcbuswidth = 0;
+module_param(mmcbuswidth, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mmcbuswidth,
+		 "Set MMC driver Bus Width: 1-1Bit, 4-4Bit, 8-8Bit");
+
+unsigned int mmcclock = 0;
+module_param(mmcclock, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mmcclock, "Set MMC driver Clock value");
+
+unsigned int brokenirq = 0;
+module_param(brokenirq, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(brokenirq,
+		 "Set as 1 to use polling method instead of interrupt mode");
+
+unsigned int forcesleepmode = 0;
+module_param(forcesleepmode, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(forcesleepmode,
+		"Set sleep mode: 0-host capbility, "
+		"1-force WOW, 2-force DeepSleep, 3-force CutPower");
+
+#ifdef CONFIG_X86
+unsigned int asyncintdelay = 2;
+module_param(asyncintdelay, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(asyncintdelay,
+		 "Delay clock count for aysnc interrupt, "
+		 "2 is default, vaild values are 1 and 2");
+#else
+unsigned int asyncintdelay = 0;
+module_param(asyncintdelay, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(asyncintdelay,
+		 "Delay clock count for aysnc interrupt, "
+		 "0 is default, vaild values are 1 and 2");
+#endif
+
+unsigned int forcecard = 0;
+module_param(forcecard, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(forcecard,
+		 "Ignore card capabilities information to switch bus mode");
+
+unsigned int debugcccr = 1;
+module_param(debugcccr, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(debugcccr, "Output this cccr values");
+
+unsigned int writecccr1 = 0;
+module_param(writecccr1, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+unsigned int writecccr1value = 0;
+module_param(writecccr1value, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+unsigned int writecccr2 = 0;
+module_param(writecccr2, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+unsigned int writecccr2value = 0;
+module_param(writecccr2value, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+unsigned int writecccr3 = 0;
+module_param(writecccr3, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+unsigned int writecccr3value = 0;
+module_param(writecccr3value, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+unsigned int writecccr4 = 0;
+module_param(writecccr4, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+unsigned int writecccr4value = 0;
+module_param(writecccr4value, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+unsigned int modstrength = 0;
+module_param(modstrength, uint, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(modstrength, "Adjust internal driver strength");
+
+#define dev_to_sdio_func(d)		container_of(d, struct sdio_func, dev)
+#define to_sdio_driver(d)		container_of(d, struct sdio_driver, drv)
+static int hif_device_inserted(struct sdio_func *func,
+			       const struct sdio_device_id *id);
+static void hif_device_removed(struct sdio_func *func);
+static struct hif_sdio_dev *add_hif_device(struct sdio_func *func);
+static struct hif_sdio_dev *get_hif_device(struct sdio_func *func);
+static void del_hif_device(struct hif_sdio_dev *device);
+static int func0_cmd52_write_byte(struct mmc_card *card, unsigned int address,
+				  unsigned char byte);
+static int func0_cmd52_read_byte(struct mmc_card *card, unsigned int address,
+				 unsigned char *byte);
+
+int reset_sdio_on_unload = 0;
+module_param(reset_sdio_on_unload, int, 0644);
+
+uint32_t nohifscattersupport = 1;
+
+uint32_t forcedriverstrength = 1; /* force driver strength to type D */
+
+/* ------ Static Variables ------ */
+static const struct sdio_device_id ar6k_id_table[] = {
+#ifdef AR6002_HEADERS_DEF
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1))},
+#endif
+#ifdef AR6003_HEADERS_DEF
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+#endif
+#ifdef AR6004_HEADERS_DEF
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
+#endif
+#ifdef AR6320_HEADERS_DEF
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x6))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x9))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xB))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x0))},
+	{SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x1))},
+#endif
+	{ /* null */ },
+};
+
+#ifndef CONFIG_CNSS_SDIO
+MODULE_DEVICE_TABLE(sdio, ar6k_id_table);
+
+static struct sdio_driver ar6k_driver = {
+	.name = "ar6k_wlan",
+	.id_table = ar6k_id_table,
+	.probe = hif_device_inserted,
+	.remove = hif_device_removed,
+};
+
+static const struct dev_pm_ops ar6k_device_pm_ops = {
+	.suspend = hif_device_suspend,
+	.resume = hif_device_resume,
+};
+
+#endif
+
+/* make sure we only unregister when registered. */
+static int registered;
+
+struct osdrv_callbacks osdrv_callbacks;
+uint32_t onebitmode;
+uint32_t busspeedlow;
+uint32_t debughif;
+
+static struct hif_sdio_dev *hif_devices[MAX_HIF_DEVICES];
+
+static void reset_all_cards(void);
+static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
+		   struct sdio_func *func);
+static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device,
+		   struct sdio_func *func);
+
+#ifdef DEBUG
+ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif,
+				 "hif",
+				 "(Linux MMC) Host Interconnect Framework",
+				 ATH_DEBUG_MASK_DEFAULTS, 0, NULL);
+#endif
+
+/**
+ * hif_sdio_init_callbacks() - store the OS driver callback handlers
+ * @callbacks: callback set supplied by the upper layer
+ *
+ * Copies the handlers into the file-scope osdrv_callbacks; the actual
+ * registration with the bus driver core is done from HDD.
+ *
+ * Return: always 0.
+ */
+static int hif_sdio_init_callbacks(struct osdrv_callbacks *callbacks)
+{
+	int status = 0;
+	/* store the callback handlers */
+	osdrv_callbacks = *callbacks;
+
+	/* Register with bus driver core is done from HDD */
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("%s: HIFInit registering\n",
+					__func__));
+	registered = 1;
+
+	return status;
+}
+/**
+ * hif_sdio_remove_callbacks() - clear the stored OS driver callbacks
+ *
+ * Return: none.
+ */
+static void hif_sdio_remove_callbacks(void)
+{
+	qdf_mem_zero(&osdrv_callbacks, sizeof(osdrv_callbacks));
+}
+
+
+/**
+ * hif_init() - Initializes the driver callbacks
+ * @callbacks: pointer to driver callback structure
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise.
+ */
+QDF_STATUS hif_init(struct osdrv_callbacks *callbacks)
+{
+	int status;
+
+	AR_DEBUG_ASSERT(callbacks != NULL);
+	A_REGISTER_MODULE_DEBUG_INFO(hif);
+
+	HIF_ENTER();
+
+	/* NOTE(review): hif_sdio_init_callbacks() only stores the callbacks
+	 * (registration itself happens in HDD); the "sdio_register_driver"
+	 * wording in the messages below is misleading — confirm intent. */
+	status = hif_sdio_init_callbacks(callbacks);
+	AR_DEBUG_ASSERT(status == 0);
+
+	if (status != 0) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("%s sdio_register_driver failed!", __func__));
+		return QDF_STATUS_E_FAILURE;
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s sdio_register_driver successful", __func__));
+	}
+
+	return QDF_STATUS_SUCCESS;
+
+}
+
+/**
+ * __hif_read_write() - sdio read/write wrapper
+ * @device: pointer to hif device structure
+ * @address: address to read
+ * @buffer: buffer to hold read/write data
+ * @length: length to read/write
+ * @request: read/write/sync/async request
+ * @context: pointer to hold calling context
+ *
+ * Return: 0 on success, error number otherwise.
+ */
+static QDF_STATUS
+__hif_read_write(struct hif_sdio_dev *device,
+		 uint32_t address,
+		 char *buffer,
+		 uint32_t length, uint32_t request, void *context)
+{
+	uint8_t opcode;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	/* initialized so the trailing check is well-defined on early breaks */
+	int ret = 0;
+	uint8_t *tbuffer;
+	bool bounced = false;
+
+	AR_DEBUG_ASSERT(device != NULL);
+	AR_DEBUG_ASSERT(device->func != NULL);
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("__hif_read_write, addr:0X%06X, len:%08d, %s, %s\n",
+			 address, length,
+			 request & HIF_SDIO_READ ? "Read " : "Write",
+			 request & HIF_ASYNCHRONOUS ? "Async" : "Sync "));
+
+	/* single-pass do/while used for goto-free early exit on errors */
+	do {
+		if (request & HIF_EXTENDED_IO) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: Command type: CMD53\n", __func__));
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Invalid command type: 0x%08x\n",
+				__func__, request));
+			status = QDF_STATUS_E_INVAL;
+			break;
+		}
+
+		if (request & HIF_BLOCK_BASIS) {
+			/* round to whole block length size */
+			length =
+				(length / HIF_MBOX_BLOCK_SIZE) *
+				HIF_MBOX_BLOCK_SIZE;
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: Block mode (BlockLen: %d)\n",
+					 __func__, length));
+		} else if (request & HIF_BYTE_BASIS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: Byte mode (BlockLen: %d)\n",
+					 __func__, length));
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: Invalid data mode: 0x%08x\n",
+					 __func__, request));
+			status = QDF_STATUS_E_INVAL;
+			break;
+		}
+		if (request & HIF_SDIO_WRITE) {
+			struct hif_device_mbox_info MailBoxInfo;
+			unsigned int mboxLength = 0;
+			hif_configure_device(device,
+					     HIF_DEVICE_GET_MBOX_ADDR,
+					     &MailBoxInfo, sizeof(MailBoxInfo));
+			if (address >= 0x800 && address < 0xC00) {
+				/* Host control register and CIS Window */
+				mboxLength = 0;
+			} else if (address == MailBoxInfo.mbox_addresses[0]
+				   || address == MailBoxInfo.mbox_addresses[1]
+				   || address == MailBoxInfo.mbox_addresses[2]
+				   || address ==
+						MailBoxInfo.mbox_addresses[3]) {
+				mboxLength = HIF_MBOX_WIDTH;
+			} else if (address ==
+				   MailBoxInfo.mbox_prop[0].extended_address) {
+				mboxLength =
+					MailBoxInfo.mbox_prop[0].extended_size;
+			} else if (address ==
+				   MailBoxInfo.mbox_prop[1].extended_address) {
+				mboxLength =
+					MailBoxInfo.mbox_prop[1].extended_size;
+			} else {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("Invalid written address: 0x%08x\n",
+					address));
+				/* fix: previously broke out without setting
+				 * status, silently returning SUCCESS even
+				 * though no transfer was performed */
+				status = QDF_STATUS_E_INVAL;
+				break;
+			}
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("address:%08X, Length:0x%08X, Dummy:0x%04X, "
+				 "Final:0x%08X\n",
+				 address, length,
+				 (request & HIF_DUMMY_SPACE_MASK) >> 16,
+				 mboxLength ==
+				 0 ? address : address + (mboxLength -
+				 length)));
+			if (mboxLength != 0) {
+				if (length > mboxLength) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: written length(0x%08X) "
+					"larger than mbox len(0x%08x)\n",
+						 __func__, length, mboxLength));
+					/* fix: report the oversize write as
+					 * an error instead of returning
+					 * SUCCESS without transferring */
+					status = QDF_STATUS_E_INVAL;
+					break;
+				}
+				/* writes land at the end of the mbox window */
+				address += (mboxLength - length);
+				/*
+				 * plus dummy byte count
+				 */
+				address += ((request &
+						HIF_DUMMY_SPACE_MASK) >> 16);
+			}
+		}
+
+		if (request & HIF_FIXED_ADDRESS) {
+			opcode = CMD53_FIXED_ADDRESS;
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: Address mode: Fixed 0x%X\n",
+					 __func__, address));
+		} else if (request & HIF_INCREMENTAL_ADDRESS) {
+			opcode = CMD53_INCR_ADDRESS;
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: Address mode: Incremental 0x%X\n",
+				 __func__, address));
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Invalid address mode: 0x%08x\n",
+				 __func__, request));
+			status = QDF_STATUS_E_INVAL;
+			break;
+		}
+
+		if (request & HIF_SDIO_WRITE) {
+#if HIF_USE_DMA_BOUNCE_BUFFER
+			if (BUFFER_NEEDS_BOUNCE(buffer)) {
+				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
+				tbuffer = device->dma_buffer;
+				/* copy the write data to the dma buffer */
+				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
+				if (length > HIF_DMA_BUFFER_SIZE) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+						("%s: Invalid write length:"
+						 "%d\n", __func__, length));
+					status = QDF_STATUS_E_INVAL;
+					break;
+				}
+				memcpy(tbuffer, buffer, length);
+				bounced = true;
+			} else {
+				tbuffer = buffer;
+			}
+#else
+			tbuffer = buffer;
+#endif
+			if (opcode == CMD53_FIXED_ADDRESS  && tbuffer != NULL) {
+				ret =
+					sdio_writesb(device->func, address,
+						tbuffer,
+						length);
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: writesb ret=%d address:"
+					 " 0x%X, len: %d, 0x%X\n",
+					 __func__, ret, address, length,
+					 *(int *)tbuffer));
+			} else {
+				ret =
+					sdio_memcpy_toio(device->func, address,
+							 tbuffer, length);
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: writeio ret=%d address: "
+					" 0x%X, len: %d, 0x%X\n",
+					 __func__, ret, address, length,
+					 *(int *)tbuffer));
+			}
+		} else if (request & HIF_SDIO_READ) {
+#if HIF_USE_DMA_BOUNCE_BUFFER
+			if (BUFFER_NEEDS_BOUNCE(buffer)) {
+				AR_DEBUG_ASSERT(device->dma_buffer != NULL);
+				AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
+				if (length > HIF_DMA_BUFFER_SIZE) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: Invalid read length: %d\n",
+					__func__, length));
+					status = QDF_STATUS_E_INVAL;
+					break;
+				}
+				tbuffer = device->dma_buffer;
+				bounced = true;
+			} else {
+				tbuffer = buffer;
+			}
+#else
+			tbuffer = buffer;
+#endif
+			if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) {
+				ret =
+					sdio_readsb(device->func, tbuffer,
+						    address,
+						    length);
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: readsb ret=%d address:"
+					 " 0x%X, len: %d, 0x%X\n",
+					 __func__, ret, address, length,
+					 *(int *)tbuffer));
+			} else {
+				ret =
+					sdio_memcpy_fromio(device->func,
+							   tbuffer,
+							   address, length);
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: readio ret=%d address: 0x%X,"
+					" len: %d, 0x%X\n",
+					 __func__, ret, address, length,
+					 *(int *)tbuffer));
+			}
+#if HIF_USE_DMA_BOUNCE_BUFFER
+			/* copy the bounced read data back to the caller */
+			if (bounced)
+				memcpy(buffer, tbuffer, length);
+#endif
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: Invalid direction: 0x%08x\n",
+					 __func__, request));
+			status = QDF_STATUS_E_INVAL;
+			break;
+		}
+
+		if (ret) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: SDIO bus operation failed! "
+					"MMC stack returned : %d\n",
+					 __func__, ret));
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("__hif_read_write, addr:0X%06X, "
+				"len:%08d, %s, %s\n",
+				 address, length,
+				 request & HIF_SDIO_READ ? "Read " : "Write",
+				 request & HIF_ASYNCHRONOUS ? "Async" :
+					 "Sync "));
+			status = QDF_STATUS_E_FAILURE;
+		}
+	} while (false);
+
+	return status;
+}
+
+/**
+ * add_to_async_list() - add bus request to async task list
+ * @device: pointer to hif device
+ * @busrequest: bus request to queue
+ *
+ * Appends the request to the tail of the singly-linked in-use list
+ * under the async spinlock; async_task() drains this list.
+ *
+ * Return: None.
+ */
+void add_to_async_list(struct hif_sdio_dev *device,
+		      struct bus_request *busrequest)
+{
+	struct bus_request *async;
+	struct bus_request *active;
+
+	qdf_spin_lock_irqsave(&device->asynclock);
+	active = device->asyncreq;
+	if (active == NULL) {
+		/* empty list: new request becomes the head */
+		device->asyncreq = busrequest;
+		device->asyncreq->inusenext = NULL;
+	} else {
+		/* walk to the tail, then link the new request after it */
+		for (async = device->asyncreq;
+		     async != NULL; async = async->inusenext) {
+			active = async;
+		}
+		active->inusenext = busrequest;
+		busrequest->inusenext = NULL;
+	}
+	qdf_spin_unlock_irqrestore(&device->asynclock);
+}
+
+/**
+ * hif_read_write() - queue a read/write request
+ * @device: pointer to hif device structure
+ * @address: address to read
+ * @buffer: buffer to hold read/write data
+ * @length: length to read/write
+ * @request: read/write/sync/async request
+ * @context: pointer to hold calling context
+ *
+ * Return: 0 on success, error number otherwise.
+ */
+QDF_STATUS
+hif_read_write(struct hif_sdio_dev *device,
+		uint32_t address,
+		char *buffer, uint32_t length,
+		uint32_t request, void *context)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct bus_request *busrequest;
+
+	AR_DEBUG_ASSERT(device != NULL);
+	AR_DEBUG_ASSERT(device->func != NULL);
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: device 0x%p addr 0x%X buffer 0x%p "
+			"len %d req 0x%X context 0x%p",
+			 __func__, device, address, buffer,
+			 length, request, context));
+
+	/*sdio r/w action is not needed when suspend, so just return */
+	if ((device->is_suspend == true)
+	    && (device->power_config == HIF_DEVICE_POWER_CUT)) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
+		return QDF_STATUS_SUCCESS;
+	}
+	do {
+		if ((request & HIF_ASYNCHRONOUS) ||
+			(request & HIF_SYNCHRONOUS)) {
+			/* serialize all requests through the async thread */
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: Execution mode: %s\n", __func__,
+					 (request & HIF_ASYNCHRONOUS) ? "Async"
+					 : "Synch"));
+			busrequest = hif_allocate_bus_request(device);
+			if (busrequest == NULL) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("no async bus requests "
+					 "available (%s, addr:0x%X, len:%d)\n",
+					 request & HIF_SDIO_READ ? "READ" :
+					 "WRITE", address, length));
+				return QDF_STATUS_E_FAILURE;
+			}
+			busrequest->address = address;
+			busrequest->buffer = buffer;
+			busrequest->length = length;
+			busrequest->request = request;
+			busrequest->context = context;
+
+			add_to_async_list(device, busrequest);
+
+			if (request & HIF_SYNCHRONOUS) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: queued sync req: 0x%lX\n",
+					 __func__, (unsigned long)busrequest));
+
+				/* wait for completion */
+				up(&device->sem_async);
+				if (down_interruptible(&busrequest->sem_req) !=
+				    0) {
+					/* interrupted, exit */
+					/* NOTE(review): busrequest is not
+					 * freed on this path — confirm the
+					 * async thread still owns it here */
+					return QDF_STATUS_E_FAILURE;
+				} else {
+					/* intentionally shadows the outer
+					 * 'status'; holds the completion
+					 * result set by async_task() */
+					QDF_STATUS status = busrequest->status;
+					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				    ("%s: sync return freeing 0x%lX: 0x%X\n",
+						 __func__,
+						 (unsigned long)
+						 busrequest,
+						 busrequest->status));
+					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+						("%s: freeing req: 0x%X\n",
+						 __func__,
+						 (unsigned int)
+						 request));
+					hif_free_bus_request(device,
+						busrequest);
+					return status;
+				}
+			} else {
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: queued async req: 0x%lX\n",
+						__func__,
+						 (unsigned long)busrequest));
+				up(&device->sem_async);
+				/* async caller is notified via the
+				 * rwCompletionHandler callback */
+				return QDF_STATUS_E_PENDING;
+			}
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Invalid execution mode: 0x%08x\n",
+					__func__,
+					 (unsigned int)request));
+			status = QDF_STATUS_E_INVAL;
+			break;
+		}
+	} while (0);
+
+	return status;
+}
+
+/**
+ * async_task() - thread function to serialize all bus requests
+ * @param: pointer to hif device
+ *
+ * thread function to serialize all requests, both sync and async
+ * Return: 0 on success, error number otherwise.
+ */
+static int async_task(void *param)
+{
+	struct hif_sdio_dev *device;
+	struct bus_request *request;
+	QDF_STATUS status;
+
+	device = (struct hif_sdio_dev *) param;
+	set_current_state(TASK_INTERRUPTIBLE);
+	/* loop until shutdown is requested or the wait is interrupted */
+	while (!device->async_shutdown) {
+		/* wait for work */
+		if (down_interruptible(&device->sem_async) != 0) {
+			/* interrupted, exit */
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: async task interrupted\n",
+					 __func__));
+			break;
+		}
+		if (device->async_shutdown) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: async task stopping\n",
+					 __func__));
+			break;
+		}
+		/* we want to hold the host over multiple cmds
+		 * if possible, but holding the host blocks
+		 * card interrupts */
+		sdio_claim_host(device->func);
+		qdf_spin_lock_irqsave(&device->asynclock);
+		/* pull the request to work on */
+		while (device->asyncreq != NULL) {
+			request = device->asyncreq;
+			if (request->inusenext != NULL)
+				device->asyncreq = request->inusenext;
+			else
+				device->asyncreq = NULL;
+			/* drop the lock while performing the (slow) bus I/O;
+			 * reacquired before re-checking the list below */
+			qdf_spin_unlock_irqrestore(&device->asynclock);
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: async_task processing req: 0x%lX\n",
+				 __func__, (unsigned long)request));
+
+			if (request->scatter_req != NULL) {
+				A_ASSERT(device->scatter_enabled);
+				/* pass the request to scatter routine which
+				 * executes it synchronously, note, no need
+				 * to free the request since scatter requests
+				 * are maintained on a separate list */
+				status = do_hif_read_write_scatter(device,
+							request);
+			} else {
+				/* call hif_read_write in sync mode */
+				status =
+					__hif_read_write(device,
+							 request->address,
+							 request->buffer,
+							 request->length,
+							 request->
+							 request &
+							 ~HIF_SYNCHRONOUS,
+							 NULL);
+				if (request->request & HIF_ASYNCHRONOUS) {
+					/* async: free the request first, then
+					 * notify HTC via the completion
+					 * callback with the saved context */
+					void *context = request->context;
+					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: freeing req: 0x%lX\n",
+						 __func__, (unsigned long)
+						 request));
+					hif_free_bus_request(device, request);
+					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				      ("%s: async_task completion req 0x%lX\n",
+						 __func__, (unsigned long)
+						 request));
+					device->htc_callbacks.
+					rwCompletionHandler(context,
+							    status);
+				} else {
+					/* sync: wake the waiter blocked in
+					 * hif_read_write(); it frees the
+					 * request after reading ->status */
+					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				      ("%s: async_task upping req: 0x%lX\n",
+						 __func__, (unsigned long)
+						 request));
+					request->status = status;
+					up(&request->sem_req);
+				}
+			}
+			qdf_spin_lock_irqsave(&device->asynclock);
+		}
+		qdf_spin_unlock_irqrestore(&device->asynclock);
+		sdio_release_host(device->func);
+	}
+
+	complete_and_exit(&device->async_completion, 0);
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+/**
+ * sdio_card_highspeed() - check if high speed supported
+ * @card: pointer to mmc card struct
+ *
+ * Compat shim: mmc_card_highspeed() was renamed mmc_card_hs()
+ * in kernel 3.16.
+ *
+ * Return: non zero if card supports high speed.
+ */
+static inline int sdio_card_highspeed(struct mmc_card *card)
+{
+	return mmc_card_highspeed(card);
+}
+#else
+static inline int sdio_card_highspeed(struct mmc_card *card)
+{
+	return mmc_card_hs(card);
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+/**
+ * sdio_card_set_highspeed() - set high speed
+ * @card: pointer to mmc card struct
+ *
+ * No-op on kernels >= 3.16, where the highspeed state flag
+ * was removed from struct mmc_card.
+ *
+ * Return: none.
+ */
+static inline void sdio_card_set_highspeed(struct mmc_card *card)
+{
+	mmc_card_set_highspeed(card);
+}
+#else
+static inline void sdio_card_set_highspeed(struct mmc_card *card)
+{
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+/**
+ * sdio_card_state() - clear the cached high-speed state flag
+ * @card: pointer to mmc card struct
+ *
+ * No-op on kernels >= 3.16 (flag no longer exists).
+ *
+ * Return: none.
+ */
+static inline void sdio_card_state(struct mmc_card *card)
+{
+	card->state &= ~MMC_STATE_HIGHSPEED;
+}
+#else
+static inline void sdio_card_state(struct mmc_card *card)
+{
+}
+#endif
+
+/**
+ * reinit_sdio() - re-initialize sdio bus
+ * @param: pointer to hif device
+ *
+ * Return: 0 on success, error number otherwise.
+ */
+QDF_STATUS reinit_sdio(struct hif_sdio_dev *device)
+{
+	int32_t err = 0;
+	struct mmc_host *host;
+	struct mmc_card *card;
+	struct sdio_func *func;
+	uint8_t  cmd52_resp;
+	uint32_t clock;
+
+	func = device->func;
+	card = func->card;
+	host = card->host;
+
+	sdio_claim_host(func);
+
+	do {
+		/* Enable high speed */
+		if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: Set high speed mode\n",
+				__func__));
+			err = func0_cmd52_read_byte(card, SDIO_CCCR_SPEED,
+						&cmd52_resp);
+			if (err) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: CMD52 read to CCCR speed "
+					"register failed  : %d\n",
+					__func__, err));
+				sdio_card_state(card);
+		/* no need to break */
+			} else {
+				err = func0_cmd52_write_byte(card,
+						SDIO_CCCR_SPEED,
+						(cmd52_resp | SDIO_SPEED_EHS));
+				if (err) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+						("%s: CMD52 write to CCCR speed"
+						 " register failed  : %d\n",
+						 __func__, err));
+				break;
+				}
+				sdio_card_set_highspeed(card);
+				host->ios.timing = MMC_TIMING_SD_HS;
+				host->ops->set_ios(host, &host->ios);
+			}
+		}
+
+		/* Set clock */
+		if (sdio_card_highspeed(card))
+			clock = 50000000;
+		else
+			clock = card->cis.max_dtr;
+
+		if (clock > host->f_max)
+			clock = host->f_max;
+	/*
+	 * In fpga mode the clk should be set to 12500000,
+	 * or will result in scan channel setting timeout error.
+	 * So in fpga mode, please set module parameter mmcclock
+	 * to 12500000.
+	 */
+	if (mmcclock > 0)
+		clock = mmcclock;
+		host->ios.clock = clock;
+		host->ops->set_ios(host, &host->ios);
+
+	/* NOTE(review): the statements above from the mmcclock check through
+	 * set_ios are inside the do/while despite the outdented style —
+	 * indentation here is misleading but the logic is as intended */
+	if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+		/* CMD52: Set bus width & disable card detect resistor */
+		err = func0_cmd52_write_byte(card, SDIO_CCCR_IF,
+				SDIO_BUS_CD_DISABLE | SDIO_BUS_WIDTH_4BIT);
+		if (err) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: CMD52 to set bus mode "
+					"failed : %d\n",
+					__func__, err));
+			break;
+		}
+		host->ios.bus_width = MMC_BUS_WIDTH_4;
+		host->ops->set_ios(host, &host->ios);
+	}
+	} while (0);
+
+	sdio_release_host(func);
+
+	return (err) ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
+}
+
+/*
+ * Setup IRQ mode for deep sleep and WoW
+ * Switch back to 1 bits mode when we suspend for
+ * WoW in order to detect SDIO irq without clock.
+ * Re-enable async 4-bit irq mode for some host controllers
+ * after resume.
+ */
+static int sdio_enable4bits(struct hif_sdio_dev *device, int enable)
+{
+	int ret = 0;
+	struct sdio_func *func = device->func;
+	struct mmc_card *card = func->card;
+	struct mmc_host *host = card->host;
+
+	/* nothing to do if the host cannot drive a 4-bit bus */
+	if (!(host->caps & (MMC_CAP_4_BIT_DATA)))
+		return 0;
+
+	if (card->cccr.low_speed && !card->cccr.wide_bus)
+		return 0;
+
+	sdio_claim_host(func);
+	do {
+		int setAsyncIRQ = 0;
+		__u16 manufacturer_id =
+			device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK;
+
+		/* Re-enable 4-bit ASYNC interrupt on AR6003x
+		 * after system resume for some host controller */
+		if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) {
+			setAsyncIRQ = 1;
+			ret =
+				func0_cmd52_write_byte(func->card,
+					    CCCR_SDIO_IRQ_MODE_REG_AR6003,
+					    enable ?
+					    SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003
+					    : 0);
+		} else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE
+			   || manufacturer_id == MANUFACTURER_ID_QCA9377_BASE) {
+			/* AR6320/QCA9377: read-modify-write the IRQ mode
+			 * register instead of overwriting it outright */
+			unsigned char data = 0;
+			setAsyncIRQ = 1;
+			ret =
+				func0_cmd52_read_byte(func->card,
+					      CCCR_SDIO_IRQ_MODE_REG_AR6320,
+						   &data);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: failed to read interrupt "
+					"extension register %d\n",
+						 __func__, ret));
+				sdio_release_host(func);
+				return ret;
+			}
+			if (enable)
+				data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
+			else
+				data &= ~SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
+			ret =
+				func0_cmd52_write_byte(func->card,
+					       CCCR_SDIO_IRQ_MODE_REG_AR6320,
+					       data);
+		}
+		if (setAsyncIRQ) {
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: failed to setup 4-bit "
+					"ASYNC IRQ mode into %d err %d\n",
+					 __func__, enable, ret));
+			} else {
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("%s: Setup 4-bit ASYNC "
+					"IRQ mode into %d successfully\n",
+					 __func__, enable));
+			}
+		}
+	} while (0);
+	sdio_release_host(func);
+
+	return ret;
+}
+
+
+/**
+ * power_state_change_notify() - SDIO bus power notification handler
+ * @device: pointer to hif device
+ * @config: hif device power change type
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error code otherwise
+ * (QDF_STATUS_E_PENDING when a deferred re-detect was scheduled).
+ */
+QDF_STATUS
+power_state_change_notify(struct hif_sdio_dev *device,
+			HIF_DEVICE_POWER_CHANGE_TYPE config)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct sdio_func *func = device->func;
+	int old_reset_val;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: config type %d\n",
+			__func__, config));
+	switch (config) {
+	case HIF_DEVICE_POWER_DOWN:
+		/* Disable 4bits to allow SDIO bus to detect
+		 * DAT1 as interrupt source */
+		sdio_enable4bits(device, 0);
+		break;
+	case HIF_DEVICE_POWER_CUT:
+		/* force an SDIO reset on disable regardless of the
+		 * module parameter, then restore the old setting */
+		old_reset_val = reset_sdio_on_unload;
+		reset_sdio_on_unload = 1;
+		status = hif_disable_func(device, func);
+		reset_sdio_on_unload = old_reset_val;
+		if (!device->is_suspend) {
+			device->power_config = config;
+			mmc_detect_change(device->host, HZ / 3);
+		}
+		break;
+	case HIF_DEVICE_POWER_UP:
+		if (device->power_config == HIF_DEVICE_POWER_CUT) {
+			if (device->is_suspend) {
+				status = reinit_sdio(device);
+				/* set power_config before EnableFunc to
+				 * passthrough sdio r/w action when resuming
+				 * from cut power */
+				device->power_config = config;
+				if (status == QDF_STATUS_SUCCESS)
+					status = hif_enable_func(device, func);
+			} else {
+				/* device->func is bad pointer at this time */
+				mmc_detect_change(device->host, 0);
+				return QDF_STATUS_E_PENDING;
+			}
+		} else if (device->power_config == HIF_DEVICE_POWER_DOWN) {
+			int ret = sdio_enable4bits(device, 1);
+			status = (ret == 0) ? QDF_STATUS_SUCCESS :
+						QDF_STATUS_E_FAILURE;
+		}
+		break;
+	}
+	/* record the new state even on error paths — callers rely on
+	 * power_config reflecting the last requested transition */
+	device->power_config = config;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s:\n", __func__));
+
+	return status;
+}
+
+#ifdef SDIO_3_0
+/**
+ * set_extended_mbox_size() - set extended MBOX size
+ * @pinfo: sdio mailbox info
+ *
+ * SDIO 3.0 build: mailbox 0 gets the wider Rome 2.0 window and
+ * mailbox 1 is also given an extended window.
+ *
+ * Return: none.
+ */
+static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
+{
+	pinfo->mbox_prop[0].extended_size =
+		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
+	pinfo->mbox_prop[1].extended_size =
+		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
+}
+
+/**
+ * set_extended_mbox_address() - set extended MBOX address
+ * @pinfo: sdio mailbox info
+ *
+ * Places mailbox 1's extended window directly after mailbox 0's,
+ * separated by the AR6320 dummy space.
+ *
+ * Return: none.
+ */
+static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
+{
+	pinfo->mbox_prop[1].extended_address =
+		pinfo->mbox_prop[0].extended_address +
+		pinfo->mbox_prop[0].extended_size +
+		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
+}
+#else
+/* Non-SDIO 3.0 build: only mailbox 0 gets an extended window */
+static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
+{
+	pinfo->mbox_prop[0].extended_size =
+		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
+}
+/* mailbox 1 is not extended without SDIO 3.0, so nothing to place */
+static inline void
+set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
+{
+
+}
+#endif
+
+/**
+ * set_extended_mbox_window_info() - set extended MBOX window
+ * information for SDIO interconnects
+ * @manf_id: manufacturer id
+ * @pinfo: sdio mailbox info
+ *
+ * Return: none.
+ */
+static void set_extended_mbox_window_info(uint16_t manf_id,
+			 struct hif_device_mbox_info *pinfo)
+{
+	switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
+	case MANUFACTURER_ID_AR6002_BASE:
+		/* MBOX 0 has an extended range.
+		 *
+		 * Fix: the previous code stored the AR6003_V1 address/size
+		 * twice and then immediately overwrote both with the AR6004
+		 * values; only the final stores took effect, so the dead
+		 * assignments have been removed. */
+		pinfo->mbox_prop[0].extended_address =
+			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
+		pinfo->mbox_prop[0].extended_size =
+			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
+		break;
+	case MANUFACTURER_ID_AR6003_BASE:
+		/* MBOX 0 has an extended range */
+		pinfo->mbox_prop[0].extended_address =
+			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
+		pinfo->mbox_prop[0].extended_size =
+			HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
+		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
+		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
+		break;
+	case MANUFACTURER_ID_AR6004_BASE:
+		pinfo->mbox_prop[0].extended_address =
+			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
+		pinfo->mbox_prop[0].extended_size =
+			HIF_MBOX0_EXTENDED_WIDTH_AR6004;
+		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
+		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
+		break;
+	case MANUFACTURER_ID_AR6320_BASE: {
+		uint16_t manu_rev_id =
+			manf_id & MANUFACTURER_ID_AR6K_REV_MASK;
+		pinfo->mbox_prop[0].extended_address =
+			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
+		if (manu_rev_id < 4) {
+			pinfo->mbox_prop[0].extended_size =
+				HIF_MBOX0_EXTENDED_WIDTH_AR6320;
+		} else {
+		/* from rome 2.0(0x504), the width has been extended to 56K */
+			set_extended_mbox_size(pinfo);
+		}
+		set_extended_mbox_address(pinfo);
+		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
+		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
+		break;
+	}
+	case MANUFACTURER_ID_QCA9377_BASE:
+		/* QCA9377 always uses the Rome 2.0 dual-mailbox layout */
+		pinfo->mbox_prop[0].extended_address =
+			HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
+		pinfo->mbox_prop[0].extended_size =
+			HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
+		pinfo->mbox_prop[1].extended_address =
+			pinfo->mbox_prop[0].extended_address +
+			pinfo->mbox_prop[0].extended_size +
+			HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
+		pinfo->mbox_prop[1].extended_size =
+			HIF_MBOX1_EXTENDED_WIDTH_AR6320;
+		pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
+		pinfo->gmbox_size = HIF_GMBOX_WIDTH;
+		break;
+	default:
+		A_ASSERT(false);
+		break;
+	}
+}
+
+/**
+ * hif_configure_device() - configure sdio device
+ * @device: pointer to hif device structure
+ * @opcode: configuration type
+ * @config: in/out configuration buffer (meaning depends on @opcode)
+ * @config_len: size of the buffer at @config in bytes
+ *
+ * Return: QDF_STATUS_SUCCESS on success, an error status otherwise.
+ */
+QDF_STATUS
+hif_configure_device(struct hif_sdio_dev *device,
+		     enum hif_device_config_opcode opcode,
+		     void *config, uint32_t config_len)
+{
+	uint32_t count;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	switch (opcode) {
+	case HIF_DEVICE_GET_MBOX_BLOCK_SIZE:
+		/* caller must provide room for four uint32_t entries */
+		((uint32_t *) config)[0] = HIF_MBOX0_BLOCK_SIZE;
+		((uint32_t *) config)[1] = HIF_MBOX1_BLOCK_SIZE;
+		((uint32_t *) config)[2] = HIF_MBOX2_BLOCK_SIZE;
+		((uint32_t *) config)[3] = HIF_MBOX3_BLOCK_SIZE;
+		break;
+
+	case HIF_DEVICE_GET_MBOX_ADDR:
+		for (count = 0; count < 4; count++) {
+			((uint32_t *) config)[count] =
+				HIF_MBOX_START_ADDR(count);
+		}
+
+		/* fill extended window info only when the caller's buffer
+		 * is the full hif_device_mbox_info structure */
+		if (config_len >= sizeof(struct hif_device_mbox_info)) {
+			set_extended_mbox_window_info((uint16_t) device->func->
+					      device,
+					      (struct hif_device_mbox_info *)
+					      config);
+		}
+
+		break;
+	case HIF_DEVICE_GET_PENDING_EVENTS_FUNC:
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s: configuration opcode %d\n",
+				 __func__, opcode));
+		status = QDF_STATUS_E_FAILURE;
+		break;
+	case HIF_DEVICE_GET_IRQ_PROC_MODE:
+		/* this implementation only supports synchronous IRQ
+		 * processing */
+		*((enum hif_device_irq_mode *) config) =
+			HIF_DEVICE_IRQ_SYNC_ONLY;
+		break;
+	case HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC:
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s: configuration opcode %d\n",
+				 __func__, opcode));
+		status = QDF_STATUS_E_FAILURE;
+		break;
+	case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT:
+		if (!device->scatter_enabled)
+			return QDF_STATUS_E_NOSUPPORT;
+		status =
+			setup_hif_scatter_support(device,
+				  (struct HIF_DEVICE_SCATTER_SUPPORT_INFO *)
+				   config);
+		/* disable scatter on this device if setup failed */
+		if (QDF_IS_STATUS_ERROR(status))
+			device->scatter_enabled = false;
+		break;
+	case HIF_DEVICE_GET_OS_DEVICE:
+		/* pass back a pointer to the SDIO function's "dev" struct */
+		((struct HIF_DEVICE_OS_DEVICE_INFO *) config)->os_dev =
+			&device->func->dev;
+		break;
+	case HIF_DEVICE_POWER_STATE_CHANGE:
+		status =
+			power_state_change_notify(device,
+					  *(HIF_DEVICE_POWER_CHANGE_TYPE *)
+					   config);
+		break;
+	case HIF_DEVICE_GET_IRQ_YIELD_PARAMS:
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+			("%s: configuration opcode %d\n",
+				 __func__, opcode));
+		status = QDF_STATUS_E_FAILURE;
+		break;
+	case HIF_DEVICE_SET_HTC_CONTEXT:
+		device->htc_context = config;
+		break;
+	case HIF_DEVICE_GET_HTC_CONTEXT:
+		if (config == NULL) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s: htc context is NULL\n",
+				__func__));
+			return QDF_STATUS_E_FAILURE;
+		}
+		*(void **)config = device->htc_context;
+		break;
+	case HIF_BMI_DONE:
+	{
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("%s: BMI_DONE\n", __func__));
+		break;
+	}
+	default:
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+			("%s: Unsupported configuration opcode: %d\n",
+			 __func__, opcode));
+		status = QDF_STATUS_E_FAILURE;
+	}
+
+	return status;
+}
+
+/**
+ * hif_sdio_shutdown() - hif-sdio shutdown routine
+ * @hif_ctx: pointer to hif_softc structure
+ *
+ * When the underlying hif_device is still present this only sanity
+ * checks its state; otherwise it resets all cards, unregisters the
+ * bus-driver callbacks and frees any hif_devices[] entries whose
+ * SDIO function is already gone.
+ *
+ * Return: None.
+ */
+void hif_sdio_shutdown(struct hif_softc *hif_ctx)
+{
+	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *hif_device = scn->hif_handle;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Enter\n", __func__));
+	if (hif_device != NULL) {
+		/* a live device must either be in cut power or still
+		 * have its SDIO function attached */
+		AR_DEBUG_ASSERT(hif_device->power_config == HIF_DEVICE_POWER_CUT
+				|| hif_device->func != NULL);
+	} else {
+		int i;
+		/* since we are unloading the driver anyways,
+		 * reset all cards in case the SDIO card is
+		 * externally powered and we are unloading the SDIO
+		 * stack. This avoids the problem when the SDIO stack
+		 * is reloaded and attempts are made to re-enumerate
+		 * a card that is already enumerated */
+		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: hif_shut_down_device, resetting\n",
+				__func__));
+		reset_all_cards();
+
+		/* Unregister with bus driver core */
+		if (registered) {
+			registered = 0;
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			   ("%s: Unregistering with the bus driver\n",
+			   __func__));
+			hif_sdio_remove_callbacks();
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: Unregistered!",
+					__func__));
+		}
+
+		/* free slots whose function was never (re)attached */
+		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+			if (hif_devices[i] && hif_devices[i]->func == NULL) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: Remove pending hif_device %p\n",
+					 __func__, hif_devices[i]));
+				del_hif_device(hif_devices[i]);
+				hif_devices[i] = NULL;
+			}
+		}
+	}
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Exit\n", __func__));
+}
+
+/**
+ * hif_irq_handler() - hif-sdio interrupt handler
+ * @func: pointer to sdio_func
+ *
+ * Invoked by the SDIO core with the host claimed.  Marks the DSR as
+ * in-flight via device->irq_handling (polled by hif_mask_interrupt()),
+ * drops the host claim while HTC's dsrHandler runs, then re-claims it
+ * before returning to the core.
+ *
+ * Return: None.
+ */
+static void hif_irq_handler(struct sdio_func *func)
+{
+	QDF_STATUS status;
+	struct hif_sdio_dev *device;
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Enter\n", __func__));
+
+	device = get_hif_device(func);
+	atomic_set(&device->irq_handling, 1);
+	/* release the host during intr so we can use
+	 * it when we process cmds */
+	sdio_release_host(device->func);
+	status = device->htc_callbacks.dsrHandler(device->htc_callbacks
+						  .context);
+	sdio_claim_host(device->func);
+	atomic_set(&device->irq_handling, 0);
+	AR_DEBUG_ASSERT(status == QDF_STATUS_SUCCESS ||
+			status == QDF_STATUS_E_CANCELED);
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Exit\n", __func__));
+}
+
+/**
+ * startup_task() - startup task to fill ol_softc
+ * @param: pointer to struct hif_sdio_dev
+ *
+ * Informs the driver layer that a new device instance is available.
+ *
+ * Return: 0 always.
+ */
+static int startup_task(void *param)
+{
+	struct hif_sdio_dev *device = (struct hif_sdio_dev *)param;
+	QDF_STATUS status;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: call HTC from startup_task\n",
+			__func__));
+	/* start  up inform DRV layer */
+	status = osdrv_callbacks.device_inserted_handler(
+			osdrv_callbacks.context, device);
+	if (status != QDF_STATUS_SUCCESS)
+		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Device rejected\n", __func__));
+
+	return 0;
+}
+
+/**
+ * enable_task() - notify the driver layer of a device power-up
+ * @param: pointer to struct hif_sdio_dev
+ *
+ * Return: 0 always.
+ */
+static int enable_task(void *param)
+{
+	struct hif_sdio_dev *device = (struct hif_sdio_dev *)param;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+		("%s: call  from resume_task\n",
+		__func__));
+
+	/* only a claimed device with a registered handler is notified */
+	if (!device || !device->claimed_ctx ||
+	    !osdrv_callbacks.device_power_change_handler)
+		return 0;
+
+	if (osdrv_callbacks.device_power_change_handler(device->claimed_ctx,
+							HIF_DEVICE_POWER_UP) !=
+	    QDF_STATUS_SUCCESS)
+		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Device rejected\n",
+			__func__));
+
+	return 0;
+}
+
+/**
+ * foce_drive_strength() - Set sdio drive strength
+ * @func: pointer to sdio_func
+ *
+ * Forces the CCCR drive strength selection to type D and then turns on
+ * the vendor drive-strength enables A/C/D.  Each step bails out early
+ * if the CMD52 access fails.  ("foce" is a historic typo preserved
+ * because the caller references this symbol name.)
+ *
+ * Return: none.
+ */
+static void foce_drive_strength(struct sdio_func *func)
+{
+	unsigned int  addr = SDIO_CCCR_DRIVE_STRENGTH;
+	unsigned char value = 0;
+	uint32_t err;
+
+	err = func0_cmd52_read_byte(func->card, addr, &value);
+	if (err) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+		  ("%s: Read CCCR 0x%02X failed: %d\n",
+			 __func__,
+			(unsigned int) addr,
+			(unsigned int) err));
+		return;
+	}
+
+	/* select drive type D */
+	value = (value &
+		(~(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT))) |
+		SDIO_DTSx_SET_TYPE_D;
+	err = func0_cmd52_write_byte(func->card, addr, value);
+	if (err) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+		  ("%s: Write CCCR 0x%02X to 0x%02X failed: %d\n",
+		    __func__,
+			(unsigned int) addr,
+			(unsigned int) value,
+			(unsigned int) err));
+		return;
+	}
+
+	/* read-modify-write the vendor drive-strength enable register */
+	addr = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR;
+	value = 0;
+	err = func0_cmd52_read_byte(func->card, addr, &value);
+	if (err) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("Read CCCR 0x%02X failed: %d\n",
+			(unsigned int) addr,
+			(unsigned int) err));
+		return;
+	}
+
+	value = (value &
+		(~CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK))
+		 | CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A
+		 | CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C
+		 | CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D;
+	err = func0_cmd52_write_byte(func->card, addr, value);
+	if (err) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+		  ("Write CCCR 0x%02X to 0x%02X failed: %d\n",
+			(unsigned int) addr,
+			(unsigned int) value,
+			(unsigned int) err));
+	}
+}
+
+/**
+ * write_one_cccr() - write a single CCCR register and log the outcome
+ * @func: pointer to sdio_func
+ * @addr: CCCR register address to write
+ * @value: value to write
+ *
+ * Helper extracted from write_cccr(): the original repeated this
+ * write-and-log sequence four times verbatim.
+ *
+ * Return: none.
+ */
+static void write_one_cccr(struct sdio_func *func, uint32_t addr,
+			   uint32_t value)
+{
+	uint32_t err = func0_cmd52_write_byte(func->card, addr, value);
+	if (err) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("Write CCCR 0x%02X to 0x%02X failed: %d\n",
+			(unsigned int)addr,
+			(unsigned int)value,
+			(unsigned int)err));
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("Write CCCR 0x%02X to 0x%02X OK\n",
+			(unsigned int)addr,
+			(unsigned int)value));
+	}
+}
+
+/**
+ * write_cccr() - write CCCR
+ * @func: pointer to sdio_func
+ *
+ * Applies up to four module-parameter-specified CCCR writes
+ * (writecccr1..4 / writecccr1value..4value); a zero address means
+ * "not configured" and is skipped.
+ *
+ * Return: none.
+ */
+static void write_cccr(struct sdio_func *func)
+{
+	if (writecccr1)
+		write_one_cccr(func, writecccr1, writecccr1value);
+	if (writecccr2)
+		write_one_cccr(func, writecccr2, writecccr2value);
+	if (writecccr3)
+		write_one_cccr(func, writecccr3, writecccr3value);
+	if (writecccr4)
+		write_one_cccr(func, writecccr4, writecccr4value);
+}
+
+/**
+ * hif_device_inserted() - hif-sdio driver probe handler
+ * @func: pointer to sdio_func
+ * @id: pointer to sdio_device_id
+ *
+ * Re-attaches the function to a device that was previously power-cut,
+ * or allocates a fresh hif_devices[] slot and applies the
+ * module-parameter controlled host tweaks (drive strength, CCCR
+ * writes, clock, bus width).  Finally initializes the request pools
+ * and enables the function.
+ *
+ * Fix: the bus-width error paths previously returned while the host
+ * was still claimed; sdio_release_host() is now called on every error
+ * return inside the claimed region.
+ *
+ * Return: 0 on success, error number otherwise.
+ */
+static int hif_device_inserted(struct sdio_func *func,
+			       const struct sdio_device_id *id)
+{
+	int i;
+	int ret;
+	struct hif_sdio_dev *device = NULL;
+	int count;
+	uint32_t clock, clock_set = 12500000;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: Function: "
+			"0x%X, Vendor ID: 0x%X, Device ID: 0x%X, "
+			"block size: 0x%X/0x%X\n",
+			 __func__,
+			 func->num, func->vendor, id->device,
+			 func->max_blksize,
+			 func->cur_blksize));
+	/* dma_mask should not be NULL, otherwise dma_map_single
+	 * will crash. TODO: check why dma_mask is NULL here */
+	if (func->dev.dma_mask == NULL) {
+		static u64 dma_mask = 0xFFFFFFFF;
+		func->dev.dma_mask = &dma_mask;
+	}
+	/* first, see if this function belongs to a device that was
+	 * power-cut earlier on the same host: if so, re-attach it */
+	for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+		struct hif_sdio_dev *hifdevice = hif_devices[i];
+		if (hifdevice && hifdevice->power_config == HIF_DEVICE_POWER_CUT
+		    && hifdevice->host == func->card->host) {
+			hifdevice->func = func;
+			hifdevice->power_config = HIF_DEVICE_POWER_UP;
+			sdio_set_drvdata(func, hifdevice);
+			device = get_hif_device(func);
+
+			if (device->is_suspend) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: Resume from suspend",
+					__func__));
+				ret = reinit_sdio(device);
+			}
+			break;
+		}
+	}
+
+	if (device == NULL) {
+		if (add_hif_device(func) == NULL)
+			return QDF_STATUS_E_FAILURE;
+		device = get_hif_device(func);
+
+		/* claim a free global slot for the new device */
+		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+			if (hif_devices[i] == NULL) {
+				hif_devices[i] = device;
+				break;
+			}
+		}
+		if (i == MAX_HIF_DEVICES) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: No more hif_devices[] slot for %p",
+					 __func__, device));
+		}
+
+		device->id = id;
+		device->host = func->card->host;
+		device->is_disabled = true;
+/* TODO: MMC SDIO3.0 Setting should also be modified in ReInit()
+ * function when Power Manage work. */
+		sdio_claim_host(func);
+		/* force driver strength to type D */
+		if (forcedriverstrength == 1)
+			foce_drive_strength(func);
+		write_cccr(func);
+		/* Set MMC Clock */
+		if (mmcclock > 0)
+			clock_set = mmcclock;
+		if (sdio_card_highspeed(func->card))
+			clock = 50000000;
+		else
+			clock = func->card->cis.max_dtr;
+		if (clock > device->host->f_max)
+			clock = device->host->f_max;
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("%s: Dumping clocks (%d,%d)\n",
+		       __func__, func->card->cis.max_dtr,
+		       device->host->f_max));
+
+		/* only when mmcclock module parameter is specified,
+		 * set the clock explicitly
+		 */
+		if (mmcclock > 0) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("Decrease host clock from %d to %d(%d,%d)\n",
+					 clock, clock_set,
+					 func->card->cis.max_dtr,
+					 device->host->f_max));
+			device->host->ios.clock = clock_set;
+			device->host->ops->set_ios(device->host,
+						   &device->host->ios);
+		}
+		/* Set SDIO3.0 */
+		/* Set MMC Bus Width: 1-1Bit, 4-4Bit, 8-8Bit */
+		if (mmcbuswidth > 0) {
+			if (mmcbuswidth == 1) {
+				ret =
+					func0_cmd52_write_byte(func->card,
+							  SDIO_CCCR_IF,
+							  SDIO_BUS_CD_DISABLE
+							  |
+							  SDIO_BUS_WIDTH_1BIT);
+				if (ret) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+						("%s: CMD52 to set bus width failed: %d\n",
+							 __func__,
+							 ret));
+					/* fix: do not leak the host claim */
+					sdio_release_host(func);
+					return ret;
+				}
+				device->host->ios.bus_width =
+					MMC_BUS_WIDTH_1;
+				device->host->ops->set_ios(device->host,
+							   &device->
+							   host->ios);
+			} else if (mmcbuswidth == 4
+				   && (device->host->
+				       caps & MMC_CAP_4_BIT_DATA)) {
+				ret =
+					func0_cmd52_write_byte(func->card,
+						       SDIO_CCCR_IF,
+						       SDIO_BUS_CD_DISABLE
+						       |
+						       SDIO_BUS_WIDTH_4BIT);
+				if (ret) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: CMD52 to bus width failed: %d\n",
+					 __func__,
+						 ret));
+					/* fix: do not leak the host claim */
+					sdio_release_host(func);
+					return ret;
+				}
+				device->host->ios.bus_width =
+					MMC_BUS_WIDTH_4;
+				device->host->ops->set_ios(device->host,
+							   &device->
+							   host->ios);
+			} else if (mmcbuswidth == 8
+				 && (device->host->
+				     caps & MMC_CAP_8_BIT_DATA)) {
+				ret =
+					func0_cmd52_write_byte(func->card,
+						       SDIO_CCCR_IF,
+						       SDIO_BUS_CD_DISABLE
+						       |
+						       SDIO_BUS_WIDTH_8BIT);
+				if (ret) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: CMD52 to bus width failed: %d\n",
+							 __func__,
+							 ret));
+					/* fix: do not leak the host claim */
+					sdio_release_host(func);
+					return ret;
+				}
+				device->host->ios.bus_width =
+					MMC_BUS_WIDTH_8;
+				device->host->ops->set_ios(device->host,
+							   &device->
+							   host->ios);
+			} else {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: MMC bus width %d is not supported.\n",
+						 __func__,
+						 mmcbuswidth));
+				/* fix: do not leak the host claim */
+				sdio_release_host(func);
+				return QDF_STATUS_E_FAILURE;
+			}
+			AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+				("%s: Set MMC bus width to %dBit.\n",
+					 __func__, mmcbuswidth));
+		}
+		if (debugcccr)
+			hif_dump_cccr(device);
+
+		sdio_release_host(func);
+	}
+
+	qdf_spinlock_create(&device->lock);
+
+	qdf_spinlock_create(&device->asynclock);
+
+	DL_LIST_INIT(&device->scatter_req_head);
+
+	if (!nohifscattersupport) {
+		/* try to allow scatter operation on all instances,
+		 * unless globally overridden */
+		device->scatter_enabled = true;
+	} else
+		device->scatter_enabled = false;
+
+	/* Initialize the bus requests to be used later */
+	qdf_mem_zero(device->bus_request, sizeof(device->bus_request));
+	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) {
+		sema_init(&device->bus_request[count].sem_req, 0);
+		hif_free_bus_request(device, &device->bus_request[count]);
+	}
+	sema_init(&device->sem_async, 0);
+
+	ret = hif_enable_func(device, func);
+
+	return (ret == QDF_STATUS_SUCCESS || ret == QDF_STATUS_E_PENDING)
+						? 0 : QDF_STATUS_E_FAILURE;
+}
+
+/**
+ * hif_ack_interrupt() - Acknowledge hif device irq
+ * @device: pointer to struct hif_sdio_dev
+ *
+ * This should translate to an acknowledgment to the bus driver indicating that
+ * the previous interrupt request has been serviced and the all the relevant
+ * sources have been cleared. HTC is ready to process more interrupts.
+ * This should prevent the bus driver from raising an interrupt unless the
+ * previous one has been serviced and acknowledged using the previous API.
+ *
+ * Return: None.
+ */
+void hif_ack_interrupt(struct hif_sdio_dev *device)
+{
+	AR_DEBUG_ASSERT(device != NULL);
+
+	/* Acknowledge our function IRQ — intentionally a no-op for this
+	 * SDIO implementation (no explicit ack is required here) */
+}
+
+/**
+ * hif_un_mask_interrupt() - Re-enable hif device irq
+ * @device: pointer to struct hif_sdio_dev
+ *
+ * Registers hif_irq_handler() as the SDIO function's IRQ handler.
+ * When the brokenirq module parameter is set, MMC_CAP_SDIO_IRQ is
+ * cleared on the host so the MMC core falls back to polling rather
+ * than using the interrupt line.
+ *
+ * Return: None.
+ */
+void hif_un_mask_interrupt(struct hif_sdio_dev *device)
+{
+	int ret;
+
+	AR_DEBUG_ASSERT(device != NULL);
+	AR_DEBUG_ASSERT(device->func != NULL);
+
+	HIF_ENTER();
+	/*
+	 * On HP Elitebook 8460P, interrupt mode is not stable
+	 * in high throughput, so polling method should be used
+	 * instead of interrupt mode. */
+	if (brokenirq) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+			("%s: Using broken IRQ mode\n",
+			__func__));
+		/* disable IRQ support even the capability exists */
+		device->func->card->host->caps &= ~MMC_CAP_SDIO_IRQ;
+	}
+	/* Register the IRQ Handler */
+	sdio_claim_host(device->func);
+	ret = sdio_claim_irq(device->func, hif_irq_handler);
+	sdio_release_host(device->func);
+	AR_DEBUG_ASSERT(ret == 0);
+	HIF_EXIT();
+}
+
+/**
+ * hif_mask_interrupt() - Disable hif device irq
+ * @device: pointer to struct hif_sdio_dev
+ *
+ * Waits (releasing the host between polls) for any in-flight DSR
+ * signalled via device->irq_handling to finish, then releases the
+ * SDIO IRQ registered by hif_un_mask_interrupt().
+ *
+ * Return: None.
+ */
+void hif_mask_interrupt(struct hif_sdio_dev *device)
+{
+	int ret;
+	AR_DEBUG_ASSERT(device != NULL);
+	AR_DEBUG_ASSERT(device->func != NULL);
+
+	HIF_ENTER();
+
+	/* Mask our function IRQ */
+	sdio_claim_host(device->func);
+	/* wait for a running hif_irq_handler() to complete; drop the
+	 * host claim while sleeping so the handler can re-claim it */
+	while (atomic_read(&device->irq_handling)) {
+		sdio_release_host(device->func);
+		schedule_timeout_interruptible(HZ / 10);
+		sdio_claim_host(device->func);
+	}
+	ret = sdio_release_irq(device->func);
+	sdio_release_host(device->func);
+	if (ret) {
+		if (ret == -ETIMEDOUT) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s: Timeout to mask interrupt\n",
+				__func__));
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Unable to mask interrupt %d\n",
+					 __func__, ret));
+			AR_DEBUG_ASSERT(ret == 0);
+		}
+	}
+	HIF_EXIT();
+}
+
+/**
+ * hif_allocate_bus_request() - Pop a bus request off the free list
+ * @device: pointer to struct hif_sdio_dev
+ *
+ * Return: pointer to a free struct bus_request, or NULL when the
+ * free list is exhausted.
+ */
+struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device)
+{
+	struct bus_request *busrequest;
+
+	/* detach the head of the free queue under the device lock */
+	qdf_spin_lock_irqsave(&device->lock);
+	busrequest = device->bus_request_free_queue;
+	if (busrequest)
+		device->bus_request_free_queue = busrequest->next;
+	qdf_spin_unlock_irqrestore(&device->lock);
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: hif_allocate_bus_request: 0x%p\n",
+			__func__, busrequest));
+
+	return busrequest;
+}
+
+/**
+ * hif_free_bus_request() - Push a bus request back onto the free list
+ * @device: pointer to struct hif_sdio_dev
+ * @busrequest: request being returned to the pool
+ *
+ * Return: None.
+ */
+void hif_free_bus_request(struct hif_sdio_dev *device,
+			  struct bus_request *busrequest)
+{
+	AR_DEBUG_ASSERT(busrequest != NULL);
+
+	/* push onto the head of the free queue under the device lock */
+	qdf_spin_lock_irqsave(&device->lock);
+	busrequest->next = device->bus_request_free_queue;
+	busrequest->inusenext = NULL;
+	device->bus_request_free_queue = busrequest;
+	qdf_spin_unlock_irqrestore(&device->lock);
+}
+
+/**
+ * hif_disable_func() - stop async processing and disable the SDIO function
+ * @device: hif sdio device (re-derived from @func below)
+ * @func: SDIO function to disable
+ *
+ * Shuts down the async I/O task if one is running, disables the SDIO
+ * function and, when reset_sdio_on_unload is set, issues an I/O abort
+ * reset via CCCR so the card can be re-enumerated without physical
+ * removal.  Scatter resources are always cleaned up.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise.
+ */
+static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
+		struct sdio_func *func)
+{
+	int ret;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	HIF_ENTER();
+	/* re-derive the device from func (overrides the passed pointer) */
+	device = get_hif_device(func);
+	if (!IS_ERR(device->async_task)) {
+		/* signal the async task to exit and wait for it to finish */
+		init_completion(&device->async_completion);
+		device->async_shutdown = 1;
+		up(&device->sem_async);
+		wait_for_completion(&device->async_completion);
+		device->async_task = NULL;
+		sema_init(&device->sem_async, 0);
+	}
+	/* Disable the card */
+	sdio_claim_host(device->func);
+	ret = sdio_disable_func(device->func);
+	if (ret)
+		status = QDF_STATUS_E_FAILURE;
+
+	if (reset_sdio_on_unload && status == QDF_STATUS_SUCCESS) {
+		/* reset the SDIO interface. It's useful in automated testing
+		 * where the card does not need to be removed at the end
+		 * of the test. It is expected that the user will also
+		 * un/reload the host controller driver to force the bus
+		 * driver to re-enumerate the slot */
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+				("%s: reseting SDIO card",
+				__func__));
+
+		/* sdio_f0_writeb() cannot be used here, this allows access
+		 * to undefined registers in the range of: 0xF0-0xFF */
+
+		/* write the RES bit (bit 3) of the I/O abort register */
+		ret =
+			func0_cmd52_write_byte(device->func->card,
+						SDIO_CCCR_ABORT,
+					       (1 << 3));
+		if (ret) {
+			status = QDF_STATUS_E_FAILURE;
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: reset failed : %d\n",
+				__func__, ret));
+		}
+	}
+
+	sdio_release_host(device->func);
+
+	if (status == QDF_STATUS_SUCCESS)
+		device->is_disabled = true;
+	cleanup_hif_scatter_resources(device);
+
+	HIF_EXIT();
+
+	return status;
+}
+
+/**
+ * hif_enable_func() - enable the SDIO function and start service tasks
+ * @device: hif sdio device (re-derived from @func below)
+ * @func: SDIO function to enable
+ *
+ * When the function is currently disabled: programs 4-bit async IRQ
+ * mode on the supported chips, optionally sets the async interrupt
+ * delay (asyncintdelay module parameter), enables the function, sets
+ * the mailbox block size and creates the async I/O kthread.  A second
+ * kthread is then spawned to run either startup_task (device not yet
+ * claimed) or enable_task (re-enable of a claimed device).
+ *
+ * Return: QDF_STATUS_SUCCESS when startup_task was scheduled,
+ * QDF_STATUS_E_PENDING when completion is deferred to enable_task,
+ * QDF_STATUS_E_FAILURE on error.
+ */
+static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device,
+				struct sdio_func *func)
+{
+	struct task_struct *task;
+	const char *task_name = NULL;
+	int (*taskFunc)(void *) = NULL;
+	int ret = QDF_STATUS_SUCCESS;
+
+	HIF_ENTER("sdio_func 0x%p", func);
+
+	/* re-derive the device from func (overrides the passed pointer) */
+	device = get_hif_device(func);
+
+	if (device->is_disabled) {
+		int setAsyncIRQ = 0;
+		__u16 manufacturer_id =
+			device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK;
+		/* enable the SDIO function */
+		sdio_claim_host(func);
+		/* enable 4-bit ASYNC interrupt on AR6003x or later devices */
+		if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) {
+			setAsyncIRQ = 1;
+			ret =
+				func0_cmd52_write_byte(func->card,
+					CCCR_SDIO_IRQ_MODE_REG_AR6003,
+					SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003);
+		} else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE
+			   || manufacturer_id == MANUFACTURER_ID_QCA9377_BASE) {
+			unsigned char data = 0;
+			setAsyncIRQ = 1;
+			ret =
+				func0_cmd52_read_byte(func->card,
+					      CCCR_SDIO_IRQ_MODE_REG_AR6320,
+						      &data);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+					("%s: failed to read irq reg %d\n",
+						 __func__, ret));
+				sdio_release_host(func);
+				return QDF_STATUS_E_FAILURE;
+			}
+			data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
+			ret =
+				func0_cmd52_write_byte(func->card,
+					       CCCR_SDIO_IRQ_MODE_REG_AR6320,
+						       data);
+		}
+		if (setAsyncIRQ) {
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: failed to enable ASYNC IRQ mode %d\n",
+						 __func__, ret));
+				sdio_release_host(func);
+				return QDF_STATUS_E_FAILURE;
+			}
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+				("%s: 4-bit ASYNC IRQ mode enabled\n",
+				 __func__));
+		}
+
+		/* set CCCR 0xF0[7:6] to increase async interrupt delay clock to
+		 * fix interrupt missing issue on dell 8460p */
+		if (asyncintdelay != 0) {
+			unsigned char data = 0;
+			ret =
+				func0_cmd52_read_byte(func->card,
+					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+						      &data);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: failed to read CCCR %d, val is %d\n",
+					__func__,
+					 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+					 ret));
+				sdio_release_host(func);
+				return QDF_STATUS_E_FAILURE;
+			}
+			data = (data & ~CCCR_SDIO_ASYNC_INT_DELAY_MASK) |
+			       ((asyncintdelay <<
+				 CCCR_SDIO_ASYNC_INT_DELAY_LSB) &
+				CCCR_SDIO_ASYNC_INT_DELAY_MASK);
+			ret =
+				func0_cmd52_write_byte(func->card,
+					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+					      data);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("%s: failed to write CCCR %d, val is %d\n",
+					__func__,
+					 CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+					 ret));
+				sdio_release_host(func);
+				return QDF_STATUS_E_FAILURE;
+			}
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			   ("%s: Set async interrupt delay clock as %d.\n",
+			   __func__,
+			   asyncintdelay));
+		}
+		/* give us some time to enable, in ms */
+		func->enable_timeout = 100;
+		ret = sdio_enable_func(func);
+		if (ret) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Unable to enable AR6K: 0x%X\n",
+				 __func__, ret));
+			sdio_release_host(func);
+			return QDF_STATUS_E_FAILURE;
+		}
+		ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+
+		/* NOTE(review): the sdio_memcpy_toio() calls below reuse
+		 * 'ret', clobbering the sdio_set_block_size() result that
+		 * is checked after sdio_release_host(); when modstrength
+		 * is set, a block-size failure can be masked (or a toio
+		 * failure misreported as a block-size error) — confirm
+		 * and use a separate status variable. */
+		if (modstrength) {
+			unsigned int address = WINDOW_DATA_ADDRESS;
+			unsigned int value = 0x0FFF;
+			ret =
+				sdio_memcpy_toio(device->func, address,
+						&value, 4);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("memcpy_toio 0x%x 0x%x error:%d\n",
+				       address, value, ret));
+			} else {
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("memcpy_toio, 0x%x 0x%x OK\n", address,
+				       value));
+				address = WINDOW_WRITE_ADDR_ADDRESS;
+				value = 0x50F8;
+				ret =
+					sdio_memcpy_toio(device->func, address,
+							 &value, 4);
+				if (ret)
+					AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					   ("memcpy_toio 0x%x 0x%x error:%d\n",
+						address, value, ret));
+				else
+					AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+						("memcpy_toio, 0x%x 0x%x OK\n",
+					       address, value));
+			}
+		};
+		sdio_release_host(func);
+		if (ret) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("%s: can't set block size 0x%x  AR6K: 0x%X\n",
+				 __func__, HIF_MBOX_BLOCK_SIZE,
+				 ret));
+			return QDF_STATUS_E_FAILURE;
+		}
+		device->is_disabled = false;
+		/* create async I/O thread */
+		if (!device->async_task) {
+			device->async_shutdown = 0;
+			device->async_task = kthread_create(async_task,
+							    (void *)device,
+							    "AR6K Async");
+			if (IS_ERR(device->async_task)) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: to create async task\n",
+						 __func__));
+				return QDF_STATUS_E_FAILURE;
+			}
+			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+					("%s: start async task\n",
+					__func__));
+			wake_up_process(device->async_task);
+		}
+	}
+
+	/* pick the notification task: first-time startup vs re-enable */
+	if (!device->claimed_ctx) {
+		taskFunc = startup_task;
+		task_name = "AR6K startup";
+		ret = QDF_STATUS_SUCCESS;
+	} else {
+		taskFunc = enable_task;
+		task_name = "AR6K enable";
+		ret = QDF_STATUS_E_PENDING;
+	}
+	/* create resume thread */
+	task = kthread_create(taskFunc, (void *)device, task_name);
+	if (IS_ERR(task)) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: to create enabel task\n",
+				 __func__));
+		return QDF_STATUS_E_FAILURE;
+	}
+	wake_up_process(task);
+
+	/* task will call the enable func, indicate pending */
+	HIF_EXIT();
+
+	return ret;
+}
+
+/**
+ * hif_device_suspend() - bus suspend callback for the SDIO HIF layer
+ * @dev: generic device embedded in the sdio function
+ *
+ * Notifies the upper layer via device_suspend_handler, then selects the
+ * sleep mode (host capability, forced WOW, forced deep sleep or forced
+ * cut power per the forcesleepmode module knob) and programs the MMC
+ * host PM flags accordingly.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int hif_device_suspend(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	int ret = QDF_STATUS_SUCCESS;
+#if defined(MMC_PM_KEEP_POWER)
+	mmc_pm_flag_t pm_flag = 0;
+	HIF_DEVICE_POWER_CHANGE_TYPE config;
+	struct mmc_host *host = NULL;
+#endif
+
+	struct hif_sdio_dev *device = get_hif_device(func);
+
+#if defined(MMC_PM_KEEP_POWER)
+	if (device && device->func)
+		host = device->func->card->host;
+#endif
+
+	HIF_ENTER();
+	if (device && device->claimed_ctx
+	    && osdrv_callbacks.device_suspend_handler) {
+		device->is_suspend = true;
+		status =
+		osdrv_callbacks.device_suspend_handler(device->claimed_ctx);
+
+#if defined(MMC_PM_KEEP_POWER)
+		switch (forcesleepmode) {
+		case 0: /* depend on sdio host pm capability */
+			pm_flag = sdio_get_host_pm_caps(func);
+			break;
+		case 1: /* force WOW */
+			pm_flag |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+			break;
+		case 2: /* force DeepSleep */
+			pm_flag &= ~MMC_PM_WAKE_SDIO_IRQ;
+			pm_flag |= MMC_PM_KEEP_POWER;
+			break;
+		case 3: /* force CutPower */
+			/* clear both flags so the !MMC_PM_KEEP_POWER
+			 * cut-power branch below is taken */
+			pm_flag &=
+				~(MMC_PM_WAKE_SDIO_IRQ | MMC_PM_KEEP_POWER);
+			break;
+		}
+		if (!(pm_flag & MMC_PM_KEEP_POWER)) {
+			/* cut power support */
+			/* setting power_config before hif_configure_device to
+			 * skip sdio r/w when suspending with cut power */
+			AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+				("hif_device_suspend: cut power enter\n"));
+			config = HIF_DEVICE_POWER_CUT;
+			device->power_config = config;
+			if ((device->claimed_ctx != NULL)
+			    && osdrv_callbacks.device_removed_handler) {
+				status = osdrv_callbacks.
+						device_removed_handler(device->
+								claimed_ctx,
+								device);
+			}
+			ret = hif_configure_device(device,
+					   HIF_DEVICE_POWER_STATE_CHANGE,
+					   &config,
+					   sizeof
+					   (HIF_DEVICE_POWER_CHANGE_TYPE));
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				   ("%s: hif config device failed: %d\n",
+					 __func__, ret));
+				return ret;
+			}
+
+			hif_mask_interrupt(device);
+			device->device_state = HIF_DEVICE_STATE_CUTPOWER;
+			AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+				("hif_device_suspend: cut power success\n"));
+			return ret;
+		} else {
+			ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+			if (ret) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				  ("%s: set sdio pm flags failed %d\n",
+					 __func__, ret));
+				return ret;
+			}
+
+			/* TODO:WOW support */
+			if (pm_flag & MMC_PM_WAKE_SDIO_IRQ) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("hif_device_suspend: wow enter\n"));
+				config = HIF_DEVICE_POWER_DOWN;
+				ret = hif_configure_device(device,
+					   HIF_DEVICE_POWER_STATE_CHANGE,
+					   &config,
+					   sizeof
+					   (HIF_DEVICE_POWER_CHANGE_TYPE));
+
+				if (ret) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("%s: hif config dev failed: %d\n",
+						 __func__, ret));
+					return ret;
+				}
+				ret =
+					sdio_set_host_pm_flags(func,
+						       MMC_PM_WAKE_SDIO_IRQ);
+				if (ret) {
+					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+						("%s: set sdio pm flags %d\n",
+							 __func__, ret));
+					return ret;
+				}
+				hif_mask_interrupt(device);
+				device->device_state = HIF_DEVICE_STATE_WOW;
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("hif_device_suspend: wow success\n"));
+				return ret;
+			} else {
+				/* deep sleep support */
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("%s: deep sleep enter\n",
+					 __func__));
+
+				/*
+				 * Wait for some async clean handler finished.
+				 * These handlers are part of vdev disconnect.
+				 * As handlers are async,sleep is not suggested,
+				 * some blocking method may be a good choice.
+				 * But before adding callback function to these
+				 * handler, sleep wait is a simple method.
+				 */
+				msleep(100);
+				hif_mask_interrupt(device);
+				device->device_state =
+					HIF_DEVICE_STATE_DEEPSLEEP;
+				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+					("%s: deep sleep done\n",
+					 __func__));
+				return ret;
+			}
+		}
+#endif
+	}
+
+	HIF_EXIT();
+
+	switch (status) {
+	case QDF_STATUS_SUCCESS:
+#if defined(MMC_PM_KEEP_POWER)
+		if (host) {
+			host->pm_flags &=
+				~(MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ);
+		}
+#endif
+		return 0;
+	case QDF_STATUS_E_BUSY:
+#if defined(MMC_PM_KEEP_POWER)
+		if (host) {
+			/* WAKE_SDIO_IRQ in order to wake up by DAT1 */
+			host->pm_flags |=
+				(MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ);
+			host->pm_flags &= host->pm_caps;
+		}
+		return 0;
+#else
+		return -EBUSY; /* Hack to support deep sleep and wow */
+#endif
+	default:
+		device->is_suspend = false;
+
+		return QDF_STATUS_E_FAILURE;
+	}
+}
+
+/**
+ * hif_device_resume() - bus resume callback for the SDIO HIF layer
+ * @dev: generic device embedded in the sdio function
+ *
+ * Restores device power/interrupt state according to the state recorded
+ * at suspend time (cut power, deep sleep or WOW), then notifies the
+ * upper layer via device_resume_handler.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int hif_device_resume(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	HIF_DEVICE_POWER_CHANGE_TYPE config;
+	struct hif_sdio_dev *device;
+
+	device = get_hif_device(func);
+
+	if (device->device_state == HIF_DEVICE_STATE_CUTPOWER) {
+		config = HIF_DEVICE_POWER_UP;
+		status = hif_configure_device(device,
+					      HIF_DEVICE_POWER_STATE_CHANGE,
+					      &config,
+					      sizeof
+					      (HIF_DEVICE_POWER_CHANGE_TYPE));
+		if (status) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: hif_configure_device failed\n",
+				 __func__));
+			return status;
+		}
+	} else if (device->device_state == HIF_DEVICE_STATE_DEEPSLEEP) {
+		hif_un_mask_interrupt(device);
+	} else if (device->device_state == HIF_DEVICE_STATE_WOW) {
+		/*TODO:WOW support */
+		hif_un_mask_interrupt(device);
+	}
+
+	/*
+	 * device_resume_handler do nothing now. If some operation
+	 * should be added to this handler in power cut
+	 * resume flow, do make sure those operation is not
+	 * depent on what startup_task has done,or the resume
+	 * flow will block.
+	 */
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: +hif_device_resume\n",
+			 __func__));
+	/* check the handler that is actually invoked; the original code
+	 * tested device_suspend_handler before calling
+	 * device_resume_handler, risking a NULL dereference */
+	if (device && device->claimed_ctx
+	    && osdrv_callbacks.device_resume_handler) {
+		status =
+		osdrv_callbacks.device_resume_handler(device->claimed_ctx);
+		device->is_suspend = false;
+	}
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: -hif_device_resume\n",
+			 __func__));
+	device->device_state = HIF_DEVICE_STATE_ON;
+
+	return QDF_IS_STATUS_SUCCESS(status) ? 0 : status;
+}
+
+/**
+ * hif_device_removed() - sdio card removal handler
+ * @func: sdio function that disappeared
+ *
+ * In cut-power mode the function object belongs to the mmc stack, so
+ * only the local reference is dropped.  Otherwise the device is removed
+ * from the global table, the upper layer is notified, interrupts are
+ * masked, the function is disabled and the hif device is freed.
+ *
+ * Return: none
+ */
+static void hif_device_removed(struct sdio_func *func)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct hif_sdio_dev *device;
+	AR_DEBUG_ASSERT(func != NULL);
+
+	HIF_ENTER();
+
+	device = get_hif_device(func);
+
+	if (device->power_config == HIF_DEVICE_POWER_CUT) {
+		device->func = NULL;    /* func will be free by mmc stack */
+		return;         /* Just return for cut-off mode */
+	} else {
+		int i;
+		/* drop every global-table slot pointing at this device */
+		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
+			if (hif_devices[i] == device)
+				hif_devices[i] = NULL;
+		}
+	}
+
+	if (device->claimed_ctx != NULL)
+		status =
+		osdrv_callbacks.device_removed_handler(device->claimed_ctx,
+							    device);
+
+	hif_mask_interrupt(device);
+
+	/* is_disabled set means the function was never enabled (or was
+	 * already disabled); only call hif_disable_func otherwise */
+	if (device->is_disabled)
+		device->is_disabled = false;
+	else
+		status = hif_disable_func(device, func);
+
+
+	del_hif_device(device);
+	if (status != QDF_STATUS_SUCCESS)
+		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
+		  ("%s: Unable to disable sdio func\n",
+		   __func__));
+
+	HIF_EXIT();
+}
+
+/*
+ * This should be moved to AR6K HTC layer.
+ */
+/**
+ * hif_wait_for_pending_recv() - drain in-flight receive interrupts
+ * @device: hif device context
+ *
+ * Waits for the irq handler to finish, then polls HOST_INT_STATUS
+ * (bit 0) up to 10 times, yielding between reads, until no receive
+ * interrupt is pending.
+ *
+ * Return: always QDF_STATUS_SUCCESS; a timeout is only logged, not
+ * reported to the caller.
+ */
+QDF_STATUS hif_wait_for_pending_recv(struct hif_sdio_dev *device)
+{
+	int32_t cnt = 10;
+	uint8_t host_int_status;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	do {
+		while (atomic_read(&device->irq_handling)) {
+			/* wait until irq handler finished all the jobs */
+			schedule_timeout_interruptible(HZ / 10);
+		}
+		/* check if there is any pending irq due to force done */
+		host_int_status = 0;
+		status = hif_read_write(device, HOST_INT_STATUS_ADDRESS,
+					(uint8_t *) &host_int_status,
+					sizeof(host_int_status),
+					HIF_RD_SYNC_BYTE_INC, NULL);
+		/* on read failure treat the status as "nothing pending" */
+		host_int_status =
+			QDF_IS_STATUS_SUCCESS(status) ?
+				(host_int_status & (1 << 0)) : 0;
+		if (host_int_status)
+			/* wait until irq handler finishes its job */
+			schedule_timeout_interruptible(1);
+	} while (host_int_status && --cnt > 0);
+
+	if (host_int_status && cnt == 0)
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: Unable clear up pending IRQ\n",
+				 __func__));
+
+	return QDF_STATUS_SUCCESS;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) && \
+		 !defined(WITH_BACKPORTS)
+/**
+ * hif_sdio_set_drvdata() - set driver data
+ * @func: pointer to sdio function
+ * @hifdevice: pointer to hif device
+ *
+ * Wrapper hiding the kernel API change: before 3.15 sdio_set_drvdata()
+ * returned an int; afterwards it returns void.
+ *
+ * Return: non zero for success.
+ */
+static inline int hif_sdio_set_drvdata(struct sdio_func *func,
+					struct hif_sdio_dev *hifdevice)
+{
+	return sdio_set_drvdata(func, hifdevice);
+}
+#else
+/* >= 3.15 (or backports): sdio_set_drvdata() returns void, so always
+ * report success */
+static inline int hif_sdio_set_drvdata(struct sdio_func *func,
+					struct hif_sdio_dev *hifdevice)
+{
+	sdio_set_drvdata(func, hifdevice);
+	return 0;
+}
+#endif
+
+/**
+ * add_hif_device() - allocate a hif device and bind it to an sdio func
+ * @func: sdio function to attach to
+ *
+ * Allocates and zeroes the hif_sdio_dev (plus the DMA bounce buffer when
+ * HIF_USE_DMA_BOUNCE_BUFFER is set) and stores it as sdio driver data.
+ * Allocation failures are handled instead of relying on debug asserts,
+ * which would dereference NULL in non-debug builds.
+ *
+ * Return: the new hif device, or NULL on allocation failure
+ */
+static struct hif_sdio_dev *add_hif_device(struct sdio_func *func)
+{
+	struct hif_sdio_dev *hifdevice = NULL;
+	int ret = 0;
+
+	HIF_ENTER();
+	AR_DEBUG_ASSERT(func != NULL);
+	hifdevice = (struct hif_sdio_dev *) qdf_mem_malloc(sizeof(
+							struct hif_sdio_dev));
+	if (hifdevice == NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: device alloc failed\n", __func__));
+		return NULL;
+	}
+	qdf_mem_zero(hifdevice, sizeof(*hifdevice));
+#if HIF_USE_DMA_BOUNCE_BUFFER
+	hifdevice->dma_buffer = qdf_mem_malloc(HIF_DMA_BUFFER_SIZE);
+	if (hifdevice->dma_buffer == NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: dma buffer alloc failed\n", __func__));
+		qdf_mem_free(hifdevice);
+		return NULL;
+	}
+#endif
+	hifdevice->func = func;
+	hifdevice->power_config = HIF_DEVICE_POWER_UP;
+	hifdevice->device_state = HIF_DEVICE_STATE_ON;
+	ret = hif_sdio_set_drvdata(func, hifdevice);
+	HIF_EXIT("status %d", ret);
+
+	return hifdevice;
+}
+
+/**
+ * get_hif_device() - fetch the hif context bound to an sdio function
+ * @func: sdio function previously bound via add_hif_device()
+ *
+ * Return: the hif_sdio_dev stored as sdio driver data
+ */
+static struct hif_sdio_dev *get_hif_device(struct sdio_func *func)
+{
+	void *drvdata;
+
+	AR_DEBUG_ASSERT(func != NULL);
+
+	drvdata = sdio_get_drvdata(func);
+	return (struct hif_sdio_dev *) drvdata;
+}
+
+/**
+ * del_hif_device() - release a hif device and its DMA bounce buffer
+ * @device: hif device allocated by add_hif_device()
+ *
+ * Return: none
+ */
+static void del_hif_device(struct hif_sdio_dev *device)
+{
+	AR_DEBUG_ASSERT(device != NULL);
+	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
+			("%s: deleting hif device 0x%p\n",
+				__func__, device));
+
+	/* the bounce buffer only exists when HIF_USE_DMA_BOUNCE_BUFFER
+	 * was compiled in and allocation succeeded */
+	if (device->dma_buffer)
+		qdf_mem_free(device->dma_buffer);
+	qdf_mem_free(device);
+}
+
+/* reset_all_cards() - intentionally empty stub; card reset on driver
+ * shutdown is not implemented for this bus */
+static void reset_all_cards(void)
+{
+}
+
+/**
+ * hif_release_device() - drop the upper layer's claim on the hif device
+ * @hif_ctx: opaque hif context
+ *
+ * Return: none
+ */
+void hif_release_device(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_ctx);
+
+	sc->hif_handle->claimed_ctx = NULL;
+}
+
+/**
+ * hif_attach_htc() - register the HTC layer's callback table
+ * @device: hif device context
+ * @callbacks: callback table to install
+ *
+ * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when a callback
+ * context is already registered
+ */
+QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device,
+				HTC_CALLBACKS *callbacks)
+{
+	/* refuse a second attach while a context is still registered */
+	if (NULL != device->htc_callbacks.context)
+		return QDF_STATUS_E_FAILURE;
+
+	device->htc_callbacks = *callbacks;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_detach_htc() - clear the HTC callback table
+ * @hif_ctx: opaque hif context
+ *
+ * Zeroing the table also clears the context pointer, allowing a future
+ * hif_attach_htc() to succeed.
+ *
+ * Return: none
+ */
+void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_ctx);
+	struct hif_sdio_dev *dev = sc->hif_handle;
+
+	qdf_mem_zero(&dev->htc_callbacks, sizeof(dev->htc_callbacks));
+}
+
+/*
+ * Build the 32-bit argument of an SDIO CMD52 (IO_RW_DIRECT):
+ * bit 31 R/W flag, bits 30:28 function number, bit 27 RAW flag,
+ * bits 25:9 register address, bits 7:0 write data.  Bits 26 and 8 are
+ * set unconditionally here; their meaning is bus-spec defined —
+ * NOTE(review): confirm against the SDIO spec CMD52 argument layout.
+ */
+#define SDIO_SET_CMD52_ARG(arg, rw, func, raw, address, writedata) \
+			((arg) = (((rw) & 1) << 31) | \
+			((func & 0x7) << 28) | \
+			(((raw) & 1) << 27) | \
+			(1 << 26) | \
+			(((address) & 0x1FFFF) << 9) | \
+			(1 << 8) | \
+			((writedata) & 0xFF))
+
+/* convenience wrappers for read (data byte 0) and write transfers */
+#define SDIO_SET_CMD52_READ_ARG(arg, func, address) \
+	SDIO_SET_CMD52_ARG(arg, 0, (func), 0, address, 0x00)
+#define SDIO_SET_CMD52_WRITE_ARG(arg, func, address, value) \
+	SDIO_SET_CMD52_ARG(arg, 1, (func), 0, address, value)
+
+/**
+ * func0_cmd52_write_byte() - CMD52 single-byte write on function 0
+ * @card: mmc card
+ * @address: register address within function 0 (CCCR space)
+ * @byte: value to write
+ *
+ * Return: 0 on success, mmc core error code otherwise
+ */
+static int func0_cmd52_write_byte(struct mmc_card *card,
+				  unsigned int address,
+				  unsigned char byte)
+{
+	struct mmc_command cmd;
+	unsigned long arg;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte);
+	cmd.opcode = SD_IO_RW_DIRECT;
+	cmd.arg = arg;
+	cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+	ret = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (ret)
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: mmc_wait_for_cmd returned %d\n",
+				 __func__, ret));
+
+	return ret;
+}
+
+/**
+ * func0_cmd52_read_byte() - CMD52 single-byte read on function 0
+ * @card: mmc card
+ * @address: register address within function 0 (CCCR space)
+ * @byte: out parameter for the value read; may be NULL
+ *
+ * Return: 0 on success, mmc core error code otherwise
+ */
+static int func0_cmd52_read_byte(struct mmc_card *card,
+				 unsigned int address,
+				 unsigned char *byte)
+{
+	struct mmc_command cmd;
+	unsigned long arg;
+	int32_t ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	SDIO_SET_CMD52_READ_ARG(arg, 0, address);
+	cmd.opcode = SD_IO_RW_DIRECT;
+	cmd.arg = arg;
+	cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+	ret = mmc_wait_for_cmd(card->host, &cmd, 0);
+
+	/* the data byte lives in the low 8 bits of the R5 response */
+	if (ret == 0 && byte != NULL)
+		*byte = cmd.resp[0] & 0xFF;
+
+	if (ret)
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%s: mmc_wait_for_cmd returned %d\n",
+				 __func__, ret));
+
+	return ret;
+}
+
+/**
+ * hif_dump_cccr() - dump the card's CCCR registers (0x00..0x16)
+ * @hif_device: hif device context
+ *
+ * Reads each CCCR byte via CMD52 on function 0 and prints it; read
+ * failures are reported per register.
+ *
+ * Return: none
+ */
+void hif_dump_cccr(struct hif_sdio_dev *hif_device)
+{
+	int reg;
+	uint8_t val;
+	uint32_t rc;
+
+	if (hif_device == NULL || hif_device->func == NULL ||
+	    hif_device->func->card == NULL) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("hif_dump_cccr incorrect input arguments\n"));
+		return;
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("hif_dump_cccr "));
+	for (reg = 0; reg <= 0x16; reg++) {
+		rc = func0_cmd52_read_byte(hif_device->func->card,
+					   reg, &val);
+		if (rc)
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("Reading CCCR 0x%02X failed: %d\n",
+			       (unsigned int)reg, (unsigned int)rc));
+		else
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("%X(%X) ", (unsigned int)reg,
+			       (unsigned int)val));
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("\n"));
+}
+
+#ifdef CONFIG_CNSS_SDIO
+/**
+ * hif_sdio_device_inserted() - CNSS entry point for card insertion
+ * @dev: generic device embedded in the sdio function
+ * @id: matching sdio device id
+ *
+ * Thin adapter: recovers the sdio_func and forwards to the internal
+ * hif_device_inserted() handler.
+ *
+ * Return: result of hif_device_inserted()
+ */
+int hif_sdio_device_inserted(struct device *dev,
+					const struct sdio_device_id *id)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+
+	return hif_device_inserted(func, id);
+}
+
+/**
+ * hif_sdio_device_removed() - CNSS entry point for card removal
+ * @func: sdio function being removed
+ *
+ * Return: none
+ */
+void hif_sdio_device_removed(struct sdio_func *func)
+{
+	hif_device_removed(func);
+}
+#endif

+ 479 - 0
hif/src/sdio/native_sdio/src/hif_scatter.c

@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/kthread.h>
+#include "hif_internal.h"
+#include <qdf_mem.h>
+#include "dl_list.h"
+#define ATH_MODULE_NAME hif
+#include "a_debug.h"
+
+#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT
+
+#define _CMD53_ARG_READ          0
+#define _CMD53_ARG_WRITE         1
+#define _CMD53_ARG_BLOCK_BASIS   1
+#define _CMD53_ARG_FIXED_ADDRESS 0
+#define _CMD53_ARG_INCR_ADDRESS  1
+
+#define SDIO_SET_CMD53_ARG(arg, rw, func, mode, opcode, address, bytes_blocks) \
+		((arg) = (((rw) & 1) << 31) | \
+		((func & 0x7) << 28) | \
+		(((mode) & 1) << 27) | \
+		(((opcode) & 1) << 26) | \
+		(((address) & 0x1FFFF) << 9) | \
+		((bytes_blocks) & 0x1FF))
+
+/**
+ * free_scatter_req() - return a scatter request to the device free pool
+ * @device: hif device context
+ * @req: scatter request being recycled
+ *
+ * Return: none
+ */
+static void free_scatter_req(struct hif_sdio_dev *device,
+		struct _HIF_SCATTER_REQ *req)
+{
+	qdf_spin_lock_irqsave(&device->lock);
+	dl_list_insert_tail(&device->scatter_req_head, &req->list_link);
+	qdf_spin_unlock_irqrestore(&device->lock);
+}
+
+/**
+ * alloc_scatter_req() - take a scatter request from the device free pool
+ * @device: hif device context
+ *
+ * Return: a free scatter request, or NULL when the pool is empty
+ */
+static struct _HIF_SCATTER_REQ *alloc_scatter_req(struct hif_sdio_dev *device)
+{
+	DL_LIST *entry;
+
+	qdf_spin_lock_irqsave(&device->lock);
+	entry = dl_list_remove_item_from_head(&device->scatter_req_head);
+	qdf_spin_unlock_irqrestore(&device->lock);
+
+	if (entry == NULL)
+		return NULL;
+
+	return A_CONTAINING_STRUCT(entry,
+		struct _HIF_SCATTER_REQ, list_link);
+}
+
+/**
+ * do_hif_read_write_scatter() - rd/wr scattered operation.
+ * @device: hif device context
+ * @busrequest: rd/wr bus request
+ *
+ * called by async task to perform the operation synchronously
+ * using direct MMC APIs (assembles a CMD53 block-mode request with an
+ * SG table and waits for it to complete)
+ * Return: QDF_STATUS
+ */
+QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device,
+		struct bus_request *busrequest)
+{
+	int i;
+	uint8_t rw;
+	uint8_t opcode;
+	struct mmc_request mmcreq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	struct HIF_SCATTER_REQ_PRIV *req_priv;
+	struct _HIF_SCATTER_REQ *req;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct scatterlist *sg;
+
+	HIF_ENTER();
+
+	req_priv = busrequest->scatter_req;
+
+	A_ASSERT(req_priv != NULL);
+
+	req = req_priv->hif_scatter_req;
+
+	memset(&mmcreq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	/* block-basis transfer: total length must be a multiple of the
+	 * mbox block size (enforced by the caller) */
+	data.blksz = HIF_MBOX_BLOCK_SIZE;
+	data.blocks = req->total_length / HIF_MBOX_BLOCK_SIZE;
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+			("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d), (tot:%d,sg:%d)\n",
+			 (req->request & HIF_SDIO_WRITE) ? "WRITE" : "READ",
+			 req->address, data.blksz, data.blocks,
+			 req->total_length, req->valid_scatter_entries));
+
+	if (req->request & HIF_SDIO_WRITE) {
+		rw = _CMD53_ARG_WRITE;
+		data.flags = MMC_DATA_WRITE;
+	} else {
+		rw = _CMD53_ARG_READ;
+		data.flags = MMC_DATA_READ;
+	}
+
+	if (req->request & HIF_FIXED_ADDRESS)
+		opcode = _CMD53_ARG_FIXED_ADDRESS;
+	else
+		opcode = _CMD53_ARG_INCR_ADDRESS;
+
+	/* fill SG entries */
+	sg = req_priv->sgentries;
+	sg_init_table(sg, req->valid_scatter_entries);
+
+	/* assemble SG list */
+	for (i = 0; i < req->valid_scatter_entries; i++, sg++) {
+		/* setup each sg entry */
+		if ((unsigned long)req->scatter_list[i].buffer & 0x3) {
+			/* note some scatter engines can handle unaligned
+			 * buffers, print this as informational only */
+			AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+				("HIF: (%s) Scatter Buf is unaligned 0x%lx\n",
+				 req->
+				 request & HIF_SDIO_WRITE ? "WRITE" : "READ",
+				 (unsigned long)req->scatter_list[i].
+				 buffer));
+		}
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+				("  %d:  Addr:0x%lX, Len:%d\n", i,
+				 (unsigned long)req->scatter_list[i].buffer,
+				 req->scatter_list[i].length));
+
+		sg_set_buf(sg, req->scatter_list[i].buffer,
+			   req->scatter_list[i].length);
+	}
+	/* set scatter-gather table for request */
+	data.sg = req_priv->sgentries;
+	data.sg_len = req->valid_scatter_entries;
+	/* set command argument */
+	SDIO_SET_CMD53_ARG(cmd.arg,
+			   rw,
+			   device->func->num,
+			   _CMD53_ARG_BLOCK_BASIS,
+			   opcode, req->address, data.blocks);
+
+	cmd.opcode = SD_IO_RW_EXTENDED;
+	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+	mmcreq.cmd = &cmd;
+	mmcreq.data = &data;
+
+	mmc_set_data_timeout(&data, device->func->card);
+	/* synchronous call to process request */
+	mmc_wait_for_req(device->func->card->host, &mmcreq);
+
+	if (cmd.error) {
+		status = QDF_STATUS_E_FAILURE;
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: cmd error: %d\n", cmd.error));
+	}
+
+	if (data.error) {
+		status = QDF_STATUS_E_FAILURE;
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: data error: %d\n", data.error));
+	}
+
+	if (QDF_IS_STATUS_ERROR(status)) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+			("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
+			 (req->request & HIF_SDIO_WRITE) ? "WRITE" : "READ",
+			 req->address, data.blksz, data.blocks));
+	}
+
+	/* set completion status, fail or success */
+	req->completion_status = status;
+
+	if (req->request & HIF_ASYNCHRONOUS) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+				("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",
+				 (unsigned long)busrequest, status));
+		/* complete the request */
+		A_ASSERT(req->completion_routine != NULL);
+		req->completion_routine(req);
+	} else {
+		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+			("HIF-SCATTER async_task upping busreq : 0x%lX (%d)\n",
+			 (unsigned long)busrequest, status));
+		/* signal wait */
+		up(&busrequest->sem_req);
+	}
+	HIF_EXIT();
+
+	return status;
+}
+
+/**
+ * hif_read_write_scatter() - callback to issue a read-write
+ * scatter request.
+ * @device: hif device context
+ * @req: rd/wr scatter request
+ *
+ * Validates the request flags and length, queues the bus request for
+ * the async I/O thread, and for synchronous requests blocks until it
+ * completes.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS hif_read_write_scatter(struct hif_sdio_dev *device,
+				   struct _HIF_SCATTER_REQ *req)
+{
+	QDF_STATUS status = QDF_STATUS_E_INVAL;
+	uint32_t request = req->request;
+	struct HIF_SCATTER_REQ_PRIV *req_priv =
+		(struct HIF_SCATTER_REQ_PRIV *) req->hif_private[0];
+
+	do {
+
+		A_ASSERT(req_priv != NULL);
+
+		AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+			("HIF-SCATTER: total len: %d Scatter Entries: %d\n",
+				 req->total_length,
+				 req->valid_scatter_entries));
+
+		if (!(request & HIF_EXTENDED_IO)) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: Invalid command type: 0x%08x\n",
+					 request));
+			break;
+		}
+
+		if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: Invalid mode: 0x%08x\n",
+					 request));
+			break;
+		}
+
+		if (!(request & HIF_BLOCK_BASIS)) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: Invalid data mode: 0x%08x\n",
+					 request));
+			break;
+		}
+
+		if (req->total_length > MAX_SCATTER_REQ_TRANSFER_SIZE) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+				("HIF-SCATTER: Invalid length: %d\n",
+					 req->total_length));
+			break;
+		}
+
+		if (req->total_length == 0) {
+			A_ASSERT(false);
+			break;
+		}
+
+		/* add bus request to the async list for the async
+		 * I/O thread to process */
+		add_to_async_list(device, req_priv->busrequest);
+
+		if (request & HIF_SYNCHRONOUS) {
+			AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+				("HIF-SCATTER: queued sync req: 0x%lX\n",
+					 (unsigned long)req_priv->busrequest));
+			/* signal thread and wait */
+			up(&device->sem_async);
+			if (down_interruptible(&req_priv->busrequest->sem_req)
+			    != 0) {
+				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
+					("HIF-SCATTER: interrupted!\n"));
+				/* interrupted, exit */
+				status = QDF_STATUS_E_FAILURE;
+				break;
+			} else {
+				status = req->completion_status;
+			}
+		} else {
+			AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
+				("HIF-SCATTER: queued async req: 0x%lX\n",
+					 (unsigned long)req_priv->busrequest));
+			/* wake thread, it will process and then take
+			 * care of the async callback */
+			up(&device->sem_async);
+			status = QDF_STATUS_SUCCESS;
+		}
+
+	} while (false);
+
+	/* a failed async request is completed through its callback with
+	 * the error recorded in completion_status */
+	if (QDF_IS_STATUS_ERROR(status) && (request & HIF_ASYNCHRONOUS)) {
+		req->completion_status = status;
+		req->completion_routine(req);
+		status = QDF_STATUS_SUCCESS;
+	}
+
+	return status;
+}
+
+/**
+ * setup_hif_scatter_support() - setup of HIF scatter resources
+ * scatter request.
+ * @device: hif device context
+ * @info: scatter info, filled in with the pool callbacks and limits
+ *
+ * Pre-allocates MAX_SCATTER_REQUESTS scatter requests (each with a bus
+ * request) into the device free pool and publishes the pool accessors.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device,
+			   struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info)
+{
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	int i;
+	struct HIF_SCATTER_REQ_PRIV *req_priv;
+	struct bus_request *busrequest;
+
+	if (device->func->card->host->max_segs <
+	    MAX_SCATTER_ENTRIES_PER_REQ) {
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("host only supports scatter of : %d entries,"
+				 "need: %d\n",
+				 device->func->card->host->max_segs,
+				 MAX_SCATTER_ENTRIES_PER_REQ));
+		status = QDF_STATUS_E_NOSUPPORT;
+		goto end;
+	}
+
+	AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
+			("max scatter req : %d entries: %d\n",
+			 MAX_SCATTER_REQUESTS,
+			 MAX_SCATTER_ENTRIES_PER_REQ));
+
+	for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
+		/* allocate the private request blob */
+		req_priv =
+			(struct HIF_SCATTER_REQ_PRIV *)
+			qdf_mem_malloc(sizeof(
+					struct HIF_SCATTER_REQ_PRIV));
+		if (NULL == req_priv)
+			goto end;
+		qdf_mem_zero(req_priv, sizeof(
+					struct HIF_SCATTER_REQ_PRIV));
+		/* save the device instance */
+		req_priv->device = device;
+		/* allocate the scatter request; the trailing array holds
+		 * one inline item, so allocate space for the remaining
+		 * MAX_SCATTER_ENTRIES_PER_REQ - 1 items */
+		req_priv->hif_scatter_req =
+			(struct _HIF_SCATTER_REQ *)
+			qdf_mem_malloc(sizeof(struct _HIF_SCATTER_REQ) +
+				       (MAX_SCATTER_ENTRIES_PER_REQ -
+			       1) * (sizeof(struct _HIF_SCATTER_ITEM)));
+
+		if (NULL == req_priv->hif_scatter_req) {
+			qdf_mem_free(req_priv);
+			goto end;
+		}
+		/* just zero the main part of the scatter request */
+		qdf_mem_zero(req_priv->hif_scatter_req,
+			     sizeof(struct _HIF_SCATTER_REQ));
+		/* back pointer to the private struct */
+		req_priv->hif_scatter_req->hif_private[0] = req_priv;
+		/* allocate a bus request for this scatter request */
+		busrequest = hif_allocate_bus_request(device);
+		if (NULL == busrequest) {
+			qdf_mem_free(req_priv->hif_scatter_req);
+			qdf_mem_free(req_priv);
+			goto end;
+		}
+		/* assign the scatter request to this bus request */
+		busrequest->scatter_req = req_priv;
+		/* point back to the request */
+		req_priv->busrequest = busrequest;
+		/* req_priv it to the scatter pool */
+		free_scatter_req(device, req_priv->hif_scatter_req);
+	}
+
+	/* NOTE(review): allocation failures jump to 'end' above, so this
+	 * branch is never taken; the loop always completes with
+	 * i == MAX_SCATTER_REQUESTS when reached */
+	if (i != MAX_SCATTER_REQUESTS) {
+		status = QDF_STATUS_E_NOMEM;
+		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+				("failed to alloc scatter resources !\n"));
+		goto end;
+	}
+
+	/* set scatter function pointers */
+	info->allocate_req_func = alloc_scatter_req;
+	info->free_req_func = free_scatter_req;
+	info->read_write_scatter_func = hif_read_write_scatter;
+	info->max_scatter_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+	info->max_tx_size_per_scatter_req =
+		MAX_SCATTER_REQ_TRANSFER_SIZE;
+
+	status = QDF_STATUS_SUCCESS;
+
+end:
+	if (QDF_IS_STATUS_ERROR(status))
+		cleanup_hif_scatter_resources(device);
+
+	return status;
+}
+
+/**
+ * cleanup_hif_scatter_resources() - free all pooled HIF scatter resources
+ * @device: hif device context
+ *
+ * Drains the free pool, releasing the bus request, the scatter request
+ * storage and the private tracking structure of every entry.
+ *
+ * Return: none
+ */
+void cleanup_hif_scatter_resources(struct hif_sdio_dev *device)
+{
+	for (;;) {
+		struct _HIF_SCATTER_REQ *req;
+		struct HIF_SCATTER_REQ_PRIV *req_priv;
+
+		req = alloc_scatter_req(device);
+		if (req == NULL)
+			break;
+
+		req_priv = (struct HIF_SCATTER_REQ_PRIV *)req->hif_private[0];
+		A_ASSERT(req_priv != NULL);
+
+		if (req_priv->busrequest != NULL) {
+			req_priv->busrequest->scatter_req = NULL;
+			/* return the bus request to its own pool */
+			hif_free_bus_request(device, req_priv->busrequest);
+			req_priv->busrequest = NULL;
+		}
+
+		if (req_priv->hif_scatter_req != NULL) {
+			qdf_mem_free(req_priv->hif_scatter_req);
+			req_priv->hif_scatter_req = NULL;
+		}
+
+		qdf_mem_free(req_priv);
+	}
+}

+ 77 - 0
hif/src/sdio/regtable_sdio.c

@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "bmi_msg.h"
+#include "cepci.h"
+
+#define MISSING 0
+#include "regtable_sdio.h"
+#include "targaddrs.h"
+#include "if_sdio.h"
+#include "ar9888def.h"
+#include "ar6320def.h"
+#include "ar6320v2def.h"
+
+/**
+ * target_register_tbl_attach() - select the target register table
+ * @scn: hif context
+ * @target_type: TARGET_TYPE_* identifier of the attached chip
+ *
+ * Guards against a NULL context for consistency with
+ * hif_register_tbl_attach(); unknown target types leave the table
+ * unchanged.
+ *
+ * Return: none
+ */
+void target_register_tbl_attach(struct hif_softc *scn, u32 target_type)
+{
+	if (NULL == scn) {
+		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: sc is NULL", __func__);
+		return;
+	}
+
+	switch (target_type) {
+	case TARGET_TYPE_AR9888:
+		scn->targetdef = &ar9888_targetdef;
+		break;
+	case TARGET_TYPE_AR6320:
+		scn->targetdef = &ar6320_targetdef;
+		break;
+	case TARGET_TYPE_AR6320V2:
+		scn->targetdef = &ar6320v2_targetdef;
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * hif_register_tbl_attach() - select the host register table
+ * @scn: hif context
+ * @hif_type: HIF_TYPE_* identifier of the attached chip
+ *
+ * Unknown hif types leave the table unchanged.
+ *
+ * Return: none
+ */
+void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type)
+{
+	if (!scn) {
+		QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: sc is NULL", __func__);
+		return;
+	}
+
+	switch (hif_type) {
+	case HIF_TYPE_AR9888:
+		scn->hostdef = &ar9888_hostdef;
+		break;
+	case HIF_TYPE_AR6320:
+		scn->hostdef = &ar6320_hostdef;
+		break;
+	case HIF_TYPE_AR6320V2:
+		scn->hostdef = &ar6320v2_hostdef;
+		break;
+	default:
+		break;
+	}
+}

+ 885 - 0
hif/src/sdio/regtable_sdio.h

@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#ifndef _REGTABLE_SDIO_H_
+#define _REGTABLE_SDIO_H_
+
+#define MISSING  0
+extern struct hif_sdio_softc *scn;
+
+/**
+ * struct targetdef_s - per-chip target register offsets, masks and LSBs
+ *
+ * Each d_* member holds one chip-specific register address, bit mask or
+ * shift value. The chip tables (ar9888_targetdef, ar6320_targetdef,
+ * ar6320v2_targetdef) fill these in, and the accessor macros below
+ * dereference scn->targetdef (or pdev->targetdef for the htt_rx fields)
+ * so common code stays chip-agnostic.
+ */
+struct targetdef_s {
+	uint32_t d_RTC_SOC_BASE_ADDRESS;
+	uint32_t d_RTC_WMAC_BASE_ADDRESS;
+	uint32_t d_SYSTEM_SLEEP_OFFSET;
+	uint32_t d_WLAN_SYSTEM_SLEEP_OFFSET;
+	uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB;
+	uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK;
+	uint32_t d_CLOCK_CONTROL_OFFSET;
+	uint32_t d_CLOCK_CONTROL_SI0_CLK_MASK;
+	uint32_t d_RESET_CONTROL_OFFSET;
+	uint32_t d_RESET_CONTROL_MBOX_RST_MASK;
+	uint32_t d_RESET_CONTROL_SI0_RST_MASK;
+	uint32_t d_WLAN_RESET_CONTROL_OFFSET;
+	uint32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK;
+	uint32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK;
+	uint32_t d_GPIO_BASE_ADDRESS;
+	uint32_t d_GPIO_PIN0_OFFSET;
+	uint32_t d_GPIO_PIN1_OFFSET;
+	uint32_t d_GPIO_PIN0_CONFIG_MASK;
+	uint32_t d_GPIO_PIN1_CONFIG_MASK;
+	uint32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB;
+	uint32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK;
+	uint32_t d_SI_CONFIG_I2C_LSB;
+	uint32_t d_SI_CONFIG_I2C_MASK;
+	uint32_t d_SI_CONFIG_POS_SAMPLE_LSB;
+	uint32_t d_SI_CONFIG_POS_SAMPLE_MASK;
+	uint32_t d_SI_CONFIG_INACTIVE_CLK_LSB;
+	uint32_t d_SI_CONFIG_INACTIVE_CLK_MASK;
+	uint32_t d_SI_CONFIG_INACTIVE_DATA_LSB;
+	uint32_t d_SI_CONFIG_INACTIVE_DATA_MASK;
+	uint32_t d_SI_CONFIG_DIVIDER_LSB;
+	uint32_t d_SI_CONFIG_DIVIDER_MASK;
+	uint32_t d_SI_BASE_ADDRESS;
+	uint32_t d_SI_CONFIG_OFFSET;
+	uint32_t d_SI_TX_DATA0_OFFSET;
+	uint32_t d_SI_TX_DATA1_OFFSET;
+	uint32_t d_SI_RX_DATA0_OFFSET;
+	uint32_t d_SI_RX_DATA1_OFFSET;
+	uint32_t d_SI_CS_OFFSET;
+	uint32_t d_SI_CS_DONE_ERR_MASK;
+	uint32_t d_SI_CS_DONE_INT_MASK;
+	uint32_t d_SI_CS_START_LSB;
+	uint32_t d_SI_CS_START_MASK;
+	uint32_t d_SI_CS_RX_CNT_LSB;
+	uint32_t d_SI_CS_RX_CNT_MASK;
+	uint32_t d_SI_CS_TX_CNT_LSB;
+	uint32_t d_SI_CS_TX_CNT_MASK;
+	uint32_t d_BOARD_DATA_SZ;
+	uint32_t d_BOARD_EXT_DATA_SZ;
+	uint32_t d_MBOX_BASE_ADDRESS;
+	uint32_t d_LOCAL_SCRATCH_OFFSET;
+	uint32_t d_CPU_CLOCK_OFFSET;
+	uint32_t d_LPO_CAL_OFFSET;
+	uint32_t d_GPIO_PIN10_OFFSET;
+	uint32_t d_GPIO_PIN11_OFFSET;
+	uint32_t d_GPIO_PIN12_OFFSET;
+	uint32_t d_GPIO_PIN13_OFFSET;
+	uint32_t d_CLOCK_GPIO_OFFSET;
+	uint32_t d_CPU_CLOCK_STANDARD_LSB;
+	uint32_t d_CPU_CLOCK_STANDARD_MASK;
+	uint32_t d_LPO_CAL_ENABLE_LSB;
+	uint32_t d_LPO_CAL_ENABLE_MASK;
+	uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB;
+	uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK;
+	uint32_t d_ANALOG_INTF_BASE_ADDRESS;
+	uint32_t d_WLAN_MAC_BASE_ADDRESS;
+	uint32_t d_FW_INDICATOR_ADDRESS;
+	uint32_t d_DRAM_BASE_ADDRESS;
+	uint32_t d_SOC_CORE_BASE_ADDRESS;
+	uint32_t d_CORE_CTRL_ADDRESS;
+	uint32_t d_MSI_NUM_REQUEST;
+	uint32_t d_MSI_ASSIGN_FW;
+	uint32_t d_CORE_CTRL_CPU_INTR_MASK;
+	uint32_t d_SR_WR_INDEX_ADDRESS;
+	uint32_t d_DST_WATERMARK_ADDRESS;
+
+	/* htt_rx.c: rx descriptor field extraction (accessed via pdev) */
+	uint32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK;
+	uint32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB;
+	uint32_t d_RX_MPDU_START_0_RETRY_LSB;
+	uint32_t d_RX_MPDU_START_0_RETRY_MASK;
+	uint32_t d_RX_MPDU_START_0_SEQ_NUM_MASK;
+	uint32_t d_RX_MPDU_START_0_SEQ_NUM_LSB;
+	uint32_t d_RX_MPDU_START_2_PN_47_32_LSB;
+	uint32_t d_RX_MPDU_START_2_PN_47_32_MASK;
+	uint32_t d_RX_MPDU_START_2_TID_LSB;
+	uint32_t d_RX_MPDU_START_2_TID_MASK;
+	uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK;
+	uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB;
+	uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK;
+	uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB;
+	uint32_t d_RX_MSDU_END_4_LAST_MSDU_MASK;
+	uint32_t d_RX_MSDU_END_4_LAST_MSDU_LSB;
+	uint32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK;
+	uint32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB;
+	uint32_t d_RX_ATTENTION_0_FRAGMENT_MASK;
+	uint32_t d_RX_ATTENTION_0_FRAGMENT_LSB;
+	uint32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
+	uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK;
+	uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB;
+	uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK;
+	uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB;
+	uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET;
+	uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK;
+	uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB;
+	uint32_t d_RX_MPDU_START_0_ENCRYPTED_MASK;
+	uint32_t d_RX_MPDU_START_0_ENCRYPTED_LSB;
+	uint32_t d_RX_ATTENTION_0_MORE_DATA_MASK;
+	uint32_t d_RX_ATTENTION_0_MSDU_DONE_MASK;
+	uint32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK;
+	/* end */
+
+	/* PLL start: clock/PLL configuration registers */
+	uint32_t d_EFUSE_OFFSET;
+	uint32_t d_EFUSE_XTAL_SEL_MSB;
+	uint32_t d_EFUSE_XTAL_SEL_LSB;
+	uint32_t d_EFUSE_XTAL_SEL_MASK;
+	uint32_t d_BB_PLL_CONFIG_OFFSET;
+	uint32_t d_BB_PLL_CONFIG_OUTDIV_MSB;
+	uint32_t d_BB_PLL_CONFIG_OUTDIV_LSB;
+	uint32_t d_BB_PLL_CONFIG_OUTDIV_MASK;
+	uint32_t d_BB_PLL_CONFIG_FRAC_MSB;
+	uint32_t d_BB_PLL_CONFIG_FRAC_LSB;
+	uint32_t d_BB_PLL_CONFIG_FRAC_MASK;
+	uint32_t d_WLAN_PLL_SETTLE_TIME_MSB;
+	uint32_t d_WLAN_PLL_SETTLE_TIME_LSB;
+	uint32_t d_WLAN_PLL_SETTLE_TIME_MASK;
+	uint32_t d_WLAN_PLL_SETTLE_OFFSET;
+	uint32_t d_WLAN_PLL_SETTLE_SW_MASK;
+	uint32_t d_WLAN_PLL_SETTLE_RSTMASK;
+	uint32_t d_WLAN_PLL_SETTLE_RESET;
+	uint32_t d_WLAN_PLL_CONTROL_NOPWD_MSB;
+	uint32_t d_WLAN_PLL_CONTROL_NOPWD_LSB;
+	uint32_t d_WLAN_PLL_CONTROL_NOPWD_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_BYPASS_MSB;
+	uint32_t d_WLAN_PLL_CONTROL_BYPASS_LSB;
+	uint32_t d_WLAN_PLL_CONTROL_BYPASS_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_BYPASS_RESET;
+	uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB;
+	uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB;
+	uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET;
+	uint32_t d_WLAN_PLL_CONTROL_REFDIV_MSB;
+	uint32_t d_WLAN_PLL_CONTROL_REFDIV_LSB;
+	uint32_t d_WLAN_PLL_CONTROL_REFDIV_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_REFDIV_RESET;
+	uint32_t d_WLAN_PLL_CONTROL_DIV_MSB;
+	uint32_t d_WLAN_PLL_CONTROL_DIV_LSB;
+	uint32_t d_WLAN_PLL_CONTROL_DIV_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_DIV_RESET;
+	uint32_t d_WLAN_PLL_CONTROL_OFFSET;
+	uint32_t d_WLAN_PLL_CONTROL_SW_MASK;
+	uint32_t d_WLAN_PLL_CONTROL_RSTMASK;
+	uint32_t d_WLAN_PLL_CONTROL_RESET;
+	uint32_t d_SOC_CORE_CLK_CTRL_OFFSET;
+	uint32_t d_SOC_CORE_CLK_CTRL_DIV_MSB;
+	uint32_t d_SOC_CORE_CLK_CTRL_DIV_LSB;
+	uint32_t d_SOC_CORE_CLK_CTRL_DIV_MASK;
+	uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB;
+	uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB;
+	uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK;
+	uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET;
+	uint32_t d_RTC_SYNC_STATUS_OFFSET;
+	uint32_t d_SOC_CPU_CLOCK_OFFSET;
+	uint32_t d_SOC_CPU_CLOCK_STANDARD_MSB;
+	uint32_t d_SOC_CPU_CLOCK_STANDARD_LSB;
+	uint32_t d_SOC_CPU_CLOCK_STANDARD_MASK;
+	/* PLL end */
+
+	uint32_t d_SOC_POWER_REG_OFFSET;
+	uint32_t d_SOC_RESET_CONTROL_ADDRESS;
+	uint32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
+	uint32_t d_CPU_INTR_ADDRESS;
+	uint32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS;
+	uint32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
+
+	/* chip id start */
+	uint32_t d_SOC_CHIP_ID_ADDRESS;
+	uint32_t d_SOC_CHIP_ID_VERSION_MASK;
+	uint32_t d_SOC_CHIP_ID_VERSION_LSB;
+	uint32_t d_SOC_CHIP_ID_REVISION_MASK;
+	uint32_t d_SOC_CHIP_ID_REVISION_LSB;
+	/* chip id end */
+
+	uint32_t d_A_SOC_CORE_SCRATCH_0_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_1_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_2_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_3_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_4_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_5_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_6_ADDRESS;
+	uint32_t d_A_SOC_CORE_SCRATCH_7_ADDRESS;
+	uint32_t d_A_SOC_CORE_SPARE_0_REGISTER;
+	uint32_t d_A_SOC_CORE_SPARE_1_REGISTER;
+
+	uint32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET;
+	uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB;
+	uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB;
+	uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK;
+	uint32_t d_WLAN_DEBUG_CONTROL_OFFSET;
+	uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB;
+	uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB;
+	uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK;
+	uint32_t d_WLAN_DEBUG_OUT_OFFSET;
+	uint32_t d_WLAN_DEBUG_OUT_DATA_MSB;
+	uint32_t d_WLAN_DEBUG_OUT_DATA_LSB;
+	uint32_t d_WLAN_DEBUG_OUT_DATA_MASK;
+	uint32_t d_AMBA_DEBUG_BUS_OFFSET;
+	uint32_t d_AMBA_DEBUG_BUS_SEL_MSB;
+	uint32_t d_AMBA_DEBUG_BUS_SEL_LSB;
+	uint32_t d_AMBA_DEBUG_BUS_SEL_MASK;
+
+#ifdef QCA_WIFI_3_0_ADRASTEA
+	uint32_t d_Q6_ENABLE_REGISTER_0;
+	uint32_t d_Q6_ENABLE_REGISTER_1;
+	uint32_t d_Q6_CAUSE_REGISTER_0;
+	uint32_t d_Q6_CAUSE_REGISTER_1;
+	uint32_t d_Q6_CLEAR_REGISTER_0;
+	uint32_t d_Q6_CLEAR_REGISTER_1;
+#endif
+};
+
+#define A_SOC_CORE_SPARE_0_REGISTER \
+	(scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER)
+#define A_SOC_CORE_SCRATCH_0_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS)
+#define A_SOC_CORE_SCRATCH_1_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS)
+#define A_SOC_CORE_SCRATCH_2_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS)
+#define A_SOC_CORE_SCRATCH_3_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS)
+#define A_SOC_CORE_SCRATCH_4_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS)
+#define A_SOC_CORE_SCRATCH_5_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS)
+#define A_SOC_CORE_SCRATCH_6_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS)
+#define A_SOC_CORE_SCRATCH_7_ADDRESS  \
+	(scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS)
+#define RTC_SOC_BASE_ADDRESS  (scn->targetdef->d_RTC_SOC_BASE_ADDRESS)
+#define RTC_WMAC_BASE_ADDRESS (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS)
+#define SYSTEM_SLEEP_OFFSET   (scn->targetdef->d_SYSTEM_SLEEP_OFFSET)
+#define WLAN_SYSTEM_SLEEP_OFFSET \
+	(scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET)
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \
+	(scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB)
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \
+	(scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK)
+#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET)
+#define CLOCK_CONTROL_SI0_CLK_MASK \
+	(scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK)
+#define RESET_CONTROL_OFFSET    (scn->targetdef->d_RESET_CONTROL_OFFSET)
+#define RESET_CONTROL_MBOX_RST_MASK \
+	(scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK)
+#define RESET_CONTROL_SI0_RST_MASK \
+	(scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK)
+#define WLAN_RESET_CONTROL_OFFSET \
+	(scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET)
+#define WLAN_RESET_CONTROL_COLD_RST_MASK \
+	(scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK)
+#define WLAN_RESET_CONTROL_WARM_RST_MASK \
+	(scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK)
+#define GPIO_BASE_ADDRESS       (scn->targetdef->d_GPIO_BASE_ADDRESS)
+#define GPIO_PIN0_OFFSET        (scn->targetdef->d_GPIO_PIN0_OFFSET)
+#define GPIO_PIN1_OFFSET        (scn->targetdef->d_GPIO_PIN1_OFFSET)
+#define GPIO_PIN0_CONFIG_MASK   (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK)
+#define GPIO_PIN1_CONFIG_MASK   (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK)
+/*
+ * NOTE(review): struct targetdef_s declares d_A_SOC_CORE_SCRATCH_0_ADDRESS
+ * but no d_A_SOC_CORE_SCRATCH_0 member, so expanding A_SOC_CORE_SCRATCH_0
+ * anywhere will fail to compile. Confirm the intended field (likely
+ * d_A_SOC_CORE_SCRATCH_0_ADDRESS) before first use of this macro.
+ */
+#define A_SOC_CORE_SCRATCH_0    (scn->targetdef->d_A_SOC_CORE_SCRATCH_0)
+#define SI_CONFIG_BIDIR_OD_DATA_LSB \
+	(scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB)
+#define SI_CONFIG_BIDIR_OD_DATA_MASK \
+	(scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_LSB       (scn->targetdef->d_SI_CONFIG_I2C_LSB)
+#define SI_CONFIG_I2C_MASK \
+	(scn->targetdef->d_SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_LSB \
+	(scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB)
+#define SI_CONFIG_POS_SAMPLE_MASK \
+	(scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_INACTIVE_CLK_LSB \
+	(scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB)
+#define SI_CONFIG_INACTIVE_CLK_MASK \
+	(scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_INACTIVE_DATA_LSB \
+	(scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB)
+#define SI_CONFIG_INACTIVE_DATA_MASK \
+	(scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_DIVIDER_LSB   (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB)
+#define SI_CONFIG_DIVIDER_MASK  (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK)
+#define SI_BASE_ADDRESS         (scn->targetdef->d_SI_BASE_ADDRESS)
+#define SI_CONFIG_OFFSET        (scn->targetdef->d_SI_CONFIG_OFFSET)
+#define SI_TX_DATA0_OFFSET      (scn->targetdef->d_SI_TX_DATA0_OFFSET)
+#define SI_TX_DATA1_OFFSET      (scn->targetdef->d_SI_TX_DATA1_OFFSET)
+#define SI_RX_DATA0_OFFSET      (scn->targetdef->d_SI_RX_DATA0_OFFSET)
+#define SI_RX_DATA1_OFFSET      (scn->targetdef->d_SI_RX_DATA1_OFFSET)
+#define SI_CS_OFFSET            (scn->targetdef->d_SI_CS_OFFSET)
+#define SI_CS_DONE_ERR_MASK     (scn->targetdef->d_SI_CS_DONE_ERR_MASK)
+#define SI_CS_DONE_INT_MASK     (scn->targetdef->d_SI_CS_DONE_INT_MASK)
+#define SI_CS_START_LSB         (scn->targetdef->d_SI_CS_START_LSB)
+#define SI_CS_START_MASK        (scn->targetdef->d_SI_CS_START_MASK)
+#define SI_CS_RX_CNT_LSB        (scn->targetdef->d_SI_CS_RX_CNT_LSB)
+#define SI_CS_RX_CNT_MASK       (scn->targetdef->d_SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_LSB        (scn->targetdef->d_SI_CS_TX_CNT_LSB)
+#define SI_CS_TX_CNT_MASK       (scn->targetdef->d_SI_CS_TX_CNT_MASK)
+#define EEPROM_SZ               (scn->targetdef->d_BOARD_DATA_SZ)
+#define EEPROM_EXT_SZ           (scn->targetdef->d_BOARD_EXT_DATA_SZ)
+#define MBOX_BASE_ADDRESS       (scn->targetdef->d_MBOX_BASE_ADDRESS)
+#define LOCAL_SCRATCH_OFFSET    (scn->targetdef->d_LOCAL_SCRATCH_OFFSET)
+#define CPU_CLOCK_OFFSET        (scn->targetdef->d_CPU_CLOCK_OFFSET)
+#define LPO_CAL_OFFSET          (scn->targetdef->d_LPO_CAL_OFFSET)
+#define GPIO_PIN10_OFFSET       (scn->targetdef->d_GPIO_PIN10_OFFSET)
+#define GPIO_PIN11_OFFSET       (scn->targetdef->d_GPIO_PIN11_OFFSET)
+#define GPIO_PIN12_OFFSET       (scn->targetdef->d_GPIO_PIN12_OFFSET)
+#define GPIO_PIN13_OFFSET       (scn->targetdef->d_GPIO_PIN13_OFFSET)
+#define CLOCK_GPIO_OFFSET       (scn->targetdef->d_CLOCK_GPIO_OFFSET)
+#define CPU_CLOCK_STANDARD_LSB  (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB)
+#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK)
+#define LPO_CAL_ENABLE_LSB      (scn->targetdef->d_LPO_CAL_ENABLE_LSB)
+#define LPO_CAL_ENABLE_MASK     (scn->targetdef->d_LPO_CAL_ENABLE_MASK)
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \
+	(scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB)
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \
+	(scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK)
+#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS)
+#define WLAN_MAC_BASE_ADDRESS    (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS)
+#define FW_INDICATOR_ADDRESS     (scn->targetdef->d_FW_INDICATOR_ADDRESS)
+#define DRAM_BASE_ADDRESS        (scn->targetdef->d_DRAM_BASE_ADDRESS)
+#define SOC_CORE_BASE_ADDRESS    (scn->targetdef->d_SOC_CORE_BASE_ADDRESS)
+#define CORE_CTRL_ADDRESS        (scn->targetdef->d_CORE_CTRL_ADDRESS)
+#define CORE_CTRL_CPU_INTR_MASK  (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK)
+#define SOC_RESET_CONTROL_ADDRESS  (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS)
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \
+	(scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK)
+#define CPU_INTR_ADDRESS        (scn->targetdef->d_CPU_INTR_ADDRESS)
+#define SOC_LF_TIMER_CONTROL0_ADDRESS \
+	(scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS)
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \
+	(scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK)
+
+
+#define CHIP_ID_ADDRESS           (scn->targetdef->d_SOC_CHIP_ID_ADDRESS)
+#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK)
+#define SOC_CHIP_ID_REVISION_LSB  (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB)
+#define SOC_CHIP_ID_VERSION_MASK  (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK)
+#define SOC_CHIP_ID_VERSION_LSB   (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB)
+#define CHIP_ID_REVISION_GET(x) \
+	(((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB)
+#define CHIP_ID_VERSION_GET(x) \
+	(((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB)
+
+/* misc */
+#define SR_WR_INDEX_ADDRESS     (scn->targetdef->d_SR_WR_INDEX_ADDRESS)
+#define DST_WATERMARK_ADDRESS   (scn->targetdef->d_DST_WATERMARK_ADDRESS)
+#define SOC_POWER_REG_OFFSET    (scn->targetdef->d_SOC_POWER_REG_OFFSET)
+/* end */
+
+/* htt_rx.c */
+#define RX_MSDU_END_4_FIRST_MSDU_MASK \
+	(pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK)
+#define RX_MSDU_END_4_FIRST_MSDU_LSB \
+	(pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB)
+#define RX_MPDU_START_0_RETRY_LSB  \
+	(pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB)
+#define RX_MPDU_START_0_RETRY_MASK  \
+	(pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK)
+#define RX_MPDU_START_0_SEQ_NUM_MASK \
+	(pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK)
+#define RX_MPDU_START_0_SEQ_NUM_LSB \
+	(pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB)
+#define RX_MPDU_START_2_PN_47_32_LSB \
+	(pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB)
+#define RX_MPDU_START_2_PN_47_32_MASK \
+	(pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK)
+#define RX_MPDU_START_2_TID_LSB  \
+	(pdev->targetdef->d_RX_MPDU_START_2_TID_LSB)
+#define RX_MPDU_START_2_TID_MASK  \
+	(pdev->targetdef->d_RX_MPDU_START_2_TID_MASK)
+#define RX_MSDU_END_1_KEY_ID_OCT_MASK \
+	(pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK)
+#define RX_MSDU_END_1_KEY_ID_OCT_LSB \
+	(pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB)
+#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \
+	(pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK)
+#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB \
+	(pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB)
+#define RX_MSDU_END_4_LAST_MSDU_MASK \
+	(pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK)
+#define RX_MSDU_END_4_LAST_MSDU_LSB \
+	(pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB)
+#define RX_ATTENTION_0_MCAST_BCAST_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK)
+#define RX_ATTENTION_0_MCAST_BCAST_LSB \
+	(pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB)
+#define RX_ATTENTION_0_FRAGMENT_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK)
+#define RX_ATTENTION_0_FRAGMENT_LSB \
+	(pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB)
+#define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK)
+#define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \
+	(pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK)
+#define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB \
+	(pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB)
+#define RX_MSDU_START_0_MSDU_LENGTH_MASK \
+	(pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK)
+#define RX_MSDU_START_0_MSDU_LENGTH_LSB \
+	(pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB)
+#define RX_MSDU_START_2_DECAP_FORMAT_OFFSET \
+	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET)
+#define RX_MSDU_START_2_DECAP_FORMAT_MASK \
+	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK)
+#define RX_MSDU_START_2_DECAP_FORMAT_LSB \
+	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB)
+#define RX_MPDU_START_0_ENCRYPTED_MASK \
+	(pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK)
+#define RX_MPDU_START_0_ENCRYPTED_LSB \
+	(pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB)
+#define RX_ATTENTION_0_MORE_DATA_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK)
+#define RX_ATTENTION_0_MSDU_DONE_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK)
+#define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \
+	(pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK)
+/* end */
+
+/* copy_engine.c */
+/* end */
+/* PLL start */
+#define EFUSE_OFFSET              (scn->targetdef->d_EFUSE_OFFSET)
+#define EFUSE_XTAL_SEL_MSB        (scn->targetdef->d_EFUSE_XTAL_SEL_MSB)
+#define EFUSE_XTAL_SEL_LSB        (scn->targetdef->d_EFUSE_XTAL_SEL_LSB)
+#define EFUSE_XTAL_SEL_MASK       (scn->targetdef->d_EFUSE_XTAL_SEL_MASK)
+#define BB_PLL_CONFIG_OFFSET      (scn->targetdef->d_BB_PLL_CONFIG_OFFSET)
+#define BB_PLL_CONFIG_OUTDIV_MSB  (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB)
+#define BB_PLL_CONFIG_OUTDIV_LSB  (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB)
+#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK)
+#define BB_PLL_CONFIG_FRAC_MSB    (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB)
+#define BB_PLL_CONFIG_FRAC_LSB    (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB)
+#define BB_PLL_CONFIG_FRAC_MASK   (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK)
+#define WLAN_PLL_SETTLE_TIME_MSB  (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB)
+#define WLAN_PLL_SETTLE_TIME_LSB  (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB)
+#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK)
+#define WLAN_PLL_SETTLE_OFFSET    (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET)
+#define WLAN_PLL_SETTLE_SW_MASK   (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK)
+#define WLAN_PLL_SETTLE_RSTMASK   (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK)
+#define WLAN_PLL_SETTLE_RESET     (scn->targetdef->d_WLAN_PLL_SETTLE_RESET)
+#define WLAN_PLL_CONTROL_NOPWD_MSB  \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB)
+#define WLAN_PLL_CONTROL_NOPWD_LSB  \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB)
+#define WLAN_PLL_CONTROL_NOPWD_MASK \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK)
+#define WLAN_PLL_CONTROL_BYPASS_MSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB)
+#define WLAN_PLL_CONTROL_BYPASS_LSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB)
+#define WLAN_PLL_CONTROL_BYPASS_MASK \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK)
+#define WLAN_PLL_CONTROL_BYPASS_RESET \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET)
+#define WLAN_PLL_CONTROL_CLK_SEL_MSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB)
+#define WLAN_PLL_CONTROL_CLK_SEL_LSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB)
+#define WLAN_PLL_CONTROL_CLK_SEL_MASK \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK)
+#define WLAN_PLL_CONTROL_CLK_SEL_RESET \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET)
+#define WLAN_PLL_CONTROL_REFDIV_MSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB)
+#define WLAN_PLL_CONTROL_REFDIV_LSB \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB)
+#define WLAN_PLL_CONTROL_REFDIV_MASK \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK)
+#define WLAN_PLL_CONTROL_REFDIV_RESET \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET)
+#define WLAN_PLL_CONTROL_DIV_MSB   (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB)
+#define WLAN_PLL_CONTROL_DIV_LSB   (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB)
+#define WLAN_PLL_CONTROL_DIV_MASK  (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK)
+#define WLAN_PLL_CONTROL_DIV_RESET \
+	(scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET)
+#define WLAN_PLL_CONTROL_OFFSET    (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET)
+#define WLAN_PLL_CONTROL_SW_MASK   (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK)
+#define WLAN_PLL_CONTROL_RSTMASK   (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK)
+#define WLAN_PLL_CONTROL_RESET     (scn->targetdef->d_WLAN_PLL_CONTROL_RESET)
+#define SOC_CORE_CLK_CTRL_OFFSET   (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET)
+#define SOC_CORE_CLK_CTRL_DIV_MSB  (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB)
+#define SOC_CORE_CLK_CTRL_DIV_LSB  (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB)
+#define SOC_CORE_CLK_CTRL_DIV_MASK \
+	(scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK)
+#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \
+	(scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB)
+#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \
+	(scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB)
+#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \
+	(scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK)
+#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \
+	(scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET)
+#define RTC_SYNC_STATUS_OFFSET      (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET)
+#define SOC_CPU_CLOCK_OFFSET        (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET)
+#define SOC_CPU_CLOCK_STANDARD_MSB \
+	(scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB)
+#define SOC_CPU_CLOCK_STANDARD_LSB \
+	(scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB)
+#define SOC_CPU_CLOCK_STANDARD_MASK \
+	(scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK)
+/* PLL end */
+
+/* SET macros */
+#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \
+	(((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \
+	    WLAN_SYSTEM_SLEEP_DISABLE_MASK)
+#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \
+	(((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK)
+#define SI_CONFIG_I2C_SET(x)  (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK)
+#define SI_CONFIG_POS_SAMPLE_SET(x) \
+	(((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK)
+#define SI_CONFIG_INACTIVE_CLK_SET(x) \
+	(((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK)
+#define SI_CONFIG_INACTIVE_DATA_SET(x) \
+	(((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK)
+#define SI_CONFIG_DIVIDER_SET(x) \
+	(((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK)
+#define SI_CS_START_SET(x)  (((x) << SI_CS_START_LSB) & SI_CS_START_MASK)
+#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK)
+#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK)
+#define LPO_CAL_ENABLE_SET(x) \
+	(((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK)
+#define CPU_CLOCK_STANDARD_SET(x) \
+	(((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK)
+#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \
+	(((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK)
+/* copy_engine.c */
+/* end */
+/* PLL start */
+#define EFUSE_XTAL_SEL_GET(x) \
+	(((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB)
+#define EFUSE_XTAL_SEL_SET(x) \
+	(((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK)
+#define BB_PLL_CONFIG_OUTDIV_GET(x) \
+	(((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB)
+#define BB_PLL_CONFIG_OUTDIV_SET(x) \
+	(((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK)
+#define BB_PLL_CONFIG_FRAC_GET(x) \
+	(((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB)
+#define BB_PLL_CONFIG_FRAC_SET(x) \
+	(((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK)
+#define WLAN_PLL_SETTLE_TIME_GET(x) \
+	(((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB)
+#define WLAN_PLL_SETTLE_TIME_SET(x) \
+	(((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK)
+#define WLAN_PLL_CONTROL_NOPWD_GET(x) \
+	(((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB)
+#define WLAN_PLL_CONTROL_NOPWD_SET(x) \
+	(((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK)
+#define WLAN_PLL_CONTROL_BYPASS_GET(x) \
+	(((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB)
+#define WLAN_PLL_CONTROL_BYPASS_SET(x) \
+	(((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK)
+#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \
+	(((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB)
+#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \
+	(((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK)
+#define WLAN_PLL_CONTROL_REFDIV_GET(x) \
+	(((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB)
+#define WLAN_PLL_CONTROL_REFDIV_SET(x) \
+	(((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK)
+#define WLAN_PLL_CONTROL_DIV_GET(x) \
+	(((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB)
+#define WLAN_PLL_CONTROL_DIV_SET(x) \
+	(((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK)
+#define SOC_CORE_CLK_CTRL_DIV_GET(x) \
+	(((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB)
+#define SOC_CORE_CLK_CTRL_DIV_SET(x) \
+	(((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK)
+#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \
+	(((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \
+		RTC_SYNC_STATUS_PLL_CHANGING_LSB)
+#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \
+	(((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \
+		RTC_SYNC_STATUS_PLL_CHANGING_MASK)
+#define SOC_CPU_CLOCK_STANDARD_GET(x) \
+	(((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB)
+#define SOC_CPU_CLOCK_STANDARD_SET(x) \
+	(((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK)
+/* PLL end */
+
+#ifdef QCA_WIFI_3_0_ADRASTEA
+#define Q6_ENABLE_REGISTER_0 \
+	(scn->targetdef->d_Q6_ENABLE_REGISTER_0)
+#define Q6_ENABLE_REGISTER_1 \
+	(scn->targetdef->d_Q6_ENABLE_REGISTER_1)
+#define Q6_CAUSE_REGISTER_0 \
+	(scn->targetdef->d_Q6_CAUSE_REGISTER_0)
+#define Q6_CAUSE_REGISTER_1 \
+	(scn->targetdef->d_Q6_CAUSE_REGISTER_1)
+#define Q6_CLEAR_REGISTER_0 \
+	(scn->targetdef->d_Q6_CLEAR_REGISTER_0)
+#define Q6_CLEAR_REGISTER_1 \
+	(scn->targetdef->d_Q6_CLEAR_REGISTER_1)
+#endif
+
+/**
+ * struct hostdef_s - per-chip host-side (mailbox/interrupt) register map
+ *
+ * Each d_* member holds a chip-specific interrupt-status or mailbox
+ * register address, mask or shift. The chip tables (ar9888_hostdef,
+ * ar6320_hostdef, ar6320v2_hostdef) fill these in, and the accessor
+ * macros below dereference scn->hostdef.
+ */
+struct hostdef_s {
+	uint32_t d_INT_STATUS_ENABLE_ERROR_LSB;
+	uint32_t d_INT_STATUS_ENABLE_ERROR_MASK;
+	uint32_t d_INT_STATUS_ENABLE_CPU_LSB;
+	uint32_t d_INT_STATUS_ENABLE_CPU_MASK;
+	uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB;
+	uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK;
+	uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB;
+	uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK;
+	uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB;
+	uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK;
+	uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB;
+	uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK;
+	uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB;
+	uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK;
+	uint32_t d_INT_STATUS_ENABLE_ADDRESS;
+	uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB;
+	uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK;
+	uint32_t d_HOST_INT_STATUS_ADDRESS;
+	uint32_t d_CPU_INT_STATUS_ADDRESS;
+	uint32_t d_ERROR_INT_STATUS_ADDRESS;
+	uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK;
+	uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB;
+	uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK;
+	uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB;
+	uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK;
+	uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB;
+	uint32_t d_COUNT_DEC_ADDRESS;
+	uint32_t d_HOST_INT_STATUS_CPU_MASK;
+	uint32_t d_HOST_INT_STATUS_CPU_LSB;
+	uint32_t d_HOST_INT_STATUS_ERROR_MASK;
+	uint32_t d_HOST_INT_STATUS_ERROR_LSB;
+	uint32_t d_HOST_INT_STATUS_COUNTER_MASK;
+	uint32_t d_HOST_INT_STATUS_COUNTER_LSB;
+	uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS;
+	uint32_t d_WINDOW_DATA_ADDRESS;
+	uint32_t d_WINDOW_READ_ADDR_ADDRESS;
+	uint32_t d_WINDOW_WRITE_ADDR_ADDRESS;
+	uint32_t d_SOC_GLOBAL_RESET_ADDRESS;
+	uint32_t d_RTC_STATE_ADDRESS;
+	uint32_t d_RTC_STATE_COLD_RESET_MASK;
+	uint32_t d_RTC_STATE_V_MASK;
+	uint32_t d_RTC_STATE_V_LSB;
+	uint32_t d_FW_IND_EVENT_PENDING;
+	uint32_t d_FW_IND_INITIALIZED;
+	uint32_t d_FW_IND_HELPER;
+	uint32_t d_RTC_STATE_V_ON;
+#if defined(SDIO_3_0)
+	/* SDIO 3.0 exposes a dedicated mailbox-data interrupt status field */
+	uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK;
+	uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB;
+#endif
+	uint32_t d_MSI_MAGIC_ADR_ADDRESS;
+	uint32_t d_MSI_MAGIC_ADDRESS;
+	uint32_t d_ENABLE_MSI;
+	uint32_t d_MUX_ID_MASK;
+	uint32_t d_TRANSACTION_ID_MASK;
+	uint32_t d_DESC_DATA_FLAG_MASK;
+};
+/*
+ * Register/field accessor macros.
+ *
+ * Each macro resolves an SDIO register address, bit mask or bit shift
+ * through the runtime-selected 'hostdef' table, so a single driver
+ * binary can drive several target revisions.  NOTE(review): every one
+ * of these macros implicitly requires a variable named 'scn' (with a
+ * valid 'hostdef' pointer) to be in scope at the point of use --
+ * confirm all call sites satisfy this.
+ */
+#define DESC_DATA_FLAG_MASK        (scn->hostdef->d_DESC_DATA_FLAG_MASK)
+#define MUX_ID_MASK                (scn->hostdef->d_MUX_ID_MASK)
+#define TRANSACTION_ID_MASK        (scn->hostdef->d_TRANSACTION_ID_MASK)
+#define ENABLE_MSI                 (scn->hostdef->d_ENABLE_MSI)
+#define INT_STATUS_ENABLE_ERROR_LSB \
+	(scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB)
+#define INT_STATUS_ENABLE_ERROR_MASK \
+	(scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK)
+#define INT_STATUS_ENABLE_CPU_LSB  (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB)
+#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK)
+#define INT_STATUS_ENABLE_COUNTER_LSB \
+	(scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB)
+#define INT_STATUS_ENABLE_COUNTER_MASK \
+	(scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK)
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB \
+	(scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB)
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK \
+	(scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \
+	(scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \
+	(scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \
+	(scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \
+	(scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \
+	(scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB)
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \
+	(scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK)
+#define INT_STATUS_ENABLE_ADDRESS \
+	(scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS)
+#define CPU_INT_STATUS_ENABLE_BIT_LSB \
+	(scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB)
+#define CPU_INT_STATUS_ENABLE_BIT_MASK \
+	(scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK)
+#define HOST_INT_STATUS_ADDRESS     (scn->hostdef->d_HOST_INT_STATUS_ADDRESS)
+#define CPU_INT_STATUS_ADDRESS      (scn->hostdef->d_CPU_INT_STATUS_ADDRESS)
+#define ERROR_INT_STATUS_ADDRESS    (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS)
+#define ERROR_INT_STATUS_WAKEUP_MASK \
+	(scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK)
+#define ERROR_INT_STATUS_WAKEUP_LSB \
+	(scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \
+	(scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \
+	(scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \
+	(scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK)
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \
+	(scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB)
+#define COUNT_DEC_ADDRESS          (scn->hostdef->d_COUNT_DEC_ADDRESS)
+#define HOST_INT_STATUS_CPU_MASK   (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK)
+#define HOST_INT_STATUS_CPU_LSB    (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB)
+#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK)
+#define HOST_INT_STATUS_ERROR_LSB  (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB)
+#define HOST_INT_STATUS_COUNTER_MASK \
+	(scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK)
+#define HOST_INT_STATUS_COUNTER_LSB \
+	(scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB)
+#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS)
+#define WINDOW_DATA_ADDRESS        (scn->hostdef->d_WINDOW_DATA_ADDRESS)
+#define WINDOW_READ_ADDR_ADDRESS   (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS)
+#define WINDOW_WRITE_ADDR_ADDRESS  (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS)
+#define SOC_GLOBAL_RESET_ADDRESS   (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS)
+#define RTC_STATE_ADDRESS          (scn->hostdef->d_RTC_STATE_ADDRESS)
+#define RTC_STATE_COLD_RESET_MASK  (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK)
+#define RTC_STATE_V_MASK           (scn->hostdef->d_RTC_STATE_V_MASK)
+#define RTC_STATE_V_LSB            (scn->hostdef->d_RTC_STATE_V_LSB)
+#define FW_IND_EVENT_PENDING       (scn->hostdef->d_FW_IND_EVENT_PENDING)
+#define FW_IND_INITIALIZED         (scn->hostdef->d_FW_IND_INITIALIZED)
+#define FW_IND_HELPER              (scn->hostdef->d_FW_IND_HELPER)
+#define RTC_STATE_V_ON             (scn->hostdef->d_RTC_STATE_V_ON)
+/* Mailbox-data status bits exist only on SDIO 3.0 capable hostdefs */
+#if defined(SDIO_3_0)
+#define HOST_INT_STATUS_MBOX_DATA_MASK \
+	(scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK)
+#define HOST_INT_STATUS_MBOX_DATA_LSB \
+	(scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB)
+#endif
+
+/*
+ * Unlike the macros above, MSI_MAGIC_ADR_ADDRESS/MSI_MAGIC_ADDRESS are
+ * not mapped through the hostdef table here; when no platform header
+ * provides them they default to 0.  NOTE(review): presumably MSI is
+ * unused on such targets -- verify before relying on these values.
+ */
+#if !defined(MSI_MAGIC_ADR_ADDRESS)
+#define MSI_MAGIC_ADR_ADDRESS 0
+#define MSI_MAGIC_ADDRESS 0
+#endif
+
+/*
+ * SET/GET macros.
+ *
+ * FIELD_SET(x) shifts a value into the field's bit position and trims
+ * it with the field mask; FIELD_GET(x) extracts the field from a raw
+ * register word.  'x' appears exactly once in each expansion, so
+ * arguments with side effects are evaluated only once.  The LSB/MASK
+ * tokens expand to hostdef-table lookups, so 'scn' must be in scope
+ * wherever these macros are used.
+ */
+#define INT_STATUS_ENABLE_ERROR_SET(x) \
+	(((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK)
+#define INT_STATUS_ENABLE_CPU_SET(x) \
+	(((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK)
+#define INT_STATUS_ENABLE_COUNTER_SET(x) \
+	(((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \
+		INT_STATUS_ENABLE_COUNTER_MASK)
+#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \
+	(((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \
+	 INT_STATUS_ENABLE_MBOX_DATA_MASK)
+#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \
+	(((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \
+		CPU_INT_STATUS_ENABLE_BIT_MASK)
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \
+	(((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \
+		ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK)
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \
+	(((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \
+		ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK)
+#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \
+	(((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \
+		COUNTER_INT_STATUS_ENABLE_BIT_MASK)
+#define ERROR_INT_STATUS_WAKEUP_GET(x) \
+	(((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \
+		ERROR_INT_STATUS_WAKEUP_LSB)
+#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \
+	(((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \
+		ERROR_INT_STATUS_RX_UNDERFLOW_LSB)
+#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \
+	(((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \
+		ERROR_INT_STATUS_TX_OVERFLOW_LSB)
+#define HOST_INT_STATUS_CPU_GET(x) \
+	(((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB)
+#define HOST_INT_STATUS_ERROR_GET(x) \
+	(((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB)
+#define HOST_INT_STATUS_COUNTER_GET(x) \
+	(((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB)
+#define RTC_STATE_V_GET(x) \
+	(((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+/* Mailbox-data extraction exists only for SDIO 3.0 builds */
+#if defined(SDIO_3_0)
+#define HOST_INT_STATUS_MBOX_DATA_GET(x) \
+	(((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \
+		HOST_INT_STATUS_MBOX_DATA_LSB)
+#endif
+
+/* Pattern returned/substituted for reads of invalid register locations */
+#define INVALID_REG_LOC_DUMMY_DATA 0xAA
+
+/*
+ * Clock-configuration register addresses for the AR6320 family, one
+ * set per chip revision (v1/v2/v3).  NOTE(review): addresses taken
+ * from the target memory map -- verify against the matching revision
+ * register headers before changing.
+ */
+#define AR6320_CORE_CLK_DIV_ADDR        0x403fa8
+#define AR6320_CPU_PLL_INIT_DONE_ADDR   0x403fd0
+#define AR6320_CPU_SPEED_ADDR           0x403fa4
+#define AR6320V2_CORE_CLK_DIV_ADDR      0x403fd8
+#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0
+#define AR6320V2_CPU_SPEED_ADDR         0x403fd4
+#define AR6320V3_CORE_CLK_DIV_ADDR      0x404028
+#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020
+#define AR6320V3_CPU_SPEED_ADDR         0x404024
+
+/*
+ * A_refclk_speed_t - supported SoC reference-clock rates.
+ *
+ * SOC_REFCLK_UNKNOWN (-1) marks an unsupported reference clock and
+ * selects PLL bypass; the remaining enumerators are explicitly pinned
+ * to 0..7 so their encoding is stable.
+ */
+typedef enum {
+	SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */
+	SOC_REFCLK_48_MHZ = 0,
+	SOC_REFCLK_19_2_MHZ = 1,
+	SOC_REFCLK_24_MHZ = 2,
+	SOC_REFCLK_26_MHZ = 3,
+	SOC_REFCLK_37_4_MHZ = 4,
+	SOC_REFCLK_38_4_MHZ = 5,
+	SOC_REFCLK_40_MHZ = 6,
+	SOC_REFCLK_52_MHZ = 7,
+} A_refclk_speed_t;
+
+/* Legacy A_REFCLK_* names kept as aliases of the SOC_REFCLK_* values */
+#define A_REFCLK_UNKNOWN    SOC_REFCLK_UNKNOWN
+#define A_REFCLK_48_MHZ     SOC_REFCLK_48_MHZ
+#define A_REFCLK_19_2_MHZ   SOC_REFCLK_19_2_MHZ
+#define A_REFCLK_24_MHZ     SOC_REFCLK_24_MHZ
+#define A_REFCLK_26_MHZ     SOC_REFCLK_26_MHZ
+#define A_REFCLK_37_4_MHZ   SOC_REFCLK_37_4_MHZ
+#define A_REFCLK_38_4_MHZ   SOC_REFCLK_38_4_MHZ
+#define A_REFCLK_40_MHZ     SOC_REFCLK_40_MHZ
+#define A_REFCLK_52_MHZ     SOC_REFCLK_52_MHZ
+
+/* Target CPU frequency in Hz (176 MHz) */
+#define TARGET_CPU_FREQ 176000000
+
+/*
+ * wlan_pll_s - WLAN PLL divider settings.
+ * NOTE(review): field roles inferred from names (reference divider,
+ * feedback divider, fractional part, output divider) -- confirm
+ * against the PLL programming sequence that consumes this struct.
+ */
+struct wlan_pll_s {
+	uint32_t refdiv;
+	uint32_t div;
+	uint32_t rnfrac;
+	uint32_t outdiv;
+};
+
+/*
+ * cmnos_clock_s - one reference-clock configuration: the refclk speed
+ * enum, its rate in Hz, the PLL settling time, and the PLL divider
+ * set to program for that clock.
+ */
+struct cmnos_clock_s {
+	A_refclk_speed_t refclk_speed;
+	uint32_t refclk_hz;
+	uint32_t pll_settling_time;     /* 50us */
+	struct wlan_pll_s wlan_pll;
+};
+
+/* tgt_reg_section - one contiguous target register address range */
+typedef struct TGT_REG_SECTION {
+	uint32_t start_addr;
+	uint32_t end_addr;
+} tgt_reg_section;
+
+
+/*
+ * tgt_reg_table - table of register sections.
+ * NOTE(review): 'section_size' is presumably the number of entries in
+ * 'section' rather than a byte count -- confirm at the table's users.
+ */
+typedef struct TGT_REG_TABLE {
+	tgt_reg_section *section;
+	uint32_t section_size;
+} tgt_reg_table;
+#endif /* _REGTABLE_SDIO_H_ */

+ 4 - 0
qdf/inc/qdf_status.h

@@ -60,8 +60,10 @@
  * @QDF_STATUS_E_ENXIO: No such device or address
  * @QDF_STATUS_E_NETDOWN: network is down
  * @QDF_STATUS_E_IO: I/O Error
+ * @QDF_STATUS_E_PENDING: pending status
  * @QDF_STATUS_E_NETRESET: Network dropped connection because of reset
  * @QDF_STATUS_E_SIG: Exit due to received SIGINT
+ * @QDF_STATUS_E_PROTO: protocol error
  * @QDF_STATUS_NOT_INITIALIZED: resource not initialized
  * @QDF_STATUS_E_NULL_VALUE: request is null
  * @QDF_STATUS_PMC_PENDING: request pendign in pmc
@@ -103,8 +105,10 @@ typedef enum {
 	QDF_STATUS_E_ENXIO,
 	QDF_STATUS_E_NETDOWN,
 	QDF_STATUS_E_IO,
+	QDF_STATUS_E_PENDING,
 	QDF_STATUS_E_NETRESET,
 	QDF_STATUS_E_SIG,
+	QDF_STATUS_E_PROTO,
 	QDF_STATUS_NOT_INITIALIZED,
 	QDF_STATUS_E_NULL_VALUE,
 	QDF_STATUS_PMC_PENDING,

+ 2 - 1
qdf/linux/src/i_qdf_types.h

@@ -160,7 +160,8 @@ enum qdf_bus_type {
 	QDF_BUS_TYPE_PCI = 0,
 	QDF_BUS_TYPE_AHB,
 	QDF_BUS_TYPE_SNOC,
-	QDF_BUS_TYPE_SIM
+	QDF_BUS_TYPE_SIM,
+	QDF_BUS_TYPE_SDIO
 };
 
 /**