
qcacld-3.0: dp: change legacy data path api to cdp api

Remove legacy API calls from outside of the data path.
Replace legacy APIs with CDP APIs.
Introduce CDP API wrappers in the dp module.

Change-Id: I345abb70b6ddd7f5768cea2d933e0023c5742b4a
CRs-fixed: 1075736
Leo Chang, 8 years ago
parent
commit
9872676b91
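
In practice the conversion looks like the htt_t2h.c hunk below: callers stop
invoking ol_txrx_* symbols directly and instead go through the cdp wrappers,
passing the soc handle obtained from cds_get_context(QDF_MODULE_ID_SOC). A
minimal before/after sketch (not part of the diff):

    /* before: direct call into the legacy ol data path */
    ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);

    /* after: dispatch through the cdp abstraction via the soc handle */
    cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
                        pdev->txrx_pdev, op_msg_buffer);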

+ 7 - 0
core/dp/htt/htt_internal.h

@@ -132,6 +132,13 @@ struct htt_host_rx_desc_base {
 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.map_index)
 
 #define HTT_RX_RING_BUFF_DBG_LIST          1024
+
+#ifdef MSM_PLATFORM
+#define HTT_ADDRESS_MASK   0xfffffffffffffffe
+#else
+#define HTT_ADDRESS_MASK   0xfffffffe
+#endif /* MSM_PLATFORM */
+
 struct rx_buf_debug {
 	qdf_dma_addr_t paddr;
 	qdf_nbuf_t     nbuf;

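The mask width follows the platform's DMA address width (64-bit for
MSM_PLATFORM, 32-bit otherwise); in either case only bit 0 is cleared. An
illustration of the intended use (the variable and value below are
hypothetical, not from this patch):

    /* hypothetical: clear the low tag bit of a DMA address */
    qdf_dma_addr_t paddr = 0x1000000000000001ULL;
    paddr &= HTT_ADDRESS_MASK;   /* -> 0x1000000000000000 */
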
+ 2 - 2
core/dp/htt/htt_t2h.c

@@ -448,8 +448,8 @@ void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 			     msg_start_ptr,
 			     sizeof(struct htt_wdi_ipa_op_response_t) +
 			     len);
-		ol_txrx_ipa_uc_op_response(pdev->txrx_pdev,
-					   op_msg_buffer);
+		cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
+				pdev->txrx_pdev, op_msg_buffer);
 		break;
 	}
 

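cdp_ipa_op_response() itself is a thin indirection. A sketch of the wrapper
shape, inferred from the ipa_op_response entry registered in ol_ops_ipa
further down (see cdp_txrx_ipa.h for the real definition):

    /* sketch, not the verbatim header */
    static inline void
    cdp_ipa_op_response(ol_txrx_soc_handle soc, void *pdev, uint8_t *op_msg)
    {
            if (soc->ops->ipa_ops && soc->ops->ipa_ops->ipa_op_response)
                    soc->ops->ipa_ops->ipa_op_response(pdev, op_msg);
    }
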
+ 4 - 3
core/dp/ol/inc/ol_cfg.h

@@ -516,7 +516,7 @@ unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
  * @param pdev - handle to the physical device
  */
 unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
-void ol_cfg_set_ipa_uc_tx_partition_base(ol_pdev_handle pdev, uint32_t value);
+void ol_cfg_set_ipa_uc_tx_partition_base(void *pdev, uint32_t value);
 #else
 static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
 	ol_pdev_handle pdev)
@@ -549,7 +549,7 @@ static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
 }
 
 static inline void ol_cfg_set_ipa_uc_tx_partition_base(
-	ol_pdev_handle pdev, uint32_t value)
+	void *pdev, uint32_t value)
 {
 	return;
 }
@@ -564,8 +564,9 @@ static inline void ol_cfg_set_ipa_uc_tx_partition_base(
  *
  * Return: None
  */
-static inline void ol_set_cfg_flow_steering(ol_pdev_handle pdev, uint8_t val)
+static inline void ol_set_cfg_flow_steering(void *ppdev, uint8_t val)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 
 	cfg->flow_steering_enabled = val;

+ 29 - 1
core/dp/ol/inc/ol_defines.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -38,6 +38,34 @@ struct wmi_unified;
 typedef struct wmi_unified *wmi_unified_t;
 
 typedef void *ol_scn_t;
+
+/**
+ * ol_txrx_pdev_handle - opaque handle for txrx physical device
+ * object
+ */
+struct ol_txrx_pdev_t;
+typedef struct ol_txrx_pdev_t *ol_txrx_pdev_handle;
+
+/**
+ * ol_txrx_vdev_handle - opaque handle for txrx virtual device
+ * object
+ */
+struct ol_txrx_vdev_t;
+typedef struct ol_txrx_vdev_t *ol_txrx_vdev_handle;
+
+/**
+ * ol_pdev_handle - opaque handle for the configuration
+ * associated with the physical device
+ */
+struct ol_pdev_t;
+typedef struct ol_pdev_t *ol_pdev_handle;
+
+/**
+ * ol_txrx_peer_handle - opaque handle for txrx peer object
+ */
+struct ol_txrx_peer_t;
+typedef struct ol_txrx_peer_t *ol_txrx_peer_handle;
+
 /**
  * @wmi_event_handler function prototype
  */

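These forward declarations keep the handles opaque: code outside the dp
module can store and pass the pointers but cannot dereference their members.
The complementary idiom, used throughout this patch, is to accept void * at
the cdp boundary and recover the typed handle on the first line of the body,
as in the ol_cfg.h hunk above:

    static inline void ol_set_cfg_flow_steering(void *ppdev, uint8_t val)
    {
            ol_pdev_handle pdev = ppdev;    /* recover the opaque handle */
            struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;

            cfg->flow_steering_enabled = val;
    }
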
+ 1 - 0
core/dp/ol/inc/ol_htt_api.h

@@ -41,6 +41,7 @@
 #include <htc_api.h>            /* HTC_HANDLE */
 #include "htt.h"                /* htt_dbg_stats_type, etc. */
 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle */
+#include <ol_defines.h>
 
 /* TID */
 #define OL_HTT_TID_NON_QOS_UNICAST     16

+ 8 - 99
core/dp/ol/inc/ol_txrx_ctrl_api.h

@@ -41,40 +41,10 @@
 #include <wlan_defs.h>          /* MAX_SPATIAL_STREAM */
 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle, ol_vdev_handle, etc */
 #include <cdp_txrx_cfg.h>
+#include <ol_defines.h>
 
 #define OL_ATH_TX_DRAIN_WAIT_DELAY 50
 
-/* Maximum number of station supported by data path, including BC. */
-#define WLAN_MAX_STA_COUNT  (HAL_NUM_STA)
-
-/* The symbolic station ID return to HDD to specify the packet is bc/mc */
-#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
-
-/* The symbolic station ID return to HDD to specify the packet is
-       to soft-AP itself */
-#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
-
-#define OL_TXQ_PAUSE_REASON_FW                (1 << 0)
-#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
-#define OL_TXQ_PAUSE_REASON_TX_ABORT          (1 << 2)
-#define OL_TXQ_PAUSE_REASON_VDEV_STOP         (1 << 3)
-#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
-
-/* command options for dumpStats*/
-#define WLAN_HDD_STATS        0
-#define WLAN_TXRX_STATS       1
-#define WLAN_TXRX_HIST_STATS  2
-#define WLAN_TXRX_TSO_STATS   3
-#define WLAN_HDD_NETIF_OPER_HISTORY 4
-#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
-#define WLAN_TXRX_DESC_STATS  6
-#define WLAN_HIF_STATS  7
-#define WLAN_LRO_STATS  8
-#define WLAN_SCHEDULER_STATS        21
-#define WLAN_TX_QUEUE_STATS         22
-#define WLAN_BUNDLE_STATS           23
-#define WLAN_CREDIT_STATS           24
-
 /**
  * @brief Set up the data SW subsystem.
  * @details
@@ -97,7 +67,7 @@
  * @return 0 for success or error code
  */
 int
-ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev);
+ol_txrx_pdev_post_attach(void *pdev);
 
 /**
  * @brief Parameter type to be input to ol_txrx_peer_update
@@ -138,43 +108,7 @@ ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
 		    union ol_txrx_peer_update_param_t *param,
 		    enum ol_txrx_peer_update_select_t select);
 
-enum {
-	OL_TX_WMM_AC_BE,
-	OL_TX_WMM_AC_BK,
-	OL_TX_WMM_AC_VI,
-	OL_TX_WMM_AC_VO,
-
-	OL_TX_NUM_WMM_AC
-};
-
-/**
- * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
- * @details
- *   The struct is used to specify informaiton to update TX WMM scheduler.
- */
-struct ol_tx_ac_param_t {
-	uint32_t aifs;
-	uint32_t cwmin;
-	uint32_t cwmax;
-};
-
-struct ol_tx_wmm_param_t {
-	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
-};
-
 #if defined(CONFIG_HL_SUPPORT)
-/**
- * @brief Set paramters of WMM scheduler per AC settings.  .
- * @details
- *  This function applies only to HL systems.
- *
- * @param data_pdev - the physical device being paused
- * @param wmm_param - the wmm parameters
- */
-void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
-		      struct ol_tx_wmm_param_t wmm_param);
-
 /**
  * @brief notify tx data SW that a peer-TID is ready to transmit to.
  * @details
@@ -251,14 +185,6 @@ void
 ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
 
 #else
-
-static inline
-void ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
-		      struct ol_tx_wmm_param_t wmm_param)
-{
-	return;
-}
-
 static inline void
 ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid)
 {
@@ -382,13 +308,9 @@ typedef void
  * @param ctxt - the context argument provided to the callback function
  */
 void
-ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle data_vdev,
+ol_txrx_data_tx_cb_set(void *data_vdev,
 		       ol_txrx_data_tx_cb callback, void *ctxt);
 
-#ifdef FEATURE_RUNTIME_PM
-QDF_STATUS ol_txrx_runtime_suspend(ol_txrx_pdev_handle txrx_pdev);
-QDF_STATUS ol_txrx_runtime_resume(ol_txrx_pdev_handle txrx_pdev);
-#endif
 
 QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout);
 
@@ -503,12 +425,10 @@ ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
 #define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR      /* failure */
 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
 
-QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
+QDF_STATUS ol_txrx_get_vdevid(void *peer, uint8_t *vdev_id);
 
 void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
 
-#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
-
 #define OL_TXRX_RSSI_INVALID 0xffff
 /**
  * @brief Provide the current RSSI average from data frames sent by a peer.
@@ -536,17 +456,6 @@ int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
 #define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
 #endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
 
-/*
- * Bins used for reporting delay histogram:
- * bin 0:  0 - 10  ms delay
- * bin 1: 10 - 20  ms delay
- * bin 2: 20 - 40  ms delay
- * bin 3: 40 - 80  ms delay
- * bin 4: 80 - 160 ms delay
- * bin 5: > 160 ms delay
- */
-#define QCA_TX_DELAY_HIST_REPORT_BINS 6
-
 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
 
 /**
@@ -557,7 +466,7 @@ int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
  */
 void
 ol_txrx_bad_peer_txctl_set_setting(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int enable,
 	int period,
 	int txq_limit);
@@ -570,7 +479,7 @@ ol_txrx_bad_peer_txctl_set_setting(
  */
 void
 ol_txrx_bad_peer_txctl_update_threshold(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int level,
 	int tput_thresh,
 	int tx_limit);
@@ -579,7 +488,7 @@ ol_txrx_bad_peer_txctl_update_threshold(
 
 static inline void
 ol_txrx_bad_peer_txctl_set_setting(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int enable,
 	int period,
 	int txq_limit)
@@ -589,7 +498,7 @@ ol_txrx_bad_peer_txctl_set_setting(
 
 static inline void
 ol_txrx_bad_peer_txctl_update_threshold(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int level,
 	int tput_thresh,
 	int tx_limit)

+ 1 - 0
core/dp/ol/inc/ol_txrx_htt_api.h

@@ -37,6 +37,7 @@
 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
 
 #include <cdp_txrx_cmn.h>      /* ol_txrx_pdev_handle */
+#include <ol_defines.h>
 
 static inline uint16_t *ol_tx_msdu_id_storage(qdf_nbuf_t msdu)
 {

+ 1 - 1
core/dp/ol/inc/ol_txrx_osif_api.h

@@ -82,7 +82,7 @@ qdf_nbuf_t ol_txrx_osif_tso_segment(ol_txrx_vdev_handle txrx_vdev,
 				    int max_seg_payload_bytes,
 				    qdf_nbuf_t jumbo_tcp_frame);
 
-qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle data_vdev, qdf_nbuf_t skb);
+qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb);
 
 void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 			qdf_nbuf_t rx_buf_list);

+ 23 - 22
core/dp/txrx/ol_cfg.c

@@ -39,19 +39,18 @@ unsigned int vow_config = 0;
  *
  * Return: none
  */
-static
 void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
-	struct txrx_pdev_cfg_param_t cfg_param)
+	struct txrx_pdev_cfg_param_t *cfg_param)
 {
 	cfg_ctx->tx_flow_start_queue_offset =
-					cfg_param.tx_flow_start_queue_offset;
+					cfg_param->tx_flow_start_queue_offset;
 	cfg_ctx->tx_flow_stop_queue_th =
-					cfg_param.tx_flow_stop_queue_th;
+					cfg_param->tx_flow_stop_queue_th;
 }
 #else
-static
 void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
-	struct txrx_pdev_cfg_param_t cfg_param)
+	struct txrx_pdev_cfg_param_t *cfg_param)
 {
 	return;
 }
@@ -117,9 +116,9 @@ uint8_t ol_defrag_timeout_check(void)
  * Return: the control device object
  */
 
-ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
-				  struct txrx_pdev_cfg_param_t cfg_param)
+void *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 {
+	struct txrx_pdev_cfg_param_t *cfg_param = pcfg_param;
 	struct txrx_pdev_cfg_t *cfg_ctx;
 
 	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
@@ -148,21 +147,21 @@ ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
 	cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
 	cfg_ctx->rx_fwd_disabled = 0;
 	cfg_ctx->is_packet_log_enabled = 0;
-	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
+	cfg_ctx->is_full_reorder_offload = cfg_param->is_full_reorder_offload;
 	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
-		cfg_param.is_uc_offload_enabled;
-	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
-	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
+		cfg_param->is_uc_offload_enabled;
+	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param->uc_tx_buffer_count;
+	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param->uc_tx_buffer_size;
 	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
-		cfg_param.uc_rx_indication_ring_count;
-	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
-	cfg_ctx->enable_rxthread = cfg_param.enable_rxthread;
+		cfg_param->uc_rx_indication_ring_count;
+	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param->uc_tx_partition_base;
+	cfg_ctx->enable_rxthread = cfg_param->enable_rxthread;
 	cfg_ctx->ip_tcp_udp_checksum_offload =
-		cfg_param.ip_tcp_udp_checksum_offload;
-	cfg_ctx->ce_classify_enabled = cfg_param.ce_classify_enabled;
+		cfg_param->ip_tcp_udp_checksum_offload;
+	cfg_ctx->ce_classify_enabled = cfg_param->ce_classify_enabled;
 
 	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
-	return (ol_pdev_handle) cfg_ctx;
+	return (void *)cfg_ctx;
 }
 
 int ol_cfg_is_high_latency(ol_pdev_handle pdev)
@@ -210,8 +209,9 @@ int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
  * Currently only intra-bss fwd is supported.
  *
  */
-void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
+void ol_set_cfg_rx_fwd_disabled(void *ppdev, uint8_t disable_rx_fwd)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	cfg->rx_fwd_disabled = disable_rx_fwd;
 }
@@ -223,8 +223,9 @@ void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
  * @pdev - handle to the physical device
  * @val - 0 - disable, 1 - enable
  */
-void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val)
+void ol_set_cfg_packet_log_enabled(void *ppdev, uint8_t val)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	cfg->is_packet_log_enabled = val;
 }
@@ -404,9 +405,9 @@ unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
 	return cfg->ipa_uc_rsc.tx_partition_base;
 }
 
-void ol_cfg_set_ipa_uc_tx_partition_base(ol_pdev_handle pdev, uint32_t val)
+void ol_cfg_set_ipa_uc_tx_partition_base(void *pdev, uint32_t val)
 {
-	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	struct txrx_pdev_cfg_t *cfg = pdev;
 	cfg->ipa_uc_rsc.tx_partition_base = val;
 }
 #endif /* IPA_OFFLOAD */

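With ol_pdev_cfg_attach() registered as the cfg_attach op (see ol_ops_cfg
below), the configuration blob now crosses the cdp layer as an opaque
pointer rather than by value. A hypothetical caller-side sketch, assuming a
cdp_cfg_attach() inline mirroring the op (qdf_ctx stands in for the caller's
qdf_device_t):

    struct txrx_pdev_cfg_param_t cfg_param;
    void *ctrl_pdev;

    qdf_mem_zero(&cfg_param, sizeof(cfg_param));
    /* ... populate cfg_param from INI/defaults ... */
    ctrl_pdev = cdp_cfg_attach(cds_get_context(QDF_MODULE_ID_SOC),
                               qdf_ctx, (void *)&cfg_param);
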
+ 9 - 4
core/dp/txrx/ol_tx.c

@@ -142,10 +142,11 @@ static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
  *
  * Return: skb/NULL for success
  */
-qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
+qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
 {
 	struct ol_txrx_pdev_t *pdev;
 	qdf_nbuf_t ret;
+	ol_txrx_vdev_handle vdev = data_vdev;
 
 	if (qdf_unlikely(!vdev)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
@@ -1650,9 +1651,11 @@ ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
  *  Return: null - success, skb - failure
  */
 qdf_nbuf_t
-ol_tx_non_std(ol_txrx_vdev_handle vdev,
+ol_tx_non_std(void *pvdev,
 	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
+
 	if (vdev->pdev->cfg.is_high_latency)
 		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
 	else
@@ -1660,9 +1663,10 @@ ol_tx_non_std(ol_txrx_vdev_handle vdev,
 }
 
 void
-ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
+ol_txrx_data_tx_cb_set(void *pvdev,
 		       ol_txrx_data_tx_cb callback, void *ctxt)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	pdev->tx_data_callback.func = callback;
 	pdev->tx_data_callback.ctxt = ctxt;
@@ -1689,11 +1693,12 @@ ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
  * for a given type of management frame.
  */
 void
-ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
+ol_txrx_mgmt_tx_cb_set(void *ppdev,
 		       uint8_t type,
 		       ol_txrx_mgmt_tx_cb download_cb,
 		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
 	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
 	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;

+ 32 - 15
core/dp/txrx/ol_tx_queue.c

@@ -577,8 +577,9 @@ ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)
 }
 
 void
-ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_txrx_peer_t *peer;
 	/* TO DO: log the queue pause */
@@ -599,8 +600,9 @@ ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
 }
 
 
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_txrx_peer_t *peer;
 	/* TO DO: log the queue unpause */
@@ -624,8 +626,10 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
 
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+void ol_txrx_vdev_flush(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
+
 	ol_tx_queue_vdev_flush(vdev->pdev, vdev);
 }
 
@@ -814,9 +818,10 @@ ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
 }
 
 void
-ol_txrx_bad_peer_txctl_set_setting(struct ol_txrx_pdev_t *pdev,
+ol_txrx_bad_peer_txctl_set_setting(void *ppdev,
 				   int enable, int period, int txq_limit)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	if (enable)
 		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
 	else
@@ -828,10 +833,12 @@ ol_txrx_bad_peer_txctl_set_setting(struct ol_txrx_pdev_t *pdev,
 }
 
 void
-ol_txrx_bad_peer_txctl_update_threshold(struct ol_txrx_pdev_t *pdev,
+ol_txrx_bad_peer_txctl_update_threshold(void *ppdev,
 					int level, int tput_thresh,
 					int tx_limit)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
+
 	/* Set the current settingl */
 	pdev->tx_peer_bal.ctl_thresh[level].tput_thresh =
 		tput_thresh;
@@ -1685,8 +1692,10 @@ ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
  * will be paused.
  *
  */
-void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
+
 	/* TO DO: log the queue pause */
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -1710,8 +1719,9 @@ void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
  *
  */
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	/* TO DO: log the queue unpause */
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -1746,8 +1756,9 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
  *  stale, and would need to be discarded.
  *
  */
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+void ol_txrx_vdev_flush(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	qdf_timer_stop(&vdev->ll_pause.timer);
 	vdev->ll_pause.is_q_timer_on = false;
@@ -1769,7 +1780,7 @@ void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
 #endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
 
 #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev)
+void ol_txrx_vdev_flush(void *data_vdev)
 {
 	return;
 }
@@ -1813,8 +1824,9 @@ ol_txrx_map_to_netif_reason_type(uint32_t reason)
  *
  * Return: none
  */
-void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	enum netif_reason_type netif_reason;
 
@@ -1838,8 +1850,9 @@ void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
  *
  * Return: none
  */
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	enum netif_reason_type netif_reason;
 
@@ -1874,7 +1887,8 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_pause(vdev, reason);
+		cdp_fc_vdev_pause(
+			cds_get_context(QDF_MODULE_ID_SOC), vdev, reason);
 	}
 
 }
@@ -1891,7 +1905,8 @@ void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_unpause(vdev, reason);
+		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
+				    vdev, reason);
 	}
 
 }
@@ -2062,8 +2077,9 @@ ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
 }
 #endif
 
-void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
+void ol_tx_throttle_set_level(void *ppdev, int level)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	int ms = 0;
 
 	if (level >= THROTTLE_LEVEL_MAX) {
@@ -2084,9 +2100,10 @@ void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
 		qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
 }
 
-void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period,
+void ol_tx_throttle_init_period(void *ppdev, int period,
 				uint8_t *dutycycle_level)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	int i;
 
 	/* Set the current throttle level */

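Note that even module-internal iteration (ol_txrx_pdev_pause/unpause) now
routes per-vdev pause through the soc indirection. The round trip under this
patch, sketched:

    /* sketch of the call chain, not code from the diff */
    ol_txrx_pdev_pause(pdev, reason)
        -> cdp_fc_vdev_pause(soc, vdev, reason)      /* caller side, above */
        -> soc->ops->l_flowctl_ops->vdev_pause(...)  /* cdp indirection */
        -> ol_txrx_vdev_pause(vdev, reason)          /* registered below */
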
+ 3 - 2
core/dp/txrx/ol_tx_sched.c

@@ -438,7 +438,7 @@ ol_tx_sched_init_rr(
 }
 
 void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ol_txrx_set_wmm_param(void *data_pdev,
 		      struct ol_tx_wmm_param_t wmm_param)
 {
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
@@ -1057,9 +1057,10 @@ ol_tx_sched_init_wrr_adv(
  * settings of the scheduler, ie. VO, VI, BE, or BK.
  */
 void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ol_txrx_set_wmm_param(void *pdata_pdev,
 		      struct ol_tx_wmm_param_t wmm_param)
 {
+	ol_txrx_pdev_handle data_pdev = pdata_pdev;
 	struct ol_tx_sched_wrr_adv_t def_cfg;
 	struct ol_tx_sched_wrr_adv_t *scheduler =
 					data_pdev->tx_sched.scheduler;

+ 25 - 4
core/dp/txrx/ol_tx_send.c

@@ -116,6 +116,23 @@ ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
 #endif
 
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+/**
+ * ol_txrx_flow_control_cb() - call osif flow control callback
+ * @vdev: vdev handle
+ * @tx_resume: tx resume flag
+ *
+ * Return: none
+ */
+void ol_txrx_flow_control_cb(void *pvdev, bool tx_resume)
+{
+	struct ol_txrx_vdev_t *vdev = pvdev;
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
+	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
+		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+
+	return;
+}
 
 /**
  * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
@@ -867,8 +884,9 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
  * @details
  * @param interval - interval for stats computation
  */
-void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
+void ol_tx_set_compute_interval(void *ppdev, uint32_t interval)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
 }
 
@@ -886,10 +904,11 @@ void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
  * @param out_packet_loss_count - number of packets lost
  */
 void
-ol_tx_packet_count(ol_txrx_pdev_handle pdev,
+ol_tx_packet_count(void *ppdev,
 		   uint16_t *out_packet_count,
 		   uint16_t *out_packet_loss_count, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	*out_packet_count = pdev->packet_count[category];
 	*out_packet_loss_count = pdev->packet_loss_count[category];
 	pdev->packet_count[category] = 0;
@@ -914,10 +933,11 @@ uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
 }
 
 void
-ol_tx_delay(ol_txrx_pdev_handle pdev,
+ol_tx_delay(void *ppdev,
 	    uint32_t *queue_delay_microsec,
 	    uint32_t *tx_delay_microsec, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	int index;
 	uint32_t avg_delay_ticks;
 	struct ol_tx_delay_data *data;
@@ -960,9 +980,10 @@ ol_tx_delay(ol_txrx_pdev_handle pdev,
 }
 
 void
-ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
+ol_tx_delay_hist(void *ppdev,
 		 uint16_t *report_bin_values, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	int index, i, j;
 	struct ol_tx_delay_data *data;
 

+ 626 - 89
core/dp/txrx/ol_txrx.c

@@ -83,6 +83,72 @@
 #include "epping_main.h"
 #include <a_types.h>
 
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id(struct ol_txrx_pdev_t *pdev,
+			      uint8_t local_peer_id);
+
+#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
+QDF_STATUS ol_txrx_peer_state_update(struct ol_txrx_pdev_t *pdev,
+				     uint8_t *peer_mac,
+				     enum ol_txrx_peer_state state);
+void ol_vdev_rx_set_intrabss_fwd(void *vdev, bool val);
+int ol_txrx_get_tx_pending(void *pdev_handle);
+extern void
+ol_txrx_set_wmm_param(void *data_pdev,
+		      struct ol_tx_wmm_param_t wmm_param);
+extern qdf_nbuf_t
+ol_tx_non_std(void *pvdev,
+	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
+extern void ol_tx_throttle_init_period(void *ppdev, int period,
+				uint8_t *dutycycle_level);
+
+extern void ol_tx_throttle_set_level(void *ppdev, int level);
+
+extern void ol_set_cfg_rx_fwd_disabled(void *ppdev, uint8_t disable_rx_fwd);
+
+extern void ol_set_cfg_packet_log_enabled(void *ppdev, uint8_t val);
+
+extern void *ol_pdev_cfg_attach(qdf_device_t osdev, void *cfg_param);
+
+extern void ol_tx_set_flow_control_parameters(
+		struct txrx_pdev_cfg_t *cfg_ctx,
+		struct txrx_pdev_cfg_param_t *cfg_param);
+
+extern qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
+			qdf_nbuf_t skb);
+
+extern void ol_txrx_mgmt_tx_cb_set(void *ppdev,
+		       uint8_t type,
+		       ol_txrx_mgmt_tx_cb download_cb,
+		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt);
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+extern void ol_txrx_flow_control_cb(void *vdev,
+				    bool tx_resume);
+extern void ol_txrx_vdev_flush(void *pvdev);
+extern void ol_txrx_vdev_pause(void *pvdev, uint32_t reason);
+extern void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason);
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+#ifdef QCA_COMPUTE_TX_DELAY
+extern void ol_tx_delay(void *ppdev,
+	    uint32_t *queue_delay_microsec,
+	    uint32_t *tx_delay_microsec, int category);
+
+extern void ol_tx_delay_hist(void *ppdev,
+		 uint16_t *report_bin_values, int category);
+
+extern void ol_tx_packet_count(void *ppdev,
+		   uint16_t *out_packet_count,
+		   uint16_t *out_packet_loss_count, int category);
+
+extern void ol_tx_set_compute_interval(void *ppdev, uint32_t interval);
+#endif /* QCA_COMPUTE_TX_DELAY */
+
+extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
+		    uint64_t **last_pn, uint32_t **rmf_pn_replays);
+
 #ifdef CONFIG_HL_SUPPORT
 
 /**
@@ -93,8 +159,9 @@
  * Return: None
  */
 void
-ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+ol_txrx_copy_mac_addr_raw(void *pvdev, uint8_t *bss_addr)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	if (bss_addr && vdev->last_real_peer &&
 	    !qdf_mem_cmp((u8 *)bss_addr,
 			     vdev->last_real_peer->mac_addr.raw,
@@ -113,10 +180,10 @@ ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
  * Return: None
  */
 void
-ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
-			   ol_txrx_vdev_handle vdev,
-			   uint8_t *peer_id)
+ol_txrx_add_last_real_peer(void *ppdev, void *pvdev, uint8_t *peer_id)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
+	ol_txrx_vdev_handle vdev = pvdev;
 	ol_txrx_peer_handle peer;
 	if (vdev->last_real_peer == NULL) {
 				peer = NULL;
@@ -136,8 +203,9 @@ ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
  * Return: true if last peer is not null
  */
 bool
-is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+is_vdev_restore_last_peer(void *ppeer)
 {
+	struct ol_txrx_peer_t *peer = ppeer;
 	struct ol_txrx_vdev_t *vdev;
 	vdev = peer->vdev;
 	return vdev->last_real_peer && (vdev->last_real_peer == peer);
@@ -153,11 +221,11 @@ is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
  * Return: None
  */
 void
-ol_txrx_update_last_real_peer(
-	ol_txrx_pdev_handle pdev,
-	struct ol_txrx_peer_t *peer,
+ol_txrx_update_last_real_peer(void *ppdev, void *ppeer,
 	uint8_t *peer_id, bool restore_last_peer)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
+	struct ol_txrx_peer_t *peer = ppeer;
 	struct ol_txrx_vdev_t *vdev;
 	vdev = peer->vdev;
 	if (restore_last_peer && (vdev->last_real_peer == NULL)) {
@@ -267,11 +335,12 @@ uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
 
 
 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
-ol_txrx_peer_handle
-ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
-				   ol_txrx_vdev_handle vdev,
-				   uint8_t *peer_addr, uint8_t *peer_id)
+void *
+ol_txrx_find_peer_by_addr_and_vdev(void *ppdev, void *pvdev,
+			uint8_t *peer_addr, uint8_t *peer_id)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_peer_t *peer;
 
 	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
@@ -285,8 +354,9 @@ ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
 	return peer;
 }
 
-QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id)
+QDF_STATUS ol_txrx_get_vdevid(void *ppeer, uint8_t *vdev_id)
 {
+	struct ol_txrx_peer_t *peer = ppeer;
 	if (!peer) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "peer argument is null!!");
@@ -325,7 +395,7 @@ void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
 	return peer->vdev;
 }
 
-ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
+void *ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
 					      uint8_t *peer_addr,
 					      uint8_t *peer_id)
 {
@@ -339,11 +409,12 @@ ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
 		 "%s: peer %p peer->ref_cnt %d", __func__, peer,
 		 qdf_atomic_read(&peer->ref_cnt));
-	return peer;
+	return (void *)peer;
 }
 
-uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer)
+uint16_t ol_txrx_local_peer_id(void *ppeer)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	return peer->local_id;
 }
 
@@ -824,8 +895,9 @@ ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
  * Return: None
  */
 void
-ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+ol_txrx_hl_tdls_flag_reset(void *pvdev, bool flag)
 {
+	struct ol_txrx_vdev_t *vdev = pvdev;
 	vdev->hlTdlsFlag = flag;
 }
 #endif
@@ -966,10 +1038,11 @@ ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
  * Return: txrx pdev handle
  *		  NULL for failure
  */
-ol_txrx_pdev_handle
-ol_txrx_pdev_attach(ol_pdev_handle ctrl_pdev,
-		    HTC_HANDLE htc_pdev, qdf_device_t osdev)
+void *
+ol_txrx_pdev_attach(ol_txrx_soc_handle soc, void *pctrl_pdev,
+		    HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
 {
+	ol_pdev_handle ctrl_pdev = pctrl_pdev;
 	struct ol_txrx_pdev_t *pdev;
 	int i;
 
@@ -1038,8 +1111,9 @@ fail0:
  *
  * Return: void
  */
-void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn)
+void htt_pkt_log_init(void *ppdev, void *scn)
 {
+	struct ol_txrx_pdev_t *handle = ppdev;
 	if (handle->pkt_log_init)
 		return;
 
@@ -1070,7 +1144,7 @@ void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle, void *scn)
 	}
 }
 #else
-void htt_pkt_log_init(ol_txrx_pdev_handle handle, void *ol_sc) { }
+void htt_pkt_log_init(void *handle, void *ol_sc) { }
 void htt_pktlogmod_exit(ol_txrx_pdev_handle handle, void *sc)  { }
 #endif
 
@@ -1081,8 +1155,9 @@ void htt_pktlogmod_exit(ol_txrx_pdev_handle handle, void *sc)  { }
  * Return: 0 for success
  */
 int
-ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev)
+ol_txrx_pdev_post_attach(void *ppdev)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	uint16_t i;
 	uint16_t fail_idx = 0;
 	int ret = 0;
@@ -1561,8 +1636,9 @@ ol_attach_fail:
  *
  * Return: 0 - success 1 - failure
  */
-A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
+A_STATUS ol_txrx_pdev_attach_target(void *ppdev)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	return htt_attach_target(pdev->htt_pdev) == A_OK ? 0:1;
 }
 
@@ -1580,8 +1656,9 @@ A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
  * (ol_txrx_vdev_detach) before the physical device itself is deleted.
  *
  */
-void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
+void ol_txrx_pdev_detach(void *ppdev, int force)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	int i;
 	struct hif_opaque_softc *osc =  cds_get_context(QDF_MODULE_ID_HIF);
 
@@ -1741,11 +1818,12 @@ ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
  *
  * Return: success: handle to new data vdev object, failure: NULL
  */
-ol_txrx_vdev_handle
-ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
+void *
+ol_txrx_vdev_attach(void *ppdev,
 		    uint8_t *vdev_mac_addr,
 		    uint8_t vdev_id, enum wlan_op_mode op_mode)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	struct ol_txrx_vdev_t *vdev;
 	QDF_STATUS qdf_status;
 
@@ -1824,7 +1902,7 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
 	 */
 	htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
 
-	return vdev;
+	return (void *)vdev;
 }
 
 /**
@@ -1846,10 +1924,11 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
  *  vdev objects, so the data SW can use the OS shim vdev handle
  *  when passing rx data received by a vdev up to the OS shim.
  */
-void ol_txrx_vdev_register(ol_txrx_vdev_handle vdev,
+void ol_txrx_vdev_register(void *pvdev,
 				void *osif_vdev,
 				struct ol_txrx_ops *txrx_ops)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
 		qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
 		qdf_assert(0);
@@ -1929,9 +2008,10 @@ void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
  * deletions have completed.
  */
 void
-ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
+ol_txrx_vdev_detach(void *pvdev,
 		    ol_txrx_vdev_delete_cb callback, void *context)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 
 	/* preconditions */
@@ -2081,9 +2161,10 @@ void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
  * Return: handle to new data peer object, or NULL if the attach
  * fails
  */
-ol_txrx_peer_handle
-ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
+void *
+ol_txrx_peer_attach(void *pvdev, uint8_t *peer_mac_addr)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_peer_t *peer;
 	struct ol_txrx_peer_t *temp_peer;
 	uint8_t i;
@@ -2219,7 +2300,7 @@ ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
 
 	ol_txrx_local_peer_id_alloc(pdev, peer);
 
-	return peer;
+	return (void *)peer;
 }
 
 /*
@@ -2260,8 +2341,9 @@ static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
  * Return: the mac_addr from peer
  */
 uint8_t *
-ol_txrx_peer_get_peer_mac_addr(ol_txrx_peer_handle peer)
+ol_txrx_peer_get_peer_mac_addr(void *ppeer)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	if (!peer)
 		return NULL;
 
@@ -2278,9 +2360,10 @@ ol_txrx_peer_get_peer_mac_addr(ol_txrx_peer_handle peer)
  * Return: NONE
  */
 void
-ol_txrx_get_pn_info(ol_txrx_peer_handle peer, uint8_t **last_pn_valid,
+ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
 		    uint64_t **last_pn, uint32_t **rmf_pn_replays)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	*last_pn_valid = &peer->last_rmf_pn_valid;
 	*last_pn = &peer->last_rmf_pn;
 	*rmf_pn_replays = &peer->rmf_pn_replays;
@@ -2292,8 +2375,9 @@ ol_txrx_get_pn_info(ol_txrx_peer_handle peer, uint8_t **last_pn_valid,
  *
  * Return: operation mode.
  */
-int ol_txrx_get_opmode(ol_txrx_vdev_handle vdev)
+int ol_txrx_get_opmode(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	return vdev->opmode;
 }
 
@@ -2303,8 +2387,9 @@ int ol_txrx_get_opmode(ol_txrx_vdev_handle vdev)
  *
  * Return: return peer state
  */
-int ol_txrx_get_peer_state(ol_txrx_peer_handle peer)
+int ol_txrx_get_peer_state(void *ppeer)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	return peer->state;
 }
 
@@ -2314,10 +2399,10 @@ int ol_txrx_get_peer_state(ol_txrx_peer_handle peer)
  *
  * Return: vdev handle from peer
  */
-ol_txrx_vdev_handle
-ol_txrx_get_vdev_for_peer(ol_txrx_peer_handle peer)
+void *ol_txrx_get_vdev_for_peer(void *ppeer)
 {
-	return peer->vdev;
+	ol_txrx_peer_handle peer = ppeer;
+	return (void *)peer->vdev;
 }
 
 /**
@@ -2327,8 +2412,9 @@ ol_txrx_get_vdev_for_peer(ol_txrx_peer_handle peer)
  * Return: vdev mac address
  */
 uint8_t *
-ol_txrx_get_vdev_mac_addr(ol_txrx_vdev_handle vdev)
+ol_txrx_get_vdev_mac_addr(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	if (!vdev)
 		return NULL;
 
@@ -2365,10 +2451,11 @@ ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
  *
  * Return: Handle to control pdev
  */
-ol_pdev_handle
-ol_txrx_get_ctrl_pdev_from_vdev(ol_txrx_vdev_handle vdev)
+void *
+ol_txrx_get_ctrl_pdev_from_vdev(void *pvdev)
 {
-	return vdev->pdev->ctrl_pdev;
+	ol_txrx_vdev_handle vdev = pvdev;
+	return (void *)vdev->pdev->ctrl_pdev;
 }
 
 /**
@@ -2378,8 +2465,9 @@ ol_txrx_get_ctrl_pdev_from_vdev(ol_txrx_vdev_handle vdev)
  * Return: Rx Fwd disabled status
  */
 uint8_t
-ol_txrx_is_rx_fwd_disabled(ol_txrx_vdev_handle vdev)
+ol_txrx_is_rx_fwd_disabled(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
 					vdev->pdev->ctrl_pdev;
 	return cfg->rx_fwd_disabled;
@@ -2394,9 +2482,10 @@ ol_txrx_is_rx_fwd_disabled(ol_txrx_vdev_handle vdev)
  * Return: -1 for failure or total peer nums after adjustment.
  */
 int16_t
-ol_txrx_update_ibss_add_peer_num_of_vdev(ol_txrx_vdev_handle vdev,
+ol_txrx_update_ibss_add_peer_num_of_vdev(void *pvdev,
 					 int16_t peer_num_delta)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	int16_t new_peer_num;
 
 	new_peer_num = vdev->ibss_peer_num + peer_num_delta;
@@ -2416,9 +2505,10 @@ ol_txrx_update_ibss_add_peer_num_of_vdev(ol_txrx_vdev_handle vdev,
  *
  * Return: Old timer value set in vdev.
  */
-uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(ol_txrx_vdev_handle vdev,
+uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(void *pvdev,
 						uint16_t timer_value_sec)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
 
 	vdev->ibss_peer_heart_beat_timer = timer_value_sec;
@@ -2437,10 +2527,11 @@ uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(ol_txrx_vdev_handle vdev,
  * Return: NONE
  */
 void
-ol_txrx_remove_peers_for_vdev(ol_txrx_vdev_handle vdev,
+ol_txrx_remove_peers_for_vdev(void *pvdev,
 			      ol_txrx_vdev_peer_remove_cb callback,
 			      void *callback_context, bool remove_last_peer)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	ol_txrx_peer_handle peer, temp;
 	/* remove all remote peers for vdev */
 	qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
@@ -2485,10 +2576,11 @@ ol_txrx_remove_peers_for_vdev(ol_txrx_vdev_handle vdev,
  * Return: NONE
  */
 void
-ol_txrx_remove_peers_for_vdev_no_lock(ol_txrx_vdev_handle vdev,
+ol_txrx_remove_peers_for_vdev_no_lock(void *pvdev,
 			      ol_txrx_vdev_peer_remove_cb callback,
 			      void *callback_context)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	ol_txrx_peer_handle peer = NULL;
 
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
@@ -2507,9 +2599,10 @@ ol_txrx_remove_peers_for_vdev_no_lock(ol_txrx_vdev_handle vdev,
  *
  * Return: NONE
  */
-void ol_txrx_set_ocb_chan_info(ol_txrx_vdev_handle vdev,
+void ol_txrx_set_ocb_chan_info(void *pvdev,
 			  struct ol_txrx_ocb_set_chan ocb_set_chan)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
 	vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
 }
@@ -2521,8 +2614,9 @@ void ol_txrx_set_ocb_chan_info(ol_txrx_vdev_handle vdev,
  * Return: handle to struct ol_txrx_ocb_chan_info
  */
 struct ol_txrx_ocb_chan_info *
-ol_txrx_get_ocb_chan_info(ol_txrx_vdev_handle vdev)
+ol_txrx_get_ocb_chan_info(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	return vdev->ocb_channel_info;
 }
 
@@ -2919,10 +3013,10 @@ ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
  *
  * Return: QDF Status
  */
-QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
+QDF_STATUS ol_txrx_clear_peer(void *ppdev, uint8_t sta_id)
 {
 	struct ol_txrx_peer_t *peer;
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	struct ol_txrx_pdev_t *pdev = ppdev;
 
 	if (!pdev) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
@@ -2955,8 +3049,9 @@ QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
  *
  * Return: None
  */
-void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
+void ol_txrx_peer_detach(void *ppeer)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	struct ol_txrx_vdev_t *vdev = peer->vdev;
 
 	/* redirect peer's rx delivery function to point to a discard func */
@@ -3015,8 +3110,9 @@ void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
  *
  * Return: None
  */
-void ol_txrx_peer_detach_force_delete(ol_txrx_peer_handle peer)
+void ol_txrx_peer_detach_force_delete(void *ppeer)
 {
+	ol_txrx_peer_handle peer = ppeer;
 	ol_txrx_pdev_handle pdev = peer->vdev->pdev;
 
 	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s peer %p, peer->ref_cnt %d",
@@ -3126,8 +3222,10 @@ QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
  *
  * Return: QDF_STATUS
  */
-QDF_STATUS ol_txrx_runtime_suspend(ol_txrx_pdev_handle txrx_pdev)
+QDF_STATUS ol_txrx_runtime_suspend(void *ppdev)
 {
+	ol_txrx_pdev_handle txrx_pdev = ppdev;
+
 	if (ol_txrx_get_tx_pending(txrx_pdev))
 		return QDF_STATUS_E_BUSY;
 	else
@@ -3142,8 +3240,10 @@ QDF_STATUS ol_txrx_runtime_suspend(ol_txrx_pdev_handle txrx_pdev)
  *
  * Return: QDF_STATUS_SUCCESS
  */
-QDF_STATUS ol_txrx_runtime_resume(ol_txrx_pdev_handle txrx_pdev)
+QDF_STATUS ol_txrx_runtime_resume(void *ppdev)
 {
+	ol_txrx_pdev_handle txrx_pdev = ppdev;
+
 	return QDF_STATUS_SUCCESS;
 }
 #endif
@@ -3181,8 +3281,9 @@ QDF_STATUS ol_txrx_bus_resume(void)
  *
  * Return: count of pending frames
  */
-int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev_handle)
+int ol_txrx_get_tx_pending(void *ppdev)
 {
+	ol_txrx_pdev_handle pdev_handle = ppdev;
 	struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
 	uint32_t total;
 
@@ -3257,9 +3358,10 @@ ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 }
 
 A_STATUS
-ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
+ol_txrx_fw_stats_get(void *pvdev, struct ol_txrx_stats_req *req,
 			bool per_vdev, bool response_expected)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	uint64_t cookie;
 	struct ol_txrx_stats_req_internal *non_volatile_req;
@@ -3835,8 +3937,9 @@ ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
 }
 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
 
-void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val)
+void ol_vdev_rx_set_intrabss_fwd(void *pvdev, bool val)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	if (NULL == vdev)
 		return;
 
@@ -4012,24 +4115,6 @@ ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
 
 	return 0;
 }
-
-/**
- * ol_txrx_flow_control_cb() - call osif flow control callback
- * @vdev: vdev handle
- * @tx_resume: tx resume flag
- *
- * Return: none
- */
-inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
-				    bool tx_resume)
-{
-	qdf_spin_lock_bh(&vdev->flow_control_lock);
-	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
-		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
-	qdf_spin_unlock_bh(&vdev->flow_control_lock);
-
-	return;
-}
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
 #ifdef IPA_OFFLOAD
@@ -4058,9 +4143,11 @@ inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
  * Return: none
  */
 void
-ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
-		 struct ol_txrx_ipa_resources *ipa_res)
+ol_txrx_ipa_uc_get_resource(void *ppdev,
+		struct ol_txrx_ipa_resources *ipa_res)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
+
 	htt_ipa_uc_get_resource(pdev->htt_pdev,
 				&ipa_res->ce_sr_base_paddr,
 				&ipa_res->ce_sr_ring_size,
@@ -4090,10 +4177,11 @@ ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
  * Return: none
  */
 void
-ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+ol_txrx_ipa_uc_set_doorbell_paddr(void *ppdev,
 				  qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
 				  qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
 				      ipa_tx_uc_doorbell_paddr,
 				      ipa_rx_uc_doorbell_paddr);
@@ -4111,8 +4199,9 @@ ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
  * Return: none
  */
 void
-ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx)
+ol_txrx_ipa_uc_set_active(void *ppdev, bool uc_active, bool is_tx)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
 }
 
@@ -4123,9 +4212,9 @@ ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx)
  *
  * Return: none
  */
-void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev,
-				uint8_t *op_msg)
+void ol_txrx_ipa_uc_op_response(void *ppdev, uint8_t *op_msg)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	if (pdev->ipa_uc_op_cb) {
 		pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
 	} else {
@@ -4144,9 +4233,10 @@ void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev,
  *
  * Return: none
  */
-void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+void ol_txrx_ipa_uc_register_op_cb(void *ppdev,
 				   ipa_uc_op_cb_type op_cb, void *osif_dev)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	pdev->ipa_uc_op_cb = op_cb;
 	pdev->osif_dev = osif_dev;
 }
@@ -4157,8 +4247,9 @@ void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
  *
  * Return: none
  */
-void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
+void ol_txrx_ipa_uc_get_stat(void *ppdev)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
 }
 #endif /* IPA_UC_OFFLOAD */
@@ -4774,7 +4865,7 @@ void ol_deregister_lro_flush_cb(void (lro_deinit_cb)(void *))
  * Return: vdev handle
  *            NULL if not found.
  */
-ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
+void *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
 {
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 	ol_txrx_vdev_handle vdev = NULL;
@@ -4787,7 +4878,7 @@ ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
 			break;
 	}
 
-	return vdev;
+	return (void *)vdev;
 }
 
 /**
@@ -4797,11 +4888,457 @@ ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
  *
  * Return: QDF STATUS
  */
-QDF_STATUS ol_txrx_set_wisa_mode(ol_txrx_vdev_handle vdev, bool enable)
+QDF_STATUS ol_txrx_set_wisa_mode(void *pvdev, bool enable)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
+
 	if (!vdev)
 		return QDF_STATUS_E_INVAL;
 
 	vdev->is_wisa_mode_enable = enable;
 	return QDF_STATUS_SUCCESS;
 }
+
+/**
+ * ol_txrx_get_vdev_id() - get interface id from interface context
+ * @pvdev: vdev handle
+ *
+ * Return: virtual interface id
+ */
+uint16_t ol_txrx_get_vdev_id(void *pvdev)
+{
+	ol_txrx_vdev_handle vdev = pvdev;
+	return vdev->vdev_id;
+}
+
+/**
+ * ol_txrx_last_assoc_received() - get time of last assoc received
+ * @ppeer: peer handle
+ *
+ * Return: pointer of the time of last assoc received
+ */
+qdf_time_t *ol_txrx_last_assoc_received(void *ppeer)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	return &peer->last_assoc_rcvd;
+}
+
+/**
+ * ol_txrx_last_disassoc_received() - get time of last disassoc received
+ * @ppeer: peer handle
+ *
+ * Return: pointer of the time of last disassoc received
+ */
+qdf_time_t *ol_txrx_last_disassoc_received(void *ppeer)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	return &peer->last_disassoc_rcvd;
+}
+
+/**
+ * ol_txrx_last_deauth_received() - get time of last deauth received
+ * @ppeer: peer handle
+ *
+ * Return: pointer of the time of last deauth received
+ */
+qdf_time_t *ol_txrx_last_deauth_received(void *ppeer)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	return &peer->last_deauth_rcvd;
+}
+
+/**
+ * ol_txrx_soc_attach_target() - attach soc target
+ * @soc: soc handle
+ *
+ * MCL legacy OL does nothing here
+ *
+ * Return: 0
+ */
+int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
+{
+	/* MCL legacy OL does nothing here */
+	return 0;
+}
+
+/**
+ * ol_txrx_soc_detach() - detach soc target
+ * @soc: soc handle
+ *
+ * MCL legacy OL does nothing here
+ *
+ * Return: none
+ */
+void ol_txrx_soc_detach(void *soc)
+{
+	/* MCL legacy OL does nothing here */
+	return;
+}
+
+/**
+ * ol_txrx_pkt_log_con_service() - connect packet log service
+ * @ppdev: physical device handle
+ * @scn: device context
+ *
+ * Return: none
+ */
+void ol_txrx_pkt_log_con_service(void *ppdev, void *scn)
+{
+	ol_txrx_pdev_handle pdev = ppdev;
+
+	htt_pkt_log_init(pdev, scn);
+	pktlog_htc_attach();
+}
+
+/* OL wrapper functions for CDP abstraction */
+/**
+ * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
+ * @peer: peer handle
+ * @drop: rx packets drop or deliver
+ *
+ * Return: none
+ */
+void ol_txrx_wrapper_flush_rx_frames(void *peer, bool drop)
+{
+	ol_txrx_flush_rx_frames((ol_txrx_peer_handle)peer, drop);
+}
+
+/**
+ * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
+ * @ppdev: pdev handle
+ * @vdev_id: interface id
+ *
+ * Return: virtual interface instance
+ */
+void *ol_txrx_wrapper_get_vdev_from_vdev_id(void *ppdev,
+		uint8_t vdev_id)
+{
+	return ol_txrx_get_vdev_from_vdev_id(vdev_id);
+}
+
+/**
+ * ol_txrx_wrapper_register_peer() - register peer
+ * @pdev: pdev handle
+ * @sta_desc: peer description
+ *
+ * Return: QDF STATUS
+ */
+QDF_STATUS ol_txrx_wrapper_register_peer(void *pdev,
+		struct ol_txrx_desc_type *sta_desc)
+{
+	return ol_txrx_register_peer(sta_desc);
+}
+
+/**
+ * ol_txrx_wrapper_peer_find_by_local_id() - Find a txrx peer handle
+ * @pdev - the data physical device object
+ * @local_peer_id - the ID txrx assigned locally to the peer in question
+ *
+ * The control SW typically uses the txrx peer handle to refer to the peer.
+ * In unusual circumstances, if it is infeasible for the control SW maintain
+ * the txrx peer handle but it can maintain a small integer local peer ID,
+ * this function allows the peer handled to be retrieved, based on the local
+ * peer ID.
+ *
+ * @return handle to the txrx peer object
+ */
+void *
+ol_txrx_wrapper_peer_find_by_local_id(void *pdev, uint8_t local_peer_id)
+{
+	return (void *)ol_txrx_peer_find_by_local_id(
+		(struct ol_txrx_pdev_t *)pdev, local_peer_id);
+}
+
+/**
+ * ol_txrx_wrapper_cfg_is_high_latency() - check if the bus is high latency
+ * @pdev: pdev handle
+ *
+ * Return: 1 high latency bus
+ *         0 low latency bus
+ */
+int ol_txrx_wrapper_cfg_is_high_latency(void *pdev)
+{
+	return ol_cfg_is_high_latency((ol_pdev_handle)pdev);
+}
+
+/**
+ * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
+ * @data_peer - which peer has changed its state
+ * @state - the new state of the peer
+ *
+ *  Specify the peer's authentication state (none, connected, authenticated)
+ *  to allow the data SW to determine whether to filter out invalid data frames.
+ *  (In the "connected" state, where security is enabled, but authentication
+ *  has not completed, tx and rx data frames other than EAPOL or WAPI should
+ *  be discarded.)
+ *  This function is only relevant for systems in which the tx and rx filtering
+ *  are done in the host rather than in the target.
+ *
+ * Return: QDF Status
+ */
+QDF_STATUS ol_txrx_wrapper_peer_state_update(void *pdev,
+		uint8_t *peer_mac, enum ol_txrx_peer_state state)
+{
+	return ol_txrx_peer_state_update((struct ol_txrx_pdev_t *)pdev,
+			peer_mac, state);
+}
+
+/**
+ * ol_txrx_wrapper_find_peer_by_addr() - find peer instance by address
+ * @pdev: pdev handle
+ * @peer_addr: peer address to find
+ * @peer_id: peer id
+ *
+ * Return: peer instance pointer
+ */
+void *ol_txrx_wrapper_find_peer_by_addr(void *pdev,
+		uint8_t *peer_addr, uint8_t *peer_id)
+{
+	return ol_txrx_find_peer_by_addr((ol_txrx_pdev_handle)pdev,
+				peer_addr, peer_id);
+}
+
+/**
+ * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
+ * @cfg_ctx: cfg context
+ * @cfg_param: cfg parameters
+ *
+ * Return: none
+ */
+void ol_txrx_wrapper_set_flow_control_parameters(void *cfg_ctx,
+		void *cfg_param)
+{
+	ol_tx_set_flow_control_parameters(
+		(struct txrx_pdev_cfg_t *)cfg_ctx,
+		(struct txrx_pdev_cfg_param_t *)cfg_param);
+}
+
+static struct cdp_cmn_ops ol_ops_cmn = {
+	.txrx_soc_attach_target = ol_txrx_soc_attach_target,
+	.txrx_vdev_attach = ol_txrx_vdev_attach,
+	.txrx_vdev_detach = ol_txrx_vdev_detach,
+	.txrx_pdev_attach = ol_txrx_pdev_attach,
+	.txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
+	.txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
+	.txrx_pdev_detach = ol_txrx_pdev_detach,
+	.txrx_peer_attach = ol_txrx_peer_attach,
+	.txrx_peer_detach = ol_txrx_peer_detach,
+	.txrx_vdev_register = ol_txrx_vdev_register,
+	.txrx_soc_detach = ol_txrx_soc_detach,
+	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
+	.txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
+	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
+	.txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
+	.txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
+	.txrx_get_tx_pending = ol_txrx_get_tx_pending,
+	.txrx_fw_stats_get = ol_txrx_fw_stats_get
+	/* TODO: Add other functions */
+};
+
+static struct cdp_misc_ops ol_ops_misc = {
+	.set_ibss_vdev_heart_beat_timer =
+		ol_txrx_set_ibss_vdev_heart_beat_timer,
+#ifdef CONFIG_HL_SUPPORT
+	.set_wmm_param = ol_txrx_set_wmm_param,
+#endif /* CONFIG_HL_SUPPORT */
+	.bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
+	.bad_peer_txctl_update_threshold =
+		ol_txrx_bad_peer_txctl_update_threshold,
+	.hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
+	.tx_non_std = ol_tx_non_std,
+	.get_vdev_id = ol_txrx_get_vdev_id,
+	.set_wisa_mode = ol_txrx_set_wisa_mode,
+#ifdef FEATURE_RUNTIME_PM
+	.runtime_suspend = ol_txrx_runtime_suspend,
+	.runtime_resume = ol_txrx_runtime_resume,
+#endif /* FEATURE_RUNTIME_PM */
+	.get_opmode = ol_txrx_get_opmode,
+	.mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
+	.update_mac_id = ol_txrx_update_mac_id,
+	.flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
+	.get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
+	.pkt_log_init = htt_pkt_log_init,
+	.pkt_log_con_service = ol_txrx_pkt_log_con_service
+};
+
+static struct cdp_flowctl_ops ol_ops_flowctl = {
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	.register_pause_cb = ol_txrx_register_pause_cb,
+	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
+	.dump_flow_pool_info = ol_tx_dump_flow_pool_info
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
+};
+
+static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+	.register_tx_flow_control = ol_txrx_register_tx_flow_control,
+	.deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
+	.flow_control_cb = ol_txrx_flow_control_cb,
+	.get_tx_resource = ol_txrx_get_tx_resource,
+	.ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
+	.vdev_flush = ol_txrx_vdev_flush,
+	.vdev_pause = ol_txrx_vdev_pause,
+	.vdev_unpause = ol_txrx_vdev_unpause
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+};
+
+static struct cdp_ipa_ops ol_ops_ipa = {
+#ifdef IPA_OFFLOAD
+	.ipa_get_resource = ol_txrx_ipa_uc_get_resource,
+	.ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
+	.ipa_set_active = ol_txrx_ipa_uc_set_active,
+	.ipa_op_response = ol_txrx_ipa_uc_op_response,
+	.ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
+	.ipa_get_stat = ol_txrx_ipa_uc_get_stat,
+	.ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
+	.ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base
+#endif /* IPA_OFFLOAD */
+};
+
+static struct cdp_lro_ops ol_ops_lro = {
+#ifdef FEATURE_LRO
+	.register_lro_flush_cb = ol_register_lro_flush_cb,
+	.deregister_lro_flush_cb = ol_deregister_lro_flush_cb
+#endif /* FEATURE_LRO */
+};
+
+static struct cdp_bus_ops ol_ops_bus = {
+	.bus_suspend = ol_txrx_bus_suspend,
+	.bus_resume = ol_txrx_bus_resume
+};
+
+static struct cdp_ocb_ops ol_ops_ocb = {
+	.set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
+	.get_ocb_chan_info = ol_txrx_get_ocb_chan_info
+};
+
+static struct cdp_throttle_ops ol_ops_throttle = {
+	.throttle_init_period = ol_tx_throttle_init_period,
+	.throttle_set_level = ol_tx_throttle_set_level
+};
+
+static struct cdp_mob_stats_ops ol_ops_mob_stats = {
+	.display_stats = ol_txrx_display_stats,
+	.clear_stats = ol_txrx_clear_stats,
+	.stats = ol_txrx_stats
+};
+
+static struct cdp_cfg_ops ol_ops_cfg = {
+	.set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
+	.set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
+	.cfg_attach = ol_pdev_cfg_attach,
+	.vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
+	.is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
+	.tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
+	.is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
+	.set_flow_control_parameters =
+		ol_txrx_wrapper_set_flow_control_parameters,
+	.set_flow_steering = ol_set_cfg_flow_steering,
+};
+
+static struct cdp_peer_ops ol_ops_peer = {
+	.register_peer = ol_txrx_wrapper_register_peer,
+	.clear_peer = ol_txrx_clear_peer,
+	.find_peer_by_addr = ol_txrx_wrapper_find_peer_by_addr,
+	.find_peer_by_addr_and_vdev = ol_txrx_find_peer_by_addr_and_vdev,
+	.local_peer_id = ol_txrx_local_peer_id,
+	.peer_find_by_local_id = ol_txrx_wrapper_peer_find_by_local_id,
+	.peer_state_update = ol_txrx_wrapper_peer_state_update,
+	.get_vdevid = ol_txrx_get_vdevid,
+	.get_vdev_by_sta_id = ol_txrx_get_vdev_by_sta_id,
+	.register_ocb_peer = ol_txrx_register_ocb_peer,
+	.peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
+	.get_peer_state = ol_txrx_get_peer_state,
+	.get_vdev_for_peer = ol_txrx_get_vdev_for_peer,
+	.update_ibss_add_peer_num_of_vdev =
+		ol_txrx_update_ibss_add_peer_num_of_vdev,
+	.remove_peers_for_vdev = ol_txrx_remove_peers_for_vdev,
+	.remove_peers_for_vdev_no_lock = ol_txrx_remove_peers_for_vdev_no_lock,
+	.copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
+	.add_last_real_peer = ol_txrx_add_last_real_peer,
+	.last_assoc_received = ol_txrx_last_assoc_received,
+	.last_disassoc_received = ol_txrx_last_disassoc_received,
+	.last_deauth_received = ol_txrx_last_deauth_received,
+	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
+	.update_last_real_peer = ol_txrx_update_last_real_peer,
+	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
+};
+
+static struct cdp_tx_delay_ops ol_ops_delay = {
+#ifdef QCA_COMPUTE_TX_DELAY
+	.tx_delay = ol_tx_delay,
+	.tx_delay_hist = ol_tx_delay_hist,
+	.tx_packet_count = ol_tx_packet_count,
+	.tx_set_compute_interval = ol_tx_set_compute_interval
+#endif /* QCA_COMPUTE_TX_DELAY */
+};
+
+static struct cdp_pmf_ops ol_ops_pmf = {
+	.get_pn_info = ol_txrx_get_pn_info
+};
+
+/* WIN platform specific structures */
+static struct cdp_ctrl_ops ol_ops_ctrl = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_me_ops ol_ops_me = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_mon_ops ol_ops_mon = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_host_stats_ops ol_ops_host_stats = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_wds_ops ol_ops_wds = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_raw_ops ol_ops_raw = {
+	/* EMPTY FOR MCL */
+};
+
+static struct cdp_ops ol_txrx_ops = {
+	.cmn_drv_ops = &ol_ops_cmn,
+	.ctrl_ops = &ol_ops_ctrl,
+	.me_ops = &ol_ops_me,
+	.mon_ops = &ol_ops_mon,
+	.host_stats_ops = &ol_ops_host_stats,
+	.wds_ops = &ol_ops_wds,
+	.raw_ops = &ol_ops_raw,
+	.misc_ops = &ol_ops_misc,
+	.cfg_ops = &ol_ops_cfg,
+	.flowctl_ops = &ol_ops_flowctl,
+	.l_flowctl_ops = &ol_ops_l_flowctl,
+	.ipa_ops = &ol_ops_ipa,
+	.lro_ops = &ol_ops_lro,
+	.bus_ops = &ol_ops_bus,
+	.ocb_ops = &ol_ops_ocb,
+	.peer_ops = &ol_ops_peer,
+	.throttle_ops = &ol_ops_throttle,
+	.mob_stats_ops = &ol_ops_mob_stats,
+	.delay_ops = &ol_ops_delay,
+	.pmf_ops = &ol_ops_pmf
+};
+
+struct cdp_soc_t *ol_txrx_soc_attach(struct ol_if_ops *dp_ol_if_ops)
+{
+	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
+
+	if (!soc) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: OL SOC memory allocation failed\n", __func__);
+		return NULL;
+	}
+
+	soc->ops = &ol_txrx_ops;
+	return soc;
+}
+
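The ops tables registered above are consumed through thin inline cdp wrappers that take the soc handle and forward the call into the registered table. A minimal sketch of that dispatch pattern follows; it assumes the soc -> ops -> ipa_ops -> ipa_op_response chain visible in these registrations, while the null-check style and return handling are illustrative rather than copied from the converged cdp_txrx_*.h headers.

/*
 * Illustrative sketch only: the real wrappers live in the converged
 * cdp_txrx_*.h headers and may differ in return type and error
 * handling. It demonstrates the dispatch chain the tables above feed:
 * soc -> ops -> ipa_ops -> ipa_op_response.
 */
static inline void
cdp_ipa_op_response(struct cdp_soc_t *soc, void *pdev, uint8_t *op_msg)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops ||
	    !soc->ops->ipa_ops->ipa_op_response)
		return;

	soc->ops->ipa_ops->ipa_op_response(pdev, op_msg);
}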

+ 11 - 18
core/dp/txrx/ol_txrx.h

@@ -68,11 +68,11 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
 
 void
-ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag);
+ol_txrx_hl_tdls_flag_reset(void *vdev, bool flag);
 #else
 
 static inline void
-ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+ol_txrx_hl_tdls_flag_reset(void *vdev, bool flag)
 {
 	return;
 }
@@ -84,43 +84,37 @@ void
 ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr);
 
 void
-ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
-			   ol_txrx_vdev_handle vdev,
+ol_txrx_add_last_real_peer(void *pdev, void *vdev,
 			   uint8_t *peer_id);
 
 bool
 is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer);
 
 void
-ol_txrx_update_last_real_peer(
-	ol_txrx_pdev_handle pdev,
-	struct ol_txrx_peer_t *peer,
+ol_txrx_update_last_real_peer(void *ppdev, void *ppeer,
 	uint8_t *peer_id, bool restore_last_peer);
 #else
 
 static inline void
-ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+ol_txrx_copy_mac_addr_raw(void *vdev, uint8_t *bss_addr)
 {
 	return;
 }
 
 static inline void
-ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
-			   ol_txrx_vdev_handle vdev, uint8_t *peer_id)
+ol_txrx_add_last_real_peer(void *pdev, void *vdev, uint8_t *peer_id)
 {
 	return;
 }
 
 static inline bool
-is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+is_vdev_restore_last_peer(void *peer)
 {
 	return  false;
 }
 
 static inline void
-ol_txrx_update_last_real_peer(
-	ol_txrx_pdev_handle pdev,
-	struct ol_txrx_peer_t *peer,
+ol_txrx_update_last_real_peer(void *ppdev, void *ppeer,
 	uint8_t *peer_id, bool restore_last_peer)
 
 {
@@ -128,11 +122,10 @@ ol_txrx_update_last_real_peer(
 }
 #endif
 
-ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
+void *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
 
-void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn);
-QDF_STATUS ol_txrx_set_wisa_mode(ol_txrx_vdev_handle vdev,
+void htt_pkt_log_init(void *handle, void *scn);
+QDF_STATUS ol_txrx_set_wisa_mode(void *vdev,
 			bool enable);
 void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id);
-void ol_txrx_peer_detach_force_delete(ol_txrx_peer_handle peer);
 #endif /* _OL_TXRX__H_ */
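
The prototype changes above follow the handle convention used by the cdp ops tables: exported functions take void * instead of the typed ol_txrx_* handles so they can be stored uniformly in the tables, and the implementation recovers the concrete pointer as its first step. A minimal sketch of that convention, with a deliberately placeholder body:

/*
 * Sketch of the opaque-handle convention: the cdp-facing symbol takes
 * void * and the body casts back to the concrete txrx type. The field
 * assignment is a placeholder, not the actual implementation.
 */
void ol_txrx_hl_tdls_flag_reset(void *pvdev, bool flag)
{
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;

	vdev->hl_tdls_flag = flag;	/* placeholder field name */
}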

+ 0 - 75
core/dp/txrx/ol_txrx_types.h

@@ -60,10 +60,6 @@
  */
 #define MAX_NUM_PEER_ID_PER_PEER 16
 
-#define OL_TXRX_INVALID_NUM_PEERS (-1)
-
-#define OL_TXRX_MAC_ADDR_LEN 6
-
 /* OL_TXRX_NUM_EXT_TIDS -
  * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
  */
@@ -351,15 +347,6 @@ struct ol_mac_addr {
 
 struct ol_tx_sched_t;
 
-
-#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
-/*
- * Each AP will occupy one ID, so it will occupy two IDs for AP-AP mode.
- * And the remainder IDs will be assigned to other 32 clients.
- */
-#define OL_TXRX_NUM_LOCAL_PEER_IDS (2 + 32)
-#endif
-
 #ifndef ol_txrx_local_peer_id_t
 #define ol_txrx_local_peer_id_t uint8_t /* default */
 #endif
@@ -385,16 +372,6 @@ struct ol_tx_delay_data {
 #endif /* QCA_COMPUTE_TX_DELAY */
 
 /* Thermal Mitigation */
-
-enum throttle_level {
-	THROTTLE_LEVEL_0,
-	THROTTLE_LEVEL_1,
-	THROTTLE_LEVEL_2,
-	THROTTLE_LEVEL_3,
-	/* Invalid */
-	THROTTLE_LEVEL_MAX,
-};
-
 enum throttle_phase {
 	THROTTLE_PHASE_OFF,
 	THROTTLE_PHASE_ON,
@@ -954,11 +931,6 @@ struct ol_txrx_pdev_t {
 	struct ol_txrx_peer_t *self_peer;
 };
 
-struct ol_txrx_ocb_chan_info {
-	uint32_t chan_freq;
-	uint16_t disable_rx_stats_hdr:1;
-};
-
 struct ol_txrx_vdev_t {
 	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
 					the parent of this virtual device */
@@ -1222,53 +1194,6 @@ struct ol_txrx_peer_t {
 	qdf_time_t last_deauth_rcvd;
 };
 
-enum ol_rx_err_type {
-	OL_RX_ERR_DEFRAG_MIC,
-	OL_RX_ERR_PN,
-	OL_RX_ERR_UNKNOWN_PEER,
-	OL_RX_ERR_MALFORMED,
-	OL_RX_ERR_TKIP_MIC,
-	OL_RX_ERR_DECRYPT,
-	OL_RX_ERR_MPDU_LENGTH,
-	OL_RX_ERR_ENCRYPT_REQUIRED,
-	OL_RX_ERR_DUP,
-	OL_RX_ERR_UNKNOWN,
-	OL_RX_ERR_FCS,
-	OL_RX_ERR_PRIVACY,
-	OL_RX_ERR_NONE_FRAG,
-	OL_RX_ERR_NONE = 0xFF
-};
-
-/**
- * ol_mic_error_info - carries the information associated with
- * a MIC error
- * @vdev_id: virtual device ID
- * @key_id: Key ID
- * @pn: packet number
- * @sa: source address
- * @da: destination address
- * @ta: transmitter address
- */
-struct ol_mic_error_info {
-	uint8_t vdev_id;
-	uint32_t key_id;
-	uint64_t pn;
-	uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
-	uint8_t da[OL_TXRX_MAC_ADDR_LEN];
-	uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
-};
-
-/**
- * ol_error_info - carries the information associated with an
- * error indicated by the firmware
- * @mic_err: MIC error information
- */
-struct ol_error_info {
-	union {
-		struct ol_mic_error_info mic_err;
-	} u;
-};
-
 struct ol_rx_remote_data {
 	qdf_nbuf_t msdu;
 	uint8_t mac_id;