
qcacld-3.0: Separate out HL and LL TX data path

Separate the HL and LL Tx data paths into different files
so that either path can be compiled out cleanly.

Change-Id: I9033a5bc83bbc95ff00be3f46dc6eda65eab9e71
CRs-Fixed: 2287346
Nirav Shah, 7 years ago
Commit
52d85aacf5

+ 26 - 14
Kbuild

@@ -943,21 +943,37 @@ TXRX_OBJS := $(TXRX_DIR)/ol_txrx.o \
                 $(TXRX_DIR)/ol_rx_reorder_timeout.o \
                 $(TXRX_DIR)/ol_rx_reorder.o \
                 $(TXRX_DIR)/ol_rx_pn.o \
-                $(TXRX_DIR)/ol_tx_queue.o \
                 $(TXRX_DIR)/ol_txrx_peer_find.o \
                 $(TXRX_DIR)/ol_txrx_encap.o \
-                $(TXRX_DIR)/ol_tx_send.o \
-                $(TXRX_DIR)/ol_tx_sched.o \
-                $(TXRX_DIR)/ol_tx_classify.o
+                $(TXRX_DIR)/ol_tx_send.o
 
 ifeq ($(CONFIG_WDI_EVENT_ENABLE), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_event.o
 endif
 
+ifeq ($(CONFIG_LL_DP_SUPPORT), y)
+
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_ll.o
+
+ifeq ($(CONFIG_WLAN_FASTPATH), y)
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_ll_fastpath.o
+else
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_ll_legacy.o
+endif
+
 ifeq ($(CONFIG_WLAN_TX_FLOW_CONTROL_V2), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_flow_control.o
 endif
 
+endif #CONFIG_LL_DP_SUPPORT
+
+ifeq ($(CONFIG_HL_DP_SUPPORT), y)
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_hl.o
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_classify.o
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_sched.o
+TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_queue.o
+endif #CONFIG_HL_DP_SUPPORT
+
 ifeq ($(CONFIG_WLAN_TX_FLOW_CONTROL_LEGACY), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_legacy_flow_control.o
 endif
@@ -966,12 +982,6 @@ ifeq ($(CONFIG_IPA_OFFLOAD), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_txrx_ipa.o
 endif
 
-ifeq ($(CONFIG_WLAN_FASTPATH), y)
-TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_ll_fastpath.o
-else
-TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_ll_legacy.o
-endif
-
 ifeq ($(CONFIG_QCA_SUPPORT_TX_THROTTLE), y)
 TXRX_OBJS +=     $(TXRX_DIR)/ol_tx_throttle.o
 endif
@@ -1810,10 +1820,13 @@ cppflags-$(CONFIG_HIF_PCI) += -DHIF_PCI
 
 cppflags-$(CONFIG_HIF_SNOC) += -DHIF_SNOC
 
+cppflags-$(CONFIG_HL_DP_SUPPORT) += -DCONFIG_HL_SUPPORT
+
+cppflags-$(CONFIG_LL_DP_SUPPORT) += -DCONFIG_LL_DP_SUPPORT
+
 #Enable High Latency related Flags
 ifeq ($(CONFIG_QCA_WIFI_SDIO), y)
-cppflags-y += -DCONFIG_HL_SUPPORT \
-            -DCONFIG_AR6320_SUPPORT \
+cppflags-y += -DCONFIG_AR6320_SUPPORT \
             -DSDIO_3_0 \
             -DHIF_SDIO \
             -DCONFIG_DISABLE_CDC_MAX_PERF_WAR=0 \
@@ -1845,8 +1858,7 @@ cppflags-$(CONFIG_FEATURE_SKB_PRE_ALLOC) += -DFEATURE_SKB_PRE_ALLOC
 #Enable USB specific APIS
 ifeq ($(CONFIG_HIF_USB), y)
 cppflags-y += -DHIF_USB \
-            -DDEBUG_HL_LOGGING \
-            -DCONFIG_HL_SUPPORT
+            -DDEBUG_HL_LOGGING
 endif
 
 #Enable Genoa specific features.
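
With these Kbuild changes the HL-only objects (ol_tx_hl.o, ol_tx_classify.o, ol_tx_sched.o, ol_tx_queue.o) are built only under CONFIG_HL_DP_SUPPORT, the LL-only objects (ol_tx_ll.o plus its fastpath/legacy variant) only under CONFIG_LL_DP_SUPPORT, and the same options are exported to C as -DCONFIG_HL_SUPPORT / -DCONFIG_LL_DP_SUPPORT (the defconfig hunk below picks HL for SDIO/USB targets and LL otherwise). A minimal sketch of how shared code can then select one path at compile time (illustrative only; the wrapper name ol_tx_data_path is hypothetical, but the ol_tx.h hunk further down applies this same pattern to ol_tx_non_std()):

	/* Sketch: one entry point, resolved at build time by the new cppflags. */
	static inline qdf_nbuf_t
	ol_tx_data_path(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
	{
	#if defined(CONFIG_HL_SUPPORT)
		return ol_tx_hl(vdev, msdu_list);         /* credit-based HL path */
	#else
		return ol_tx_ll_wrapper(vdev, msdu_list); /* direct-download LL path */
	#endif
	}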

+ 6 - 0
configs/default_defconfig

@@ -232,6 +232,12 @@ ifeq ($(CONFIG_SLUB_DEBUG), y)
 	CONFIG_WLAN_OBJMGR_DEBUG:= y
 endif
 
+ifeq (y,$(findstring y,$(CONFIG_QCA_WIFI_SDIO) $(CONFIG_HIF_USB)))
+CONFIG_HL_DP_SUPPORT := y
+else
+CONFIG_LL_DP_SUPPORT := y
+endif
+
 ifeq ($(CONFIG_HIF_PCI), y)
 ifneq ($(CONFIG_WLAN_TX_FLOW_CONTROL_V2), y)
 ifneq ($(CONFIG_LITHIUM), y)

+ 1 - 0
configs/genoa.pci.debug_defconfig

@@ -19,6 +19,7 @@ CONFIG_WLAN_NAPI := y
 CONFIG_ATH_11AC_TXCOMPACT := y
 CONFIG_FEATURE_MONITOR_MODE_SUPPORT := y
 CONFIG_QCA_LL_TX_FLOW_CONTROL_RESIZE := y
+CONFIG_LL_DP_SUPPORT := y
 
 ifeq ($(CONFIG_IPA), y)
 CONFIG_IPA_OFFLOAD := y

+ 1 - 0
configs/genoa.pci.perf_defconfig

@@ -18,6 +18,7 @@ CONFIG_WLAN_FASTPATH := y
 CONFIG_WLAN_NAPI := y
 CONFIG_ATH_11AC_TXCOMPACT := y
 CONFIG_QCA_LL_TX_FLOW_CONTROL_RESIZE := y
+CONFIG_LL_DP_SUPPORT := y
 
 ifeq ($(CONFIG_IPA), y)
 CONFIG_IPA_OFFLOAD := y

+ 1 - 1
configs/genoa.sdio.debug_defconfig

@@ -15,7 +15,7 @@ CONFIG_QCA_HL_NETDEV_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
-
+CONFIG_HL_DP_SUPPORT := y
 
 # Debug specific features
 CONFIG_MPC_UT_FRAMEWORK := y

+ 1 - 0
configs/genoa.sdio.perf_defconfig

@@ -15,6 +15,7 @@ CONFIG_QCA_HL_NETDEV_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
+CONFIG_HL_DP_SUPPORT := y
 
 # Debug specific features
 CONFIG_MPC_UT_FRAMEWORK := n

+ 1 - 0
configs/genoa.snoc.debug_defconfig

@@ -26,6 +26,7 @@ CONFIG_ATH_11AC_TXCOMPACT := y
 CONFIG_RX_OL := y
 CONFIG_DESC_DUP_DETECT_DEBUG := y
 CONFIG_DEBUG_RX_RING_BUFFER := y
+CONFIG_LL_DP_SUPPORT := y
 
 ifeq ($(CONFIG_INET_LRO), y)
 CONFIG_WLAN_LRO := y

+ 1 - 0
configs/genoa.snoc.perf_defconfig

@@ -26,6 +26,7 @@ CONFIG_ATH_11AC_TXCOMPACT := y
 CONFIG_RX_OL := y
 CONFIG_DESC_DUP_DETECT_DEBUG := n
 CONFIG_DEBUG_RX_RING_BUFFER := n
+CONFIG_LL_DP_SUPPORT := y
 
 ifeq ($(CONFIG_INET_LRO), y)
 CONFIG_WLAN_LRO := y

+ 1 - 0
configs/genoa.usb.debug_defconfig

@@ -13,6 +13,7 @@ CONFIG_QCA_HL_NETDEV_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
+CONFIG_HL_DP_SUPPORT := y
 
 # Debug specific features
 CONFIG_MPC_UT_FRAMEWORK := y

+ 1 - 0
configs/genoa.usb.perf_defconfig

@@ -13,6 +13,7 @@ CONFIG_QCA_HL_NETDEV_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
+CONFIG_HL_DP_SUPPORT := y
 
 # Debug specific features
 CONFIG_MPC_UT_FRAMEWORK := n

+ 0 - 1212
core/dp/txrx/ol_tx.c

@@ -36,195 +36,11 @@
 #include <ol_tx_queue.h>        /* ol_tx_enqueue */
 #include <ol_tx_sched.h>      /* ol_tx_sched */
 
-
 /* internal header files relevant only for specific systems (Pronto) */
 #include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
 #include <ol_tx.h>
 #include <cdp_txrx_ipa.h>
 
-/*
- * The TXRX module doesn't accept tx frames unless the target has
- * enough descriptors for them.
- * For LL, the TXRX descriptor pool is sized to match the target's
- * descriptor pool.  Hence, if the descriptor allocation in TXRX
- * succeeds, that guarantees that the target has room to accept
- * the new tx frame.
- */
-struct ol_tx_desc_t *
-ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
-		 qdf_nbuf_t msdu,
-		 struct ol_txrx_msdu_info_t *msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc;
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-
-	(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;
-	tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
-	if (qdf_unlikely(!tx_desc)) {
-		/*
-		 * If TSO packet, free associated
-		 * remaining TSO segment descriptors
-		 */
-		if (qdf_nbuf_is_tso(msdu))
-			ol_free_remaining_tso_segs(
-					vdev, msdu_info, true);
-		TXRX_STATS_MSDU_LIST_INCR(
-				pdev, tx.dropped.host_reject, msdu);
-		return NULL;
-	}
-
-	return tx_desc;
-}
-
-#if defined(FEATURE_TSO)
-void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
-				       struct ol_txrx_msdu_info_t *msdu_info,
-				       bool is_tso_seg_mapping_done)
-{
-	struct qdf_tso_seg_elem_t *next_seg;
-	struct qdf_tso_seg_elem_t *free_seg = msdu_info->tso_info.curr_seg;
-	struct ol_txrx_pdev_t *pdev;
-	bool is_last_seg = false;
-
-	if (qdf_unlikely(!vdev)) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"%s:vdev is null", __func__);
-		return;
-	} else {
-		pdev = vdev->pdev;
-		if (qdf_unlikely(!pdev)) {
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				"%s:pdev is null", __func__);
-			return;
-		}
-	}
-
-	if (is_tso_seg_mapping_done) {
-		/*
-		 * TSO segment are mapped already, therefore,
-		 * 1. unmap the tso segments,
-		 * 2. free tso num segment if it is a last segment, and
-		 * 3. free the tso segments.
-		 */
-		 struct qdf_tso_num_seg_elem_t *tso_num_desc =
-				msdu_info->tso_info.tso_num_seg_list;
-
-		if (qdf_unlikely(tso_num_desc == NULL)) {
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s %d TSO common info is NULL!",
-			  __func__, __LINE__);
-			return;
-		}
-
-		while (free_seg) {
-			qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
-			tso_num_desc->num_seg.tso_cmn_num_seg--;
-
-			is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg ==
-				       0) ? true : false;
-			qdf_nbuf_unmap_tso_segment(pdev->osdev, free_seg,
-						   is_last_seg);
-			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
-
-			if (is_last_seg) {
-				ol_tso_num_seg_free(pdev,
-					msdu_info->tso_info.tso_num_seg_list);
-				msdu_info->tso_info.tso_num_seg_list = NULL;
-			}
-
-			next_seg = free_seg->next;
-			free_seg->force_free = 1;
-			ol_tso_free_segment(pdev, free_seg);
-			free_seg = next_seg;
-		}
-	} else {
-		/*
-		 * TSO segment are not mapped therefore,
-		 * free the tso segments only.
-		 */
-		while (free_seg) {
-			next_seg = free_seg->next;
-			free_seg->force_free = 1;
-			ol_tso_free_segment(pdev, free_seg);
-			free_seg = next_seg;
-		}
-	}
-}
-
-/**
- * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
- * related information in the msdu_info meta data
- * @vdev: virtual device handle
- * @msdu: network buffer
- * @msdu_info: meta data associated with the msdu
- *
- * Return: 0 - success, >0 - error
- */
-uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
-			  qdf_nbuf_t msdu,
-			  struct ol_txrx_msdu_info_t *msdu_info)
-{
-	msdu_info->tso_info.curr_seg = NULL;
-	if (qdf_nbuf_is_tso(msdu)) {
-		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
-		struct qdf_tso_num_seg_elem_t *tso_num_seg;
-
-		msdu_info->tso_info.tso_num_seg_list = NULL;
-		msdu_info->tso_info.tso_seg_list = NULL;
-		msdu_info->tso_info.num_segs = num_seg;
-		while (num_seg) {
-			struct qdf_tso_seg_elem_t *tso_seg =
-				ol_tso_alloc_segment(vdev->pdev);
-			if (tso_seg) {
-				qdf_tso_seg_dbg_record(tso_seg,
-						       TSOSEG_LOC_PREPARETSO);
-				tso_seg->next =
-					msdu_info->tso_info.tso_seg_list;
-				msdu_info->tso_info.tso_seg_list
-					= tso_seg;
-				num_seg--;
-			} else {
-				/* Free above alocated TSO segements till now */
-				msdu_info->tso_info.curr_seg =
-					msdu_info->tso_info.tso_seg_list;
-				ol_free_remaining_tso_segs(vdev, msdu_info,
-							   false);
-				return 1;
-			}
-		}
-		tso_num_seg = ol_tso_num_seg_alloc(vdev->pdev);
-		if (tso_num_seg) {
-			tso_num_seg->next = msdu_info->tso_info.
-						tso_num_seg_list;
-			msdu_info->tso_info.tso_num_seg_list = tso_num_seg;
-		} else {
-			/* Free the already allocated num of segments */
-			msdu_info->tso_info.curr_seg =
-				msdu_info->tso_info.tso_seg_list;
-			ol_free_remaining_tso_segs(vdev, msdu_info, false);
-			return 1;
-		}
-
-		if (qdf_unlikely(!qdf_nbuf_get_tso_info(vdev->pdev->osdev,
-					msdu, &(msdu_info->tso_info)))) {
-			/* Free the already allocated num of segments */
-			msdu_info->tso_info.curr_seg =
-				msdu_info->tso_info.tso_seg_list;
-			ol_free_remaining_tso_segs(vdev, msdu_info, false);
-			return 1;
-		}
-
-		msdu_info->tso_info.curr_seg =
-			msdu_info->tso_info.tso_seg_list;
-		num_seg = msdu_info->tso_info.num_segs;
-	} else {
-		msdu_info->tso_info.is_tso = 0;
-		msdu_info->tso_info.num_segs = 1;
-	}
-	return 0;
-}
-#endif
-
 /**
  * ol_tx_data() - send data frame
  * @vdev: virtual device handle
@@ -305,754 +121,6 @@ qdf_nbuf_t ol_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
 }
 #endif
 
-#if defined(FEATURE_TSO)
-/**
- * ol_tx_tso_update_stats() - update TSO stats
- * @pdev: pointer to ol_txrx_pdev_t structure
- * @msdu_info: tso msdu_info for the msdu
- * @msdu: tso mdsu for which stats are updated
- * @tso_msdu_idx: stats index in the global TSO stats array where stats will be
- *                updated
- *
- * Return: None
- */
-void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
-			    struct qdf_tso_info_t  *tso_info, qdf_nbuf_t msdu,
-			    uint32_t tso_msdu_idx)
-{
-	TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
-	TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
-					qdf_nbuf_tcp_tso_size(msdu));
-	TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
-					tso_msdu_idx, qdf_nbuf_len(msdu));
-	TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, tso_msdu_idx,
-					qdf_nbuf_get_nr_frags(msdu));
-}
-
-/**
- * ol_tx_tso_get_stats_idx() - retrieve global TSO stats index and increment it
- * @pdev: pointer to ol_txrx_pdev_t structure
- *
- * Retrieve  the current value of the global variable and increment it. This is
- * done in a spinlock as the global TSO stats may be accessed in parallel by
- * multiple TX streams.
- *
- * Return: The current value of TSO stats index.
- */
-uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
-{
-	uint32_t msdu_stats_idx = 0;
-
-	qdf_spin_lock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
-	msdu_stats_idx = pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx;
-	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx++;
-	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx &=
-					NUM_MAX_TSO_MSDUS_MASK;
-	qdf_spin_unlock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
-
-	TXRX_STATS_TSO_RESET_MSDU(pdev, msdu_stats_idx);
-
-	return msdu_stats_idx;
-}
-#endif
-
-static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
-{
-	return
-		tx_spec &
-		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
-}
-
-static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
-{
-	uint8_t sub_type = 0x1; /* 802.11 MAC header present */
-
-	if (tx_spec & OL_TX_SPEC_NO_AGGR)
-		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
-	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
-		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
-	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
-		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
-	return sub_type;
-}
-
-static qdf_nbuf_t
-ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
-		 enum ol_tx_spec tx_spec,
-		 qdf_nbuf_t msdu_list)
-{
-	qdf_nbuf_t msdu = msdu_list;
-	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
-	struct ol_txrx_msdu_info_t msdu_info;
-
-	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
-	msdu_info.htt.action.tx_comp_req = 0;
-
-	/*
-	 * The msdu_list variable could be used instead of the msdu var,
-	 * but just to clarify which operations are done on a single MSDU
-	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
-	 * within the list.
-	 */
-	while (msdu) {
-		qdf_nbuf_t next;
-		struct ol_tx_desc_t *tx_desc = NULL;
-
-		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
-		msdu_info.peer = NULL;
-		msdu_info.tso_info.is_tso = 0;
-
-		tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
-		if (!tx_desc)
-			return msdu;
-
-		/*
-		 * The netbuf may get linked into a different list inside the
-		 * ol_tx_send function, so store the next pointer before the
-		 * tx_send call.
-		 */
-		next = qdf_nbuf_next(msdu);
-
-		if (tx_spec != OL_TX_SPEC_STD) {
-			if (tx_spec & OL_TX_SPEC_NO_FREE) {
-				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
-			} else if (tx_spec & OL_TX_SPEC_TSO) {
-				tx_desc->pkt_type = OL_TX_FRM_TSO;
-			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
-				uint8_t sub_type =
-					ol_txrx_tx_raw_subtype(tx_spec);
-				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
-						htt_pkt_type_native_wifi,
-						sub_type);
-			} else if (ol_txrx_tx_is_raw(tx_spec)) {
-				/* different types of raw frames */
-				uint8_t sub_type =
-					ol_txrx_tx_raw_subtype(tx_spec);
-				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
-						htt_pkt_type_raw, sub_type);
-			}
-		}
-		/*
-		 * If debug display is enabled, show the meta-data being
-		 * downloaded to the target via the HTT tx descriptor.
-		 */
-		htt_tx_desc_display(tx_desc->htt_tx_desc);
-		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
-		msdu = next;
-	}
-	return NULL;            /* all MSDUs were accepted */
-}
-
-#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
-static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
-				      ol_txrx_vdev_handle vdev,
-				      struct ol_tx_desc_t *tx_desc,
-				      qdf_nbuf_t msdu,
-				      struct ol_txrx_msdu_info_t *tx_msdu_info)
-{
-	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
-		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
-		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
-		if (tx_msdu_info->peer) {
-			/* remove the peer reference added above */
-			ol_txrx_peer_release_ref(tx_msdu_info->peer,
-						 PEER_DEBUG_ID_OL_INTERNAL);
-		}
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#else
-static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
-				      ol_txrx_vdev_handle vdev,
-				      struct ol_tx_desc_t *tx_desc,
-				      qdf_nbuf_t msdu,
-				      struct ol_txrx_msdu_info_t *tx_msdu_info)
-{
-	/* no-op */
-	return 0;
-}
-#endif
-
-/* tx filtering is handled within the target FW */
-#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
-
-/**
- * parse_ocb_tx_header() - Function to check for OCB
- * @msdu:   Pointer to OS packet (qdf_nbuf_t)
- * @tx_ctrl: TX control header on a packet and extract it if present
- *
- * Return: true if ocb parsing is successful
- */
-#define OCB_HEADER_VERSION     1
-static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
-				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
-{
-	struct ether_header *eth_hdr_p;
-	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
-
-	/* Check if TX control header is present */
-	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
-	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
-		/* TX control header is not present. Nothing to do.. */
-		return true;
-
-	/* Remove the ethernet header */
-	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
-
-	/* Parse the TX control header */
-	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
-
-	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
-		if (tx_ctrl)
-			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
-				     sizeof(*tx_ctrl_hdr));
-	} else {
-		/* The TX control header is invalid. */
-		return false;
-	}
-
-	/* Remove the TX control header */
-	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
-	return true;
-}
-
-
-#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)
-
-/**
- * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
- *			   for a HL system.
- * @pdev: the data physical device sending the data
- * @vdev: the virtual device sending the data
- * @msdu: the tx frame
- * @msdu_info: the tx meta data
- *
- * Return: the tx decriptor
- */
-static inline
-struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
-	struct ol_txrx_vdev_t *vdev,
-	qdf_nbuf_t msdu,
-	struct ol_txrx_msdu_info_t *msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc = NULL;
-
-	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
-			TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
-		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
-	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
-		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-				QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
-		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-			QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
-			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
-			ol_txrx_info("Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
-		}
-	}
-	return tx_desc;
-}
-#elif defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
-bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
-{
-	enum qdf_proto_subtype proto_subtype;
-	bool high_prio = false;
-
-	if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
-		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-			QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
-		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-			QDF_NBUF_CB_PACKET_TYPE_EAPOL))
-			high_prio = true;
-	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-			QDF_NBUF_CB_PACKET_TYPE_ARP) {
-		high_prio = true;
-	} else if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
-		   QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
-		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
-		switch (proto_subtype) {
-		case QDF_PROTO_ICMPV6_NA:
-		case QDF_PROTO_ICMPV6_NS:
-			high_prio = true;
-		default:
-			high_prio = false;
-		}
-	}
-	return high_prio;
-}
-
-static inline
-struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
-					 struct ol_txrx_vdev_t *vdev,
-					 qdf_nbuf_t msdu,
-					 struct ol_txrx_msdu_info_t *msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc =
-		ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
-
-	if (!tx_desc)
-		return NULL;
-
-	qdf_spin_lock_bh(&pdev->tx_mutex);
-	/* return if TX flow control disabled */
-	if (vdev->tx_desc_limit == 0) {
-		qdf_spin_unlock_bh(&pdev->tx_mutex);
-		return tx_desc;
-	}
-
-	if (!qdf_atomic_read(&vdev->os_q_paused) &&
-	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
-		/*
-		 * Pause normal priority
-		 * netdev queues if tx desc limit crosses
-		 */
-		pdev->pause_cb(vdev->vdev_id,
-			       WLAN_STOP_NON_PRIORITY_QUEUE,
-			       WLAN_DATA_FLOW_CONTROL);
-		qdf_atomic_set(&vdev->os_q_paused, 1);
-	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
-		   (qdf_atomic_read(&vdev->tx_desc_count)
-		    == vdev->tx_desc_limit)) {
-			/* Pause high priority queue */
-		pdev->pause_cb(vdev->vdev_id,
-			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
-			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
-		vdev->prio_q_paused = 1;
-	}
-	qdf_spin_unlock_bh(&pdev->tx_mutex);
-
-	return tx_desc;
-}
-#else
-static inline
-struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
-					 struct ol_txrx_vdev_t *vdev,
-					 qdf_nbuf_t msdu,
-					 struct ol_txrx_msdu_info_t *msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc = NULL;
-
-	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
-	return tx_desc;
-}
-#endif /* CONFIG_HL_SUPPORT && QCA_HL_NETDEV_FLOW_CONTROL */
-
-#if defined(CONFIG_HL_SUPPORT)
-
-/**
- * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
- *				 for management frame
- * @pdev: the data physical device sending the data
- * @vdev: the virtual device sending the data
- * @tx_mgmt_frm: the tx management frame
- * @tx_msdu_info: the tx meta data
- *
- * Return: the tx decriptor
- */
-static inline
-struct ol_tx_desc_t *
-ol_txrx_mgmt_tx_desc_alloc(
-	struct ol_txrx_pdev_t *pdev,
-	struct ol_txrx_vdev_t *vdev,
-	qdf_nbuf_t tx_mgmt_frm,
-	struct ol_txrx_msdu_info_t *tx_msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc;
-
-	tx_msdu_info->htt.action.tx_comp_req = 1;
-	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
-	return tx_desc;
-}
-
-/**
- * ol_txrx_mgmt_send_frame() - send a management frame
- * @vdev: virtual device sending the frame
- * @tx_desc: tx desc
- * @tx_mgmt_frm: management frame to send
- * @tx_msdu_info: the tx meta data
- * @chanfreq: download change frequency
- *
- * Return:
- *      0 -> the frame is accepted for transmission, -OR-
- *      1 -> the frame was not accepted
- */
-static inline
-int ol_txrx_mgmt_send_frame(
-	struct ol_txrx_vdev_t *vdev,
-	struct ol_tx_desc_t *tx_desc,
-	qdf_nbuf_t tx_mgmt_frm,
-	struct ol_txrx_msdu_info_t *tx_msdu_info,
-	uint16_t chanfreq)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	struct ol_tx_frms_queue_t *txq;
-	int status = 1;
-
-	/*
-	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
-	 * 2.  Invoke the download scheduler.
-	 */
-	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
-	if (!txq) {
-		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
-		 *			     msdu);
-		 */
-		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
-		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
-					     1 /* error */);
-		goto out; /* can't accept the tx mgmt frame */
-	}
-	/* Initialize the HTT tx desc l2 header offset field.
-	 * Even though tx encap does not apply to mgmt frames,
-	 * htt_tx_desc_mpdu_header still needs to be called,
-	 * to specifiy that there was no L2 header added by tx encap,
-	 * so the frame's length does not need to be adjusted to account for
-	 * an added L2 header.
-	 */
-	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
-	if (qdf_unlikely(htt_tx_desc_init(
-			pdev->htt_pdev, tx_desc->htt_tx_desc,
-			tx_desc->htt_tx_desc_paddr,
-			ol_tx_desc_id(pdev, tx_desc),
-			tx_mgmt_frm,
-			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0)))
-		goto out;
-	htt_tx_desc_display(tx_desc->htt_tx_desc);
-	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
-
-	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
-	ol_tx_sched(vdev->pdev);
-	status = 0;
-out:
-	if (tx_msdu_info->peer) {
-		/* remove the peer reference added above */
-		ol_txrx_peer_release_ref(tx_msdu_info->peer,
-					 PEER_DEBUG_ID_OL_INTERNAL);
-	}
-
-	return status;
-}
-
-#else
-
-static inline
-struct ol_tx_desc_t *
-ol_txrx_mgmt_tx_desc_alloc(
-	struct ol_txrx_pdev_t *pdev,
-	struct ol_txrx_vdev_t *vdev,
-	qdf_nbuf_t tx_mgmt_frm,
-	struct ol_txrx_msdu_info_t *tx_msdu_info)
-{
-	struct ol_tx_desc_t *tx_desc;
-
-	/* For LL tx_comp_req is not used so initialized to 0 */
-	tx_msdu_info->htt.action.tx_comp_req = 0;
-	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
-	/* FIX THIS -
-	 * The FW currently has trouble using the host's fragments table
-	 * for management frames.  Until this is fixed, rather than
-	 * specifying the fragment table to the FW, specify just the
-	 * address of the initial fragment.
-	 */
-#if defined(HELIUMPLUS)
-	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
-	 *			  tx_desc);
-	 */
-#endif /* defined(HELIUMPLUS) */
-	if (tx_desc) {
-		/*
-		 * Following the call to ol_tx_desc_ll, frag 0 is the
-		 * HTT tx HW descriptor, and the frame payload is in
-		 * frag 1.
-		 */
-		htt_tx_desc_frags_table_set(
-				pdev->htt_pdev,
-				tx_desc->htt_tx_desc,
-				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
-				0, 0);
-#if defined(HELIUMPLUS) && defined(HELIUMPLUS_DEBUG)
-		ol_txrx_dump_frag_desc(
-				"after htt_tx_desc_frags_table_set",
-				tx_desc);
-#endif /* defined(HELIUMPLUS) */
-	}
-
-	return tx_desc;
-}
-
-static inline
-int ol_txrx_mgmt_send_frame(
-	struct ol_txrx_vdev_t *vdev,
-	struct ol_tx_desc_t *tx_desc,
-	qdf_nbuf_t tx_mgmt_frm,
-	struct ol_txrx_msdu_info_t *tx_msdu_info,
-	uint16_t chanfreq)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-
-	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
-	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
-					QDF_NBUF_TX_PKT_MGMT_TRACK;
-	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
-			  htt_pkt_type_mgmt);
-
-	return 0;
-}
-#endif
-
-/**
- * ol_tx_hl_base() - send tx frames for a HL system.
- * @vdev: the virtual device sending the data
- * @tx_spec: indicate what non-standard transmission actions to apply
- * @msdu_list: the tx frames to send
- * @tx_comp_req: tx completion req
- *
- * Return: NULL if all MSDUs are accepted
- */
-static inline qdf_nbuf_t
-ol_tx_hl_base(
-	ol_txrx_vdev_handle vdev,
-	enum ol_tx_spec tx_spec,
-	qdf_nbuf_t msdu_list,
-	int tx_comp_req)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	qdf_nbuf_t msdu = msdu_list;
-	struct ol_txrx_msdu_info_t tx_msdu_info;
-	struct ocb_tx_ctrl_hdr_t tx_ctrl;
-	htt_pdev_handle htt_pdev = pdev->htt_pdev;
-
-	tx_msdu_info.tso_info.is_tso = 0;
-
-	/*
-	 * The msdu_list variable could be used instead of the msdu var,
-	 * but just to clarify which operations are done on a single MSDU
-	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
-	 * within the list.
-	 */
-	while (msdu) {
-		qdf_nbuf_t next;
-		struct ol_tx_frms_queue_t *txq;
-		struct ol_tx_desc_t *tx_desc = NULL;
-
-		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
-		tx_msdu_info.peer = NULL;
-		/*
-		 * The netbuf will get stored into a (peer-TID) tx queue list
-		 * inside the ol_tx_classify_store function or else dropped,
-		 * so store the next pointer immediately.
-		 */
-		next = qdf_nbuf_next(msdu);
-
-		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
-
-		if (!tx_desc) {
-			/*
-			 * If we're out of tx descs, there's no need to try
-			 * to allocate tx descs for the remaining MSDUs.
-			 */
-			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
-						  msdu);
-			return msdu; /* the list of unaccepted MSDUs */
-		}
-
-		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
-
-		if (tx_spec != OL_TX_SPEC_STD) {
-#if defined(FEATURE_WLAN_TDLS)
-			if (tx_spec & OL_TX_SPEC_NO_FREE) {
-				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
-			} else if (tx_spec & OL_TX_SPEC_TSO) {
-#else
-				if (tx_spec & OL_TX_SPEC_TSO) {
-#endif
-					tx_desc->pkt_type = OL_TX_FRM_TSO;
-				}
-				if (ol_txrx_tx_is_raw(tx_spec)) {
-					/* CHECK THIS: does this need
-					 * to happen after htt_tx_desc_init?
-					 */
-					/* different types of raw frames */
-					u_int8_t sub_type =
-						ol_txrx_tx_raw_subtype(
-								tx_spec);
-					htt_tx_desc_type(htt_pdev,
-							 tx_desc->htt_tx_desc,
-							 htt_pkt_type_raw,
-							 sub_type);
-				}
-			}
-
-			tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
-			tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
-			tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
-			tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
-			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
-
-			/* If the vdev is in OCB mode,
-			 * parse the tx control header.
-			 */
-			if (vdev->opmode == wlan_op_mode_ocb) {
-				if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
-					/* There was an error parsing
-					 * the header.Skip this packet.
-					 */
-					goto MSDU_LOOP_BOTTOM;
-				}
-			}
-
-			txq = ol_tx_classify(vdev, tx_desc, msdu,
-							&tx_msdu_info);
-
-			if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
-				/* drop this frame,
-				 * but try sending subsequent frames
-				 */
-				/*TXRX_STATS_MSDU_LIST_INCR(pdev,
-							tx.dropped.no_txq,
-							msdu);*/
-				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
-				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
-				if (tx_msdu_info.peer) {
-					/* remove the peer reference
-					 * added above */
-					ol_txrx_peer_release_ref(
-						tx_msdu_info.peer,
-						PEER_DEBUG_ID_OL_INTERNAL);
-				}
-				goto MSDU_LOOP_BOTTOM;
-			}
-
-			if (tx_msdu_info.peer) {
-				/*
-				 * If the state is not associated then drop all
-				 * the data packets received for that peer
-				 */
-				if (tx_msdu_info.peer->state ==
-						OL_TXRX_PEER_STATE_DISC) {
-					qdf_atomic_inc(
-						&pdev->tx_queue.rsrc_cnt);
-					ol_tx_desc_frame_free_nonstd(pdev,
-								     tx_desc,
-								     1);
-					ol_txrx_peer_release_ref(
-						tx_msdu_info.peer,
-						PEER_DEBUG_ID_OL_INTERNAL);
-					msdu = next;
-					continue;
-				} else if (tx_msdu_info.peer->state !=
-						OL_TXRX_PEER_STATE_AUTH) {
-					if (tx_msdu_info.htt.info.ethertype !=
-						ETHERTYPE_PAE &&
-						tx_msdu_info.htt.info.ethertype
-							!= ETHERTYPE_WAI) {
-						qdf_atomic_inc(
-							&pdev->tx_queue.
-								rsrc_cnt);
-						ol_tx_desc_frame_free_nonstd(
-								pdev,
-								tx_desc, 1);
-						ol_txrx_peer_release_ref(
-						 tx_msdu_info.peer,
-						 PEER_DEBUG_ID_OL_INTERNAL);
-						msdu = next;
-						continue;
-					}
-				}
-			}
-			/*
-			 * Initialize the HTT tx desc l2 header offset field.
-			 * htt_tx_desc_mpdu_header  needs to be called to
-			 * make sure, the l2 header size is initialized
-			 * correctly to handle cases where TX ENCAP is disabled
-			 * or Tx Encap fails to perform Encap
-			 */
-			htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
-
-			/*
-			 * Note: when the driver is built without support for
-			 * SW tx encap,the following macro is a no-op.
-			 * When the driver is built with support for SW tx
-			 * encap, it performs encap, and if an error is
-			 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
-			 */
-			if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
-						&tx_msdu_info))
-				goto MSDU_LOOP_BOTTOM;
-
-			/* initialize the HW tx descriptor */
-			htt_tx_desc_init(
-					pdev->htt_pdev, tx_desc->htt_tx_desc,
-					tx_desc->htt_tx_desc_paddr,
-					ol_tx_desc_id(pdev, tx_desc),
-					msdu,
-					&tx_msdu_info.htt,
-					&tx_msdu_info.tso_info,
-					&tx_ctrl,
-					vdev->opmode == wlan_op_mode_ocb);
-			/*
-			 * If debug display is enabled, show the meta-data
-			 * being downloaded to the target via the
-			 * HTT tx descriptor.
-			 */
-			htt_tx_desc_display(tx_desc->htt_tx_desc);
-
-			ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
-			if (tx_msdu_info.peer) {
-				OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
-							msdu);
-				/* remove the peer reference added above */
-				ol_txrx_peer_release_ref
-						(tx_msdu_info.peer,
-						 PEER_DEBUG_ID_OL_INTERNAL);
-			}
-MSDU_LOOP_BOTTOM:
-			msdu = next;
-		}
-		ol_tx_sched(pdev);
-		return NULL; /* all MSDUs were accepted */
-}
-
-qdf_nbuf_t
-ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
-		pdev->cfg.request_tx_comp;
-
-	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
-}
-
-static qdf_nbuf_t
-ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
-		 enum ol_tx_spec tx_spec,
-		 qdf_nbuf_t msdu_list)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
-		pdev->cfg.request_tx_comp;
-
-	if (!tx_comp_req) {
-		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
-		    (pdev->tx_data_callback.func))
-			tx_comp_req = 1;
-	}
-	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
-}
-
-qdf_nbuf_t
-ol_tx_non_std(struct cdp_vdev *pvdev,
-	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-
-	if (vdev->pdev->cfg.is_high_latency)
-		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
-	else
-		return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
-}
-
 void
 ol_txrx_data_tx_cb_set(struct cdp_vdev *pvdev,
 		       ol_txrx_data_tx_cb callback, void *ctxt)
@@ -1077,47 +145,6 @@ ol_txrx_mgmt_tx_cb_set(struct cdp_pdev *ppdev, uint8_t type,
 	pdev->tx_mgmt_cb.ctxt = ctxt;
 }
 
-#if defined(HELIUMPLUS)
-void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
-{
-	uint32_t                *frag_ptr_i_p;
-	int                     i;
-
-	qdf_print("OL TX Descriptor 0x%pK msdu_id %d\n",
-		 tx_desc, tx_desc->id);
-	qdf_print("HTT TX Descriptor vaddr: 0x%pK paddr: %pad",
-		 tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
-	qdf_print("%s %d: Fragment Descriptor 0x%pK (paddr=%pad)",
-		 __func__, __LINE__, tx_desc->htt_frag_desc,
-		 &tx_desc->htt_frag_desc_paddr);
-
-	/*
-	 * it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
-	 * is already de-referrable (=> in virtual address space)
-	 */
-	frag_ptr_i_p = tx_desc->htt_frag_desc;
-
-	/* Dump 6 words of TSO flags */
-	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
-		       DUMP_PREFIX_NONE, 8, 4,
-		       frag_ptr_i_p, 24, true);
-
-	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
-
-	i = 0;
-	while (*frag_ptr_i_p) {
-		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
-			       DUMP_PREFIX_NONE, 8, 4,
-			       frag_ptr_i_p, 8, true);
-		i++;
-		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
-			break;
-		/* jump to next  pointer - skip length */
-		frag_ptr_i_p += 2;
-	}
-}
-#endif /* HELIUMPLUS */
-
 int
 ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
 		  qdf_nbuf_t tx_mgmt_frm,
@@ -1194,242 +221,3 @@ ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
 
 	return 0;               /* accepted the tx mgmt frame */
 }
-
-qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
-			  qdf_nbuf_t msdu, uint16_t peer_id)
-{
-	struct ol_tx_desc_t *tx_desc = NULL;
-	struct ol_txrx_msdu_info_t msdu_info;
-
-	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
-	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
-	msdu_info.peer = NULL;
-	msdu_info.htt.action.tx_comp_req = 0;
-	msdu_info.tso_info.is_tso = 0;
-
-	tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
-	if (!tx_desc)
-		return msdu;
-
-	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
-
-	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
-
-	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
-
-	return NULL;
-}
-
-#if defined(FEATURE_TSO)
-/**
- * ol_tso_seg_list_init() - function to initialise the tso seg freelist
- * @pdev: the data physical device sending the data
- * @num_seg: number of segments needs to be intialised
- *
- * Return: none
- */
-void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
-{
-	int i = 0;
-	struct qdf_tso_seg_elem_t *c_element;
-
-	/* Host should not allocate any c_element. */
-	if (num_seg <= 0) {
-		ol_txrx_err("%s: ERROR: Pool size passed is 0",
-			   __func__);
-		QDF_BUG(0);
-		pdev->tso_seg_pool.pool_size = i;
-		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
-		return;
-	}
-
-	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
-	pdev->tso_seg_pool.freelist = c_element;
-	for (i = 0; i < (num_seg - 1); i++) {
-		if (qdf_unlikely(!c_element)) {
-			ol_txrx_err("%s: ERROR: c_element NULL for seg %d",
-				   __func__, i);
-			QDF_BUG(0);
-			pdev->tso_seg_pool.pool_size = i;
-			pdev->tso_seg_pool.num_free = i;
-			qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
-			return;
-		}
-		/* set the freelist bit and magic cookie*/
-		c_element->on_freelist = 1;
-		c_element->cookie = TSO_SEG_MAGIC_COOKIE;
-#ifdef TSOSEG_DEBUG
-		c_element->dbg.txdesc = NULL;
-		qdf_atomic_init(&c_element->dbg.cur); /* history empty */
-		qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT1);
-#endif /* TSOSEG_DEBUG */
-		c_element->next =
-			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
-		c_element = c_element->next;
-	}
-	/*
-	 * NULL check for the last c_element of the list or
-	 * first c_element if num_seg is equal to 1.
-	 */
-	if (qdf_unlikely(!c_element)) {
-		ol_txrx_err("%s: ERROR: c_element NULL for seg %d",
-			   __func__, i);
-		QDF_BUG(0);
-		pdev->tso_seg_pool.pool_size = i;
-		pdev->tso_seg_pool.num_free = i;
-		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
-		return;
-	}
-	c_element->on_freelist = 1;
-	c_element->cookie = TSO_SEG_MAGIC_COOKIE;
-#ifdef TSOSEG_DEBUG
-	qdf_tso_seg_dbg_init(c_element);
-	qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT2);
-#endif /* TSOSEG_DEBUG */
-	c_element->next = NULL;
-	pdev->tso_seg_pool.pool_size = num_seg;
-	pdev->tso_seg_pool.num_free = num_seg;
-	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
-}
-
-/**
- * ol_tso_seg_list_deinit() - function to de-initialise the tso seg freelist
- * @pdev: the data physical device sending the data
- *
- * Return: none
- */
-void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
-{
-	int i;
-	struct qdf_tso_seg_elem_t *c_element;
-	struct qdf_tso_seg_elem_t *temp;
-
-	/* pool size 0 implies that tso seg list is not initialised*/
-	if (pdev->tso_seg_pool.freelist == NULL &&
-	    pdev->tso_seg_pool.pool_size == 0)
-		return;
-
-	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
-	c_element = pdev->tso_seg_pool.freelist;
-	i = pdev->tso_seg_pool.pool_size;
-
-	pdev->tso_seg_pool.freelist = NULL;
-	pdev->tso_seg_pool.num_free = 0;
-	pdev->tso_seg_pool.pool_size = 0;
-
-	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
-	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
-
-	while (i-- > 0 && c_element) {
-		temp = c_element->next;
-		if (c_element->on_freelist != 1) {
-			qdf_tso_seg_dbg_bug("seg already freed (double?)");
-			return;
-		} else if (c_element->cookie != TSO_SEG_MAGIC_COOKIE) {
-			qdf_tso_seg_dbg_bug("seg cookie is bad (corruption?)");
-			return;
-		}
-		/* free this seg, so reset the cookie value*/
-		c_element->cookie = 0;
-		qdf_mem_free(c_element);
-		c_element = temp;
-	}
-}
-
-/**
- * ol_tso_num_seg_list_init() - function to initialise the freelist of elements
- *				use to count the num of tso segments in jumbo
- *				skb packet freelist
- * @pdev: the data physical device sending the data
- * @num_seg: number of elements needs to be intialised
- *
- * Return: none
- */
-void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
-{
-	int i = 0;
-	struct qdf_tso_num_seg_elem_t *c_element;
-
-	/* Host should not allocate any c_element. */
-	if (num_seg <= 0) {
-		ol_txrx_err("%s: ERROR: Pool size passed is 0",
-			   __func__);
-		QDF_BUG(0);
-		pdev->tso_num_seg_pool.num_seg_pool_size = i;
-		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-		return;
-	}
-
-	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
-	pdev->tso_num_seg_pool.freelist = c_element;
-	for (i = 0; i < (num_seg - 1); i++) {
-		if (qdf_unlikely(!c_element)) {
-			ol_txrx_err("%s: ERROR: c_element NULL for num of seg %d",
-				__func__, i);
-			QDF_BUG(0);
-			pdev->tso_num_seg_pool.num_seg_pool_size = i;
-			pdev->tso_num_seg_pool.num_free = i;
-			qdf_spinlock_create(&pdev->tso_num_seg_pool.
-							tso_num_seg_mutex);
-			return;
-		}
-		c_element->next =
-			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
-		c_element = c_element->next;
-	}
-	/*
-	 * NULL check for the last c_element of the list or
-	 * first c_element if num_seg is equal to 1.
-	 */
-	if (qdf_unlikely(!c_element)) {
-		ol_txrx_err("%s: ERROR: c_element NULL for num of seg %d",
-			   __func__, i);
-		QDF_BUG(0);
-		pdev->tso_num_seg_pool.num_seg_pool_size = i;
-		pdev->tso_num_seg_pool.num_free = i;
-		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-		return;
-	}
-	c_element->next = NULL;
-	pdev->tso_num_seg_pool.num_seg_pool_size = num_seg;
-	pdev->tso_num_seg_pool.num_free = num_seg;
-	qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-}
-
-/**
- * ol_tso_num_seg_list_deinit() - function to de-initialise the freelist of
- *				  elements use to count the num of tso segment
- *				  in a jumbo skb packet freelist
- * @pdev: the data physical device sending the data
- *
- * Return: none
- */
-void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
-{
-	int i;
-	struct qdf_tso_num_seg_elem_t *c_element;
-	struct qdf_tso_num_seg_elem_t *temp;
-
-	/* pool size 0 implies that tso num seg list is not initialised*/
-	if (pdev->tso_num_seg_pool.freelist == NULL &&
-	    pdev->tso_num_seg_pool.num_seg_pool_size == 0)
-		return;
-
-	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-	c_element = pdev->tso_num_seg_pool.freelist;
-	i = pdev->tso_num_seg_pool.num_seg_pool_size;
-
-	pdev->tso_num_seg_pool.freelist = NULL;
-	pdev->tso_num_seg_pool.num_free = 0;
-	pdev->tso_num_seg_pool.num_seg_pool_size = 0;
-
-	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-	qdf_spinlock_destroy(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
-
-	while (i-- > 0 && c_element) {
-		temp = c_element->next;
-		qdf_mem_free(c_element);
-		c_element = temp;
-	}
-}
-#endif /* FEATURE_TSO */
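
Everything deleted from ol_tx.c here is moved rather than dropped: the HL descriptor allocation, OCB header parsing, management-frame helpers and ol_tx_hl_base() reappear in the new core/dp/txrx/ol_tx_hl.c below, while the LL prepare/reinject and TSO pool code presumably lands in the LL-only objects added to Kbuild (ol_tx_ll.c and its fastpath/legacy variants, not part of this excerpt). LL-only call sites are expected to sit behind the same guard that now wraps the ol_tx_prepare_ll() declaration in ol_tx.h; a hedged sketch, with the helper name ol_tx_ll_one being hypothetical:

	#ifdef CONFIG_LL_DP_SUPPORT
	/* Sketch only: compiles out of HL builds together with ol_tx_prepare_ll(). */
	static qdf_nbuf_t ol_tx_ll_one(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu,
				       struct ol_txrx_msdu_info_t *msdu_info)
	{
		struct ol_tx_desc_t *tx_desc = ol_tx_prepare_ll(vdev, msdu, msdu_info);

		if (!tx_desc)
			return msdu;	/* no descriptor: hand the frame back */

		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		return NULL;
	}
	#endif /* CONFIG_LL_DP_SUPPORT */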

+ 48 - 2
core/dp/txrx/ol_tx.h

@@ -42,10 +42,12 @@
 qdf_nbuf_t ol_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb);
 #endif
 
+#ifdef CONFIG_LL_DP_SUPPORT
 struct ol_tx_desc_t *
 ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
 		 qdf_nbuf_t msdu,
 		 struct ol_txrx_msdu_info_t *msdu_info);
+#endif
 
 qdf_nbuf_t ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
 #ifdef WLAN_FEATURE_FASTPATH
@@ -89,6 +91,27 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
 }
 #endif
 
+static inline
+int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
+{
+	return	tx_spec &
+		(OL_TX_SPEC_RAW | OL_TX_SPEC_NO_AGGR | OL_TX_SPEC_NO_ENCRYPT);
+}
+
+static inline
+uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
+{
+	uint8_t sub_type = 0x1; /* 802.11 MAC header present */
+
+	if (tx_spec & OL_TX_SPEC_NO_AGGR)
+		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
+	if (tx_spec & OL_TX_SPEC_NO_ENCRYPT)
+		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
+	if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT)
+		sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
+	return sub_type;
+}
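
	/*
	 * Worked example for the helpers above: a raw frame sent without
	 * encryption.  The values follow directly from the code; the HTT
	 * shift macro is defined in the HTT headers, and the wrapper name
	 * below is illustrative only.
	 */
	static inline uint8_t ol_tx_raw_subtype_example(void)
	{
		enum ol_tx_spec spec = OL_TX_SPEC_RAW | OL_TX_SPEC_NO_ENCRYPT;

		/*
		 * ol_txrx_tx_is_raw(spec) is non-zero here, and the return
		 * value is 0x1 | (0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S):
		 * "802.11 MAC header present" plus the no-encrypt subtype bit.
		 */
		return ol_txrx_tx_raw_subtype(spec);
	}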
+
 /**
  * ol_tx_hl() - transmit tx frames for a HL system.
  * @vdev: the virtual device transmit the data
@@ -120,9 +143,32 @@ ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
  *
  *  Return: null - success, skb - failure
  */
-qdf_nbuf_t
+#ifdef CONFIG_HL_SUPPORT
+qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
+			    enum ol_tx_spec tx_spec,
+			    qdf_nbuf_t msdu_list);
+
+static inline qdf_nbuf_t
 ol_tx_non_std(struct cdp_vdev *pvdev,
-	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
+	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
+}
+#else
+qdf_nbuf_t ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
+			    enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
+
+static inline qdf_nbuf_t
+ol_tx_non_std(struct cdp_vdev *pvdev,
+	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
+}
+#endif
 
 void ol_txrx_mgmt_tx_complete(void *ctxt, qdf_nbuf_t netbuf, int err);
 

+ 1 - 6
core/dp/txrx/ol_tx_classify.c

@@ -43,8 +43,6 @@
  * target-specific aliases.
  */
 
-#if defined(CONFIG_HL_SUPPORT)
-
 #define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
 #define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
 
@@ -332,7 +330,7 @@ ol_tx_tid(
 	return tid;
 }
 
-#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+#if defined(FEATURE_WLAN_TDLS)
 static inline
 struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
 						struct ol_txrx_vdev_t *vdev,
@@ -371,8 +369,6 @@ static struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
 
 	return peer;
 }
-
-
 #endif
 
 struct ol_tx_frms_queue_t *
@@ -888,4 +884,3 @@ ol_tx_classify_mgmt_extension(
 	return QDF_STATUS_SUCCESS;
 }
 #endif
-#endif /* defined(CONFIG_HL_SUPPORT) */
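
Because ol_tx_classify.o is now listed only under CONFIG_HL_DP_SUPPORT in Kbuild, the file no longer wraps its whole body in #if defined(CONFIG_HL_SUPPORT); only genuinely optional pieces such as the TDLS peer lookup keep their own feature guards. The pattern, sketched for an arbitrary HL-only source file (file contents illustrative):

	/* Before: the translation unit guarded itself. */
	#if defined(CONFIG_HL_SUPPORT)
	/* ... entire ol_tx_classify.c body ... */
	#endif /* CONFIG_HL_SUPPORT */

	/*
	 * After: the file is plain C and Kbuild decides whether it is built,
	 * so HL-only symbols simply never exist in an LL image.
	 */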

+ 1243 - 0
core/dp/txrx/ol_tx_hl.c

@@ -0,0 +1,1243 @@
+/*
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
+#include <qdf_lock.h>           /* qdf_os_spinlock */
+#include <qdf_time.h>           /* qdf_system_ticks, etc. */
+#include <qdf_nbuf.h>           /* qdf_nbuf_t */
+#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */
+
+#include <cds_queue.h>          /* TAILQ */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <enet.h>               /* ethernet_hdr_t, etc. */
+#include <ipv6_defs.h>          /* ipv6_traffic_class */
+#endif
+
+#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
+#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
+#include <ol_txrx_htt_api.h>    /* htt_tx_status */
+
+#include <ol_ctrl_txrx_api.h>
+#include <cdp_txrx_tx_delay.h>
+#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
+#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
+#endif
+#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
+#include <ol_osif_txrx_api.h>
+#include <ol_tx.h>              /* ol_tx_reinject */
+#include <ol_tx_send.h>
+
+#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
+#include <ol_tx_sched.h>
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
+#endif
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+#include <pktlog_ac_fmt.h>
+#include <cdp_txrx_handle.h>
+
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
+static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
+	[TXRX_FC_5GH_80M_2x2] = 2000,
+	[TXRX_FC_2GH_40M_2x2] = 800,
+};
+#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
+
+/* tx filtering is handled within the target FW */
+#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
+
+u_int16_t
+ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
+{
+	uint16_t desc_pool_size;
+	uint16_t steady_state_tx_lifetime_ms;
+	uint16_t safety_factor;
+
+	/*
+	 * Steady-state tx latency:
+	 *     roughly 1-2 ms flight time
+	 *   + roughly 1-2 ms prep time,
+	 *   + roughly 1-2 ms target->host notification time.
+	 * = roughly 6 ms total
+	 * Thus, steady state number of frames =
+	 * steady state max throughput / frame size * tx latency, e.g.
+	 * 1 Gbps / 1500 bytes * 6 ms = 500
+	 *
+	 */
+	steady_state_tx_lifetime_ms = 6;
+
+	safety_factor = 8;
+
+	desc_pool_size =
+		ol_cfg_max_thruput_mbps(ctrl_pdev) *
+		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+		(8 * OL_TX_AVG_FRM_BYTES) *
+		steady_state_tx_lifetime_ms *
+		safety_factor;
+
+	/* minimum */
+	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
+		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
+
+	/* maximum */
+	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
+		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
+
+	return desc_pool_size;
+}
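
	/*
	 * Worked example for ol_tx_desc_pool_size_hl() above, with assumed
	 * inputs: ol_cfg_max_thruput_mbps() == 800 and OL_TX_AVG_FRM_BYTES
	 * == 1500 (both values are illustrative assumptions).  Integer
	 * evaluation is left to right:
	 *   800 * 1000 / (8 * 1500) = 66   (truncated from 66.6)
	 *   66 * 6 (lifetime ms) * 8 (safety factor) = 3168 descriptors,
	 * which is then clamped to [OL_TX_DESC_POOL_SIZE_MIN_HL,
	 * OL_TX_DESC_POOL_SIZE_MAX_HL].
	 */
	uint16_t example_pool_size = 800 * 1000 / (8 * 1500) * 6 * 8; /* 3168 */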
+
+#ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE
+
+/**
+ * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
+ *                        for a HL system.
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @msdu: the tx frame
+ * @msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor

+ */
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+					 struct ol_txrx_vdev_t *vdev,
+					 qdf_nbuf_t msdu,
+					 struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc = NULL;
+
+	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
+	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
+		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
+		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		    QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
+		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		    QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
+			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+			ol_txrx_info("Got tx desc from resv pool\n");
+		}
+	}
+	return tx_desc;
+}
+
+#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
+bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
+{
+	enum qdf_proto_subtype proto_subtype;
+	bool high_prio = false;
+
+	if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
+		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		    QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
+		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		    QDF_NBUF_CB_PACKET_TYPE_EAPOL))
+			high_prio = true;
+	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		   QDF_NBUF_CB_PACKET_TYPE_ARP) {
+		high_prio = true;
+	} else if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		   QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
+		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
+		switch (proto_subtype) {
+		case QDF_PROTO_ICMPV6_NA:
+		case QDF_PROTO_ICMPV6_NS:
+			high_prio = true;
+		default:
+			high_prio = false;
+		}
+	}
+	return high_prio;
+}
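
	/*
	 * Note on the switch above: the QDF_PROTO_ICMPV6_NA / _NS cases set
	 * high_prio = true and then fall through to default, which resets it
	 * to false, so NA/NS frames never end up classified as high priority.
	 * If elevating them is the intent (an assumption; the function is
	 * carried over unchanged from ol_tx.c), explicit breaks are needed:
	 *
	 *	case QDF_PROTO_ICMPV6_NA:
	 *	case QDF_PROTO_ICMPV6_NS:
	 *		high_prio = true;
	 *		break;
	 *	default:
	 *		high_prio = false;
	 *		break;
	 */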
+
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+					 struct ol_txrx_vdev_t *vdev,
+					 qdf_nbuf_t msdu,
+					 struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc =
+			ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+
+	if (!tx_desc)
+		return NULL;
+
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	/* return if TX flow control disabled */
+	if (vdev->tx_desc_limit == 0) {
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+		return tx_desc;
+	}
+
+	if (!qdf_atomic_read(&vdev->os_q_paused) &&
+	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
+		/*
+		 * Pause normal priority
+		 * netdev queues if tx desc limit crosses
+		 */
+		pdev->pause_cb(vdev->vdev_id,
+			       WLAN_STOP_NON_PRIORITY_QUEUE,
+			       WLAN_DATA_FLOW_CONTROL);
+		qdf_atomic_set(&vdev->os_q_paused, 1);
+	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
+		   (qdf_atomic_read(&vdev->tx_desc_count)
+		   == vdev->tx_desc_limit)) {
+		/* Pause high priority queue */
+		pdev->pause_cb(vdev->vdev_id,
+			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
+			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
+		vdev->prio_q_paused = 1;
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	return tx_desc;
+}
+
+#else
+
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+					 struct ol_txrx_vdev_t *vdev,
+					 qdf_nbuf_t msdu,
+					 struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc = NULL;
+
+	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+	return tx_desc;
+}
+#endif
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+/**
+ * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
+ *				 margin replenishment
+ * @desc_pool_size: tx desc pool size
+ *
+ * Return: threshold low
+ */
+static inline uint16_t
+ol_txrx_rsrc_threshold_lo(int desc_pool_size)
+{
+	int threshold_low;
+
+	/*
+	 * 5% margin of unallocated desc is too much for per
+	 * vdev mechanism.
+	 * Define the value separately.
+	 */
+	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
+
+	return threshold_low;
+}
+
+/**
+ * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
+ *				 during tx desc margin replenishment
+ * @desc_pool_size: tx desc pool size
+ *
+ * Return: threshold high
+ */
+static inline uint16_t
+ol_txrx_rsrc_threshold_hi(int desc_pool_size)
+{
+	int threshold_high;
+	/* when freeing up descriptors,
+	 * keep going until there's a 7.5% margin
+	 */
+	threshold_high = ((15 * desc_pool_size) / 100) / 2;
+
+	return threshold_high;
+}
+
+#else
+
+static inline uint16_t
+ol_txrx_rsrc_threshold_lo(int desc_pool_size)
+{
+	int threshold_low;
+	/* always maintain a 5% margin of unallocated descriptors */
+	threshold_low = (5 * desc_pool_size) / 100;
+
+	return threshold_low;
+}
+
+static inline uint16_t
+ol_txrx_rsrc_threshold_hi(int desc_pool_size)
+{
+	int threshold_high;
+	/* when freeing up descriptors, keep going until
+	 * there's a 15% margin
+	 */
+	threshold_high = (15 * desc_pool_size) / 100;
+
+	return threshold_high;
+}
+#endif
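
	/*
	 * Worked example for the default (non CONFIG_PER_VDEV_TX_DESC_POOL)
	 * thresholds above, assuming desc_pool_size == 3168 (the figure from
	 * the sizing example earlier; an assumption, not a value fixed here):
	 *   threshold_low  =  5 * 3168 / 100 = 158 descriptors kept unallocated
	 *   threshold_high = 15 * 3168 / 100 = 475, the margin at which
	 *                                          replenishment stops
	 * With CONFIG_PER_VDEV_TX_DESC_POOL the low mark is instead the fixed
	 * TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED and the high mark halves to 7.5%.
	 */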
+
+void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
+{
+	uint16_t desc_pool_size, i;
+
+	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
+
+	qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
+	qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
+
+	pdev->tx_queue.rsrc_threshold_lo =
+		ol_txrx_rsrc_threshold_lo(desc_pool_size);
+	pdev->tx_queue.rsrc_threshold_hi =
+		ol_txrx_rsrc_threshold_hi(desc_pool_size);
+
+	for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
+		qdf_atomic_init(&pdev->txq_grps[i].credit);
+
+	ol_tx_target_credit_init(pdev, desc_pool_size);
+}
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+				      ol_txrx_vdev_handle vdev,
+				      struct ol_tx_desc_t *tx_desc,
+				      qdf_nbuf_t msdu,
+				      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
+		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+		if (tx_msdu_info->peer) {
+			/* remove the peer reference added above */
+			ol_txrx_peer_release_ref(tx_msdu_info->peer,
+						 PEER_DEBUG_ID_OL_INTERNAL);
+		}
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#else
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+				      ol_txrx_vdev_handle vdev,
+				      struct ol_tx_desc_t *tx_desc,
+				      qdf_nbuf_t msdu,
+				      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	/* no-op */
+	return 0;
+}
+#endif
+
+/**
+ * parse_ocb_tx_header() - Function to check for OCB
+ * @msdu:   Pointer to OS packet (qdf_nbuf_t)
+ * @tx_ctrl: TX control header on a packet and extract it if present
+ *
+ * Return: true if ocb parsing is successful
+ */
+#define OCB_HEADER_VERSION     1
+static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
+				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
+{
+	struct ether_header *eth_hdr_p;
+	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
+
+	/* Check if TX control header is present */
+	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
+	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
+		/* TX control header is not present. Nothing to do.. */
+		return true;
+
+	/* Remove the ethernet header */
+	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
+
+	/* Parse the TX control header */
+	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
+
+	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
+		if (tx_ctrl)
+			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
+				     sizeof(*tx_ctrl_hdr));
+	} else {
+		/* The TX control header is invalid. */
+		return false;
+	}
+
+	/* Remove the TX control header */
+	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
+	return true;
+}
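
For reference, a sketch of the on-wire layout this parser assumes; only the version and length fields of the control header are relied upon here, so the remaining fields of ocb_tx_ctrl_hdr_t are left out:

    /*
     * qdf_nbuf_data(msdu)
     *   +---------------------------+
     *   | struct ether_header       |  ether_type == ETHERTYPE_OCB_TX
     *   +---------------------------+  <- pulled off first
     *   | struct ocb_tx_ctrl_hdr_t  |  version == OCB_HEADER_VERSION,
     *   |  ...                      |  length  == bytes pulled off next
     *   +---------------------------+
     *   | frame payload             |
     *   +---------------------------+
     *
     * If the ethertype does not match, the frame is left untouched and
     * parsing is reported as successful.
     */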
+
+/**
+ * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
+ *				 for management frame
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @tx_mgmt_frm: the tx management frame
+ * @tx_msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor
+ */
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+
+	tx_msdu_info->htt.action.tx_comp_req = 1;
+	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+	return tx_desc;
+}
+
+/**
+ * ol_txrx_mgmt_send_frame() - send a management frame
+ * @vdev: virtual device sending the frame
+ * @tx_desc: tx desc
+ * @tx_mgmt_frm: management frame to send
+ * @tx_msdu_info: the tx meta data
+ * @chanfreq: channel frequency on which the frame is to be transmitted
+ *
+ * Return:
+ *      0 -> the frame is accepted for transmission, -OR-
+ *      1 -> the frame was not accepted
+ */
+int ol_txrx_mgmt_send_frame(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info,
+	uint16_t chanfreq)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_tx_frms_queue_t *txq;
+	int status = 1;
+
+	/*
+	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
+	 * 2.  Invoke the download scheduler.
+	 */
+	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
+	if (!txq) {
+		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
+		 *			     msdu);
+		 */
+		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
+					     1 /* error */);
+		goto out; /* can't accept the tx mgmt frame */
+	}
+	/* Initialize the HTT tx desc l2 header offset field.
+	 * Even though tx encap does not apply to mgmt frames,
+	 * htt_tx_desc_mpdu_header still needs to be called,
+	 * to specify that there was no L2 header added by tx encap,
+	 * so the frame's length does not need to be adjusted to account for
+	 * an added L2 header.
+	 */
+	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+	if (qdf_unlikely(htt_tx_desc_init(
+			pdev->htt_pdev, tx_desc->htt_tx_desc,
+			tx_desc->htt_tx_desc_paddr,
+			ol_tx_desc_id(pdev, tx_desc),
+			tx_mgmt_frm,
+			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0)))
+		goto out;
+	htt_tx_desc_display(tx_desc->htt_tx_desc);
+	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+
+	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
+	ol_tx_sched(vdev->pdev);
+	status = 0;
+out:
+	if (tx_msdu_info->peer) {
+		/* remove the peer reference added above */
+		ol_txrx_peer_release_ref(tx_msdu_info->peer,
+					 PEER_DEBUG_ID_OL_INTERNAL);
+	}
+
+	return status;
+}
+
+/**
+ * ol_tx_hl_base() - send tx frames for a HL system.
+ * @vdev: the virtual device sending the data
+ * @tx_spec: indicate what non-standard transmission actions to apply
+ * @msdu_list: the tx frames to send
+ * @tx_comp_req: tx completion req
+ *
+ * Return: NULL if all MSDUs are accepted, otherwise the list of
+ *	   unaccepted MSDUs
+ */
+static inline qdf_nbuf_t
+ol_tx_hl_base(
+	ol_txrx_vdev_handle vdev,
+	enum ol_tx_spec tx_spec,
+	qdf_nbuf_t msdu_list,
+	int tx_comp_req)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	qdf_nbuf_t msdu = msdu_list;
+	struct ol_txrx_msdu_info_t tx_msdu_info;
+	struct ocb_tx_ctrl_hdr_t tx_ctrl;
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
+	tx_msdu_info.tso_info.is_tso = 0;
+
+	/*
+	 * The msdu_list variable could be used instead of the msdu var,
+	 * but just to clarify which operations are done on a single MSDU
+	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
+	 * within the list.
+	 */
+	while (msdu) {
+		qdf_nbuf_t next;
+		struct ol_tx_frms_queue_t *txq;
+		struct ol_tx_desc_t *tx_desc = NULL;
+
+		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
+		tx_msdu_info.peer = NULL;
+		/*
+		 * The netbuf will get stored into a (peer-TID) tx queue list
+		 * inside the ol_tx_classify_store function or else dropped,
+		 * so store the next pointer immediately.
+		 */
+		next = qdf_nbuf_next(msdu);
+
+		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
+
+		if (!tx_desc) {
+			/*
+			 * If we're out of tx descs, there's no need to try
+			 * to allocate tx descs for the remaining MSDUs.
+			 */
+			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
+						  msdu);
+			return msdu; /* the list of unaccepted MSDUs */
+		}
+
+		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
+
+		if (tx_spec != OL_TX_SPEC_STD) {
+#if defined(FEATURE_WLAN_TDLS)
+			if (tx_spec & OL_TX_SPEC_NO_FREE) {
+				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
+			} else if (tx_spec & OL_TX_SPEC_TSO) {
+#else
+				if (tx_spec & OL_TX_SPEC_TSO) {
+#endif
+					tx_desc->pkt_type = OL_TX_FRM_TSO;
+				}
+				if (ol_txrx_tx_is_raw(tx_spec)) {
+					/* CHECK THIS: does this need
+					 * to happen after htt_tx_desc_init?
+					 */
+					/* different types of raw frames */
+					u_int8_t sub_type =
+						ol_txrx_tx_raw_subtype(
+								tx_spec);
+					htt_tx_desc_type(htt_pdev,
+							 tx_desc->htt_tx_desc,
+							 htt_pkt_type_raw,
+							 sub_type);
+				}
+			}
+
+			tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
+			tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
+			tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
+			tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
+			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
+
+			/* If the vdev is in OCB mode,
+			 * parse the tx control header.
+			 */
+			if (vdev->opmode == wlan_op_mode_ocb) {
+				if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
+				/* There was an error parsing
+				 * the header. Skip this packet.
+				 */
+					goto MSDU_LOOP_BOTTOM;
+				}
+			}
+
+			txq = ol_tx_classify(vdev, tx_desc, msdu,
+					     &tx_msdu_info);
+
+			if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
+				/* drop this frame,
+				 * but try sending subsequent frames
+				 */
+				/* TXRX_STATS_MSDU_LIST_INCR(pdev,
+				 * tx.dropped.no_txq, msdu);
+				 */
+				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+				if (tx_msdu_info.peer) {
+					/* remove the peer reference
+					 * added above
+					 */
+					ol_txrx_peer_release_ref(
+						tx_msdu_info.peer,
+						PEER_DEBUG_ID_OL_INTERNAL);
+				}
+				goto MSDU_LOOP_BOTTOM;
+			}
+
+			if (tx_msdu_info.peer) {
+				/*
+				 * If the state is not associated then drop all
+				 * the data packets received for that peer
+				 */
+				if (tx_msdu_info.peer->state ==
+						OL_TXRX_PEER_STATE_DISC) {
+					qdf_atomic_inc(
+						&pdev->tx_queue.rsrc_cnt);
+					ol_tx_desc_frame_free_nonstd(pdev,
+								     tx_desc,
+								     1);
+					ol_txrx_peer_release_ref(
+						tx_msdu_info.peer,
+						PEER_DEBUG_ID_OL_INTERNAL);
+					msdu = next;
+					continue;
+				} else if (tx_msdu_info.peer->state !=
+						OL_TXRX_PEER_STATE_AUTH) {
+					if (tx_msdu_info.htt.info.ethertype !=
+						ETHERTYPE_PAE &&
+						tx_msdu_info.htt.info.ethertype
+							!= ETHERTYPE_WAI) {
+						qdf_atomic_inc(
+							&pdev->tx_queue.
+								rsrc_cnt);
+						ol_tx_desc_frame_free_nonstd(
+								pdev,
+								tx_desc, 1);
+						ol_txrx_peer_release_ref(
+						 tx_msdu_info.peer,
+						 PEER_DEBUG_ID_OL_INTERNAL);
+						msdu = next;
+						continue;
+					}
+				}
+			}
+			/*
+			 * Initialize the HTT tx desc l2 header offset field.
+			 * htt_tx_desc_mpdu_header needs to be called to
+			 * make sure the l2 header size is initialized
+			 * correctly, to handle cases where TX encap is
+			 * disabled or tx encap fails to perform encap.
+			 */
+			htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+
+			/*
+			 * Note: when the driver is built without support for
+			 * SW tx encap, the following wrapper is a no-op.
+			 * When the driver is built with support for SW tx
+			 * encap, it performs encap, and on error returns
+			 * non-zero, causing a jump to the MSDU_LOOP_BOTTOM
+			 * label.
+			 */
+			if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
+						&tx_msdu_info))
+				goto MSDU_LOOP_BOTTOM;
+
+			/* initialize the HW tx descriptor */
+			htt_tx_desc_init(
+					pdev->htt_pdev, tx_desc->htt_tx_desc,
+					tx_desc->htt_tx_desc_paddr,
+					ol_tx_desc_id(pdev, tx_desc),
+					msdu,
+					&tx_msdu_info.htt,
+					&tx_msdu_info.tso_info,
+					&tx_ctrl,
+					vdev->opmode == wlan_op_mode_ocb);
+			/*
+			 * If debug display is enabled, show the meta-data
+			 * being downloaded to the target via the
+			 * HTT tx descriptor.
+			 */
+			htt_tx_desc_display(tx_desc->htt_tx_desc);
+
+			ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
+			if (tx_msdu_info.peer) {
+				OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
+							msdu);
+				/* remove the peer reference added above */
+				ol_txrx_peer_release_ref
+						(tx_msdu_info.peer,
+						 PEER_DEBUG_ID_OL_INTERNAL);
+			}
+MSDU_LOOP_BOTTOM:
+			msdu = next;
+		}
+		ol_tx_sched(pdev);
+		return NULL; /* all MSDUs were accepted */
+}
+
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
+				pdev->cfg.request_tx_comp;
+
+	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
+}
+
+qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
+			    enum ol_tx_spec tx_spec,
+			    qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
+				pdev->cfg.request_tx_comp;
+
+	if (!tx_comp_req) {
+		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
+		    (pdev->tx_data_callback.func))
+			tx_comp_req = 1;
+	}
+	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
+}
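
A minimal usage sketch of the two HL entry points above (hypothetical caller, error handling elided); whatever is returned is the tail of the list that was not accepted and stays owned by the caller:

    qdf_nbuf_t rejected;

    /* standard data path */
    rejected = ol_tx_hl(vdev, msdu_list);
    if (rejected) {
        /* e.g. drop or re-queue the unaccepted MSDUs */
    }

    /* non-standard path, e.g. frames the caller wants back after completion */
    rejected = ol_tx_non_std_hl(vdev, OL_TX_SPEC_NO_FREE, nbuf_list);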
+
+#ifdef FEATURE_WLAN_TDLS
+/**
+ * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
+ * @pvdev: the data virtual device
+ * @bss_addr: bss address
+ *
+ * Return: None
+ */
+void ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t  *)pvdev;
+
+	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
+	if (bss_addr && vdev->last_real_peer &&
+	    !qdf_mem_cmp((u8 *)bss_addr,
+			     vdev->last_real_peer->mac_addr.raw,
+			     IEEE80211_ADDR_LEN))
+		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
+			     vdev->last_real_peer->mac_addr.raw,
+			     OL_TXRX_MAC_ADDR_LEN);
+	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
+}
+
+/**
+ * ol_txrx_add_last_real_peer() - add last peer
+ * @ppdev: the data physical device
+ * @pvdev: virtual device
+ * @peer_id: peer id
+ *
+ * Return: None
+ */
+void
+ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
+			   struct cdp_vdev *pvdev, uint8_t *peer_id)
+{
+	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	ol_txrx_peer_handle peer;
+
+	peer = ol_txrx_find_peer_by_addr(
+		(struct cdp_pdev *)pdev,
+		vdev->hl_tdls_ap_mac_addr.raw,
+		peer_id);
+
+	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
+	if (!vdev->last_real_peer && peer &&
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+		vdev->last_real_peer = peer;
+	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
+}
+
+/**
+ * is_vdev_restore_last_peer() - check whether the peer is the vdev's
+ *				 last real peer
+ * @ppeer: peer object
+ *
+ * Return: true if the given peer is the vdev's last real peer
+ */
+bool is_vdev_restore_last_peer(void *ppeer)
+{
+	struct ol_txrx_peer_t *peer = ppeer;
+	struct ol_txrx_vdev_t *vdev;
+
+	vdev = peer->vdev;
+	return vdev->last_real_peer && (vdev->last_real_peer == peer);
+}
+
+/**
+ * ol_txrx_update_last_real_peer() - restore the vdev's last real peer
+ * @ppdev: the data physical device
+ * @ppeer: peer object
+ * @peer_id: peer id
+ * @restore_last_peer: restore last peer flag
+ *
+ * Return: None
+ */
+void ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
+				   uint8_t *peer_id, bool restore_last_peer)
+{
+	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_peer_t *peer = ppeer;
+	struct ol_txrx_vdev_t *vdev;
+
+	if (!restore_last_peer)
+		return;
+
+	vdev = peer->vdev;
+	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
+					 vdev->hl_tdls_ap_mac_addr.raw,
+					 peer_id);
+
+	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
+	if (!vdev->last_real_peer && peer &&
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+		vdev->last_real_peer = peer;
+	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
+/**
+ * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
+{
+	qdf_spinlock_create(&pdev->txq_log_spinlock);
+	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
+	pdev->txq_log.oldest_record_offset = 0;
+	pdev->txq_log.offset = 0;
+	pdev->txq_log.allow_wrap = 1;
+	pdev->txq_log.wrapped = 0;
+}
+
+/**
+ * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
+{
+	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
+}
+#endif
+
+#if defined(DEBUG_HL_LOGGING)
+
+/**
+ * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
+{
+	qdf_spinlock_create(&pdev->grp_stat_spinlock);
+	pdev->grp_stats.last_valid_index = -1;
+	pdev->grp_stats.wrap_around = 0;
+}
+
+/**
+ * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
+ * @pdev: the physical device object
+ *
+ * Return: None
+ */
+void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
+{
+	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+
+/**
+ * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
+ * @pvdev: the virtual device object
+ * @flag: new value of the vdev's hlTdlsFlag
+ *
+ * Return: None
+ */
+void
+ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
+	vdev->hlTdlsFlag = flag;
+}
+#endif
+
+/**
+ * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
+{
+	uint8_t i;
+
+	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+		TAILQ_INIT(&vdev->txqs[i].head);
+		vdev->txqs[i].paused_count.total = 0;
+		vdev->txqs[i].frms = 0;
+		vdev->txqs[i].bytes = 0;
+		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
+		vdev->txqs[i].flag = ol_tx_queue_empty;
+		/* aggregation is not applicable for vdev tx queues */
+		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
+		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
+		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
+	}
+}
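
The ext_tid values chosen above deliberately sit past the per-peer TID range, so vdev and peer queues never collide in the scheduler; as a sketch:

    /* peer queues (ol_txrx_peer_txqs_init): ext_tid = 0 .. OL_TX_NUM_TIDS - 1 */
    /* vdev queues (this function):          ext_tid = OL_TX_NUM_TIDS + i      */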
+
+/**
+ * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
+ * @vdev: the virtual device object
+ *
+ * Return: None
+ */
+void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_tx_frms_queue_t *txq;
+	int i;
+
+	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+		txq = &vdev->txqs[i];
+		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
+	}
+}
+
+/**
+ * ol_txrx_peer_txqs_init() - initialise peer tx queues
+ * @pdev: the physical device object
+ * @peer: peer object
+ *
+ * Return: None
+ */
+void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
+			    struct ol_txrx_peer_t *peer)
+{
+	uint8_t i;
+	struct ol_txrx_vdev_t *vdev = peer->vdev;
+
+	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+		TAILQ_INIT(&peer->txqs[i].head);
+		peer->txqs[i].paused_count.total = 0;
+		peer->txqs[i].frms = 0;
+		peer->txqs[i].bytes = 0;
+		peer->txqs[i].ext_tid = i;
+		peer->txqs[i].flag = ol_tx_queue_empty;
+		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
+		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
+		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+	/* aggregation is not applicable for mgmt and non-QoS tx queues */
+	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
+		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
+
+	ol_txrx_peer_pause(peer);
+}
+
+/**
+ * ol_txrx_peer_tx_queue_free() - free peer tx queues
+ * @pdev: the physical device object
+ * @peer: peer object
+ *
+ * Return: None
+ */
+void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
+				struct ol_txrx_peer_t *peer)
+{
+	struct ol_tx_frms_queue_t *txq;
+	uint8_t i;
+
+	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+		txq = &peer->txqs[i];
+		ol_tx_queue_free(pdev, txq, i, true);
+	}
+}
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * ol_txrx_update_group_credit() - update group credit for tx queue
+ * @group: tx queue group whose credit needs to be updated
+ * @credit: credit value (absolute value or delta, depending on @absolute)
+ * @absolute: when set, @credit replaces the group credit; otherwise it is
+ *	      added to the current credit
+ *
+ * Return: None
+ */
+void ol_txrx_update_group_credit(
+		struct ol_tx_queue_group_t *group,
+		int32_t credit,
+		u_int8_t absolute)
+{
+	if (absolute)
+		qdf_atomic_set(&group->credit, credit);
+	else
+		qdf_atomic_add(credit, &group->credit);
+}
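
A short usage sketch of the two update modes (group index and credit values are illustrative only):

    /* set group 0's credit to exactly 100 */
    ol_txrx_update_group_credit(&pdev->txq_grps[0], 100, 1);

    /* later, return 5 credits to the same group (delta update) */
    ol_txrx_update_group_credit(&pdev->txq_grps[0], 5, 0);

    /* a negative delta takes credits away, as the DBS sharing code below does */
    ol_txrx_update_group_credit(&pdev->txq_grps[0], -10, 0);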
+
+/**
+ * ol_txrx_update_tx_queue_groups() - update the vdev tx queue group if the
+ *				      vdev id mask and ac mask do not match
+ *				      the group's current membership
+ * @pdev: the data physical device
+ * @group_id: TXQ group id
+ * @credit: TXQ group credit count
+ * @absolute: TXQ group absolute
+ * @vdev_id_mask: TXQ vdev group id mask
+ * @ac_mask: TXQ access category mask
+ *
+ * Return: None
+ */
+void ol_txrx_update_tx_queue_groups(
+		ol_txrx_pdev_handle pdev,
+		u_int8_t group_id,
+		int32_t credit,
+		u_int8_t absolute,
+		u_int32_t vdev_id_mask,
+		u_int32_t ac_mask
+		)
+{
+	struct ol_tx_queue_group_t *group;
+	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
+	u_int32_t membership;
+	struct ol_txrx_vdev_t *vdev;
+
+	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
+		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
+			__func__,
+			group_id);
+		return;
+	}
+
+	group = &pdev->txq_grps[group_id];
+
+	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
+
+	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+	/*
+	 * if the membership (vdev id mask and ac mask)
+	 * matches, there is no need to update the tx queue groups.
+	 */
+	if (group->membership == membership)
+		/* Update Credit Only */
+		goto credit_update;
+
+	credit += ol_txrx_distribute_group_credits(pdev, group_id,
+						   vdev_id_mask);
+	/*
+	 * membership (vdev id mask and ac mask) is not matching
+	 * TODO: ignoring ac mask for now
+	 */
+	qdf_assert(ac_mask == 0xffff);
+	group_vdev_id_mask =
+		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
+
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		group_vdev_bit_mask =
+			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
+					group_vdev_id_mask, vdev->vdev_id);
+		vdev_bit_mask =
+			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
+					vdev_id_mask, vdev->vdev_id);
+
+		if (group_vdev_bit_mask != vdev_bit_mask) {
+			/*
+			 * Change in vdev tx queue group
+			 */
+			if (!vdev_bit_mask) {
+				/* Set Group Pointer (vdev and peer) to NULL */
+				ol_tx_set_vdev_group_ptr(
+						pdev, vdev->vdev_id, NULL);
+			} else {
+				/* Set Group Pointer (vdev and peer) */
+				ol_tx_set_vdev_group_ptr(
+						pdev, vdev->vdev_id, group);
+			}
+		}
+	}
+	/* Update membership */
+	group->membership = membership;
+credit_update:
+	/* Update Credit */
+	ol_txrx_update_group_credit(group, credit, absolute);
+	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+}
+#endif
+
+#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
+	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
+#define MIN_INIT_GROUP_CREDITS	10
+int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
+				     u8 group_id,
+				     u32 vdevid_mask_new)
+{
+	struct ol_tx_queue_group_t *grp = &pdev->txq_grps[group_id];
+	struct ol_tx_queue_group_t *grp_nxt = &pdev->txq_grps[!group_id];
+	int creds_nxt = qdf_atomic_read(&grp_nxt->credit);
+	int vdevid_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp->membership);
+	int vdevid_mask_othgrp =
+		OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp_nxt->membership);
+	int creds_distribute = 0;
+
+	/* if vdev added to the group is the first vdev */
+	if ((vdevid_mask == 0) && (vdevid_mask_new != 0)) {
+		/* if other group has members */
+		if (vdevid_mask_othgrp) {
+			if (creds_nxt < MIN_INIT_GROUP_CREDITS)
+				creds_distribute = creds_nxt / 2;
+			else
+				creds_distribute = MIN_INIT_GROUP_CREDITS;
+
+			ol_txrx_update_group_credit(grp_nxt, -creds_distribute,
+						    0);
+		} else {
+			/*
+			 * Other grp has no members, give all credits to this
+			 * grp.
+			 */
+			creds_distribute =
+				qdf_atomic_read(&pdev->target_tx_credit);
+		}
+	/* if all vdevs are removed from this grp */
+	} else if ((vdevid_mask != 0) && (vdevid_mask_new == 0)) {
+		if (vdevid_mask_othgrp)
+			/* Transfer credits to other grp */
+			ol_txrx_update_group_credit(grp_nxt,
+						    qdf_atomic_read(&grp->
+						    credit),
+						    0);
+		/* Set current grp credits to zero */
+		ol_txrx_update_group_credit(grp, 0, 1);
+	}
+
+	return creds_distribute;
+}
+#endif /*
+	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
+	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
+	*/
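
A worked example of the sharing policy above (numbers are illustrative): if group 1 gains its first vdev while group 0 still has members and holds 30 credits, MIN_INIT_GROUP_CREDITS (10) are taken from group 0 and returned for group 1; had group 0 held only 6 credits, half of them (3) would move instead. Conversely, when the last vdev leaves a group, its remaining credits are handed to the other group (if that group has members) and its own credit is reset to zero with an absolute update.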
+
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
+/**
+ * ol_txrx_register_hl_flow_control() - register hl netdev flow control callback
+ * @soc: data path soc handle
+ * @flowcontrol: flow control callback
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
+				     tx_pause_callback flowcontrol)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	u32 desc_pool_size;
+
+	/* validate the inputs before dereferencing pdev */
+	if (!pdev || !flowcontrol) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "pdev or pause_cb is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
+
+	/*
+	 * Assert that the tx descriptor pool size meets the requirement:
+	 * a maximum of 2 sessions are allowed on a band.
+	 */
+	QDF_ASSERT((2 * ol_txrx_tx_desc_alloc_table[TXRX_FC_5GH_80M_2x2] +
+		    ol_txrx_tx_desc_alloc_table[TXRX_FC_2GH_40M_2x2])
+		    <= desc_pool_size);
+
+	pdev->pause_cb = flowcontrol;
+	return 0;
+}
+
+int ol_txrx_set_vdev_os_queue_status(u8 vdev_id,
+				     enum netif_action_type action)
+{
+	struct ol_txrx_vdev_t *vdev =
+	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	switch (action) {
+	case WLAN_NETIF_PRIORITY_QUEUE_ON:
+		qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+		vdev->prio_q_paused = 0;
+		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+		break;
+	case WLAN_WAKE_NON_PRIORITY_QUEUE:
+		qdf_atomic_set(&vdev->os_q_paused, 0);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid action %d", __func__, action);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
+ * @vdev_id: vdev id for the vdev under consideration.
+ * @chan: Channel on which the vdev has been started.
+ *
+ * Return: 0 on success, -EINVAL if the vdev cannot be found.
+ */
+int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan)
+{
+	struct ol_txrx_vdev_t *vdev =
+	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	enum ol_txrx_fc_limit_id fc_limit_id;
+	u32 td_limit;
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	/* TODO: Handle no of spatial streams and channel BW */
+	if (WLAN_REG_IS_5GHZ_CH(chan))
+		fc_limit_id = TXRX_FC_5GH_80M_2x2;
+	else
+		fc_limit_id = TXRX_FC_2GH_40M_2x2;
+
+	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+	td_limit = ol_txrx_tx_desc_alloc_table[fc_limit_id];
+	vdev->tx_desc_limit = td_limit;
+	vdev->queue_stop_th = td_limit - TXRX_HL_TX_DESC_HI_PRIO_RESERVED;
+	vdev->queue_restart_th = td_limit - TXRX_HL_TX_DESC_QUEUE_RESTART_TH;
+	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+
+	return 0;
+}
+#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
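
Tying the limits above back to the allocation table this change removes from ol_txrx.c (see the deleted ol_txrx_tx_desc_alloc_table further down), a hedged sketch with hypothetical channel variables:

    /* 5 GHz channel: td_limit = 2000 (TXRX_FC_5GH_80M_2x2) */
    ol_txrx_set_vdev_tx_desc_limit(vdev_id, chan_5ghz);

    /* 2.4 GHz channel: td_limit = 800 (TXRX_FC_2GH_40M_2x2) */
    ol_txrx_set_vdev_tx_desc_limit(vdev_id, chan_2ghz);

The stop and restart thresholds then sit td_limit minus the two reserve macros below it; those macros are defined elsewhere, so their exact values are not reproduced here.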

+ 779 - 0
core/dp/txrx/ol_tx_ll.c

@@ -0,0 +1,779 @@
+/*
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
+#include <qdf_lock.h>           /* qdf_os_spinlock */
+#include <qdf_time.h>           /* qdf_system_ticks, etc. */
+#include <qdf_nbuf.h>           /* qdf_nbuf_t */
+#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */
+
+#include <cds_queue.h>          /* TAILQ */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <enet.h>               /* ethernet_hdr_t, etc. */
+#include <ipv6_defs.h>          /* ipv6_traffic_class */
+#endif
+
+#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
+#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
+#include <ol_txrx_htt_api.h>    /* htt_tx_status */
+
+#include <ol_ctrl_txrx_api.h>
+#include <cdp_txrx_tx_delay.h>
+#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
+#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
+#endif
+#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
+#include <ol_osif_txrx_api.h>
+#include <ol_tx.h>              /* ol_tx_reinject */
+#include <ol_tx_send.h>
+
+#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
+#include <ol_tx_sched.h>
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
+#endif
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+#include <pktlog_ac_fmt.h>
+#include <cdp_txrx_handle.h>
+
+void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
+{
+	qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
+		       &pdev->target_tx_credit);
+}
+
+qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
+			  qdf_nbuf_t msdu, uint16_t peer_id)
+{
+	struct ol_tx_desc_t *tx_desc = NULL;
+	struct ol_txrx_msdu_info_t msdu_info;
+
+	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
+	msdu_info.peer = NULL;
+	msdu_info.htt.action.tx_comp_req = 0;
+	msdu_info.tso_info.is_tso = 0;
+
+	tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
+	if (!tx_desc)
+		return msdu;
+
+	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true);
+
+	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
+
+	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
+
+	return NULL;
+}
+
+/*
+ * The TXRX module doesn't accept tx frames unless the target has
+ * enough descriptors for them.
+ * For LL, the TXRX descriptor pool is sized to match the target's
+ * descriptor pool.  Hence, if the descriptor allocation in TXRX
+ * succeeds, that guarantees that the target has room to accept
+ * the new tx frame.
+ */
+struct ol_tx_desc_t *
+ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
+		 qdf_nbuf_t msdu,
+		 struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
+	tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
+	if (qdf_unlikely(!tx_desc)) {
+		/*
+		 * If TSO packet, free associated
+		 * remaining TSO segment descriptors
+		 */
+		if (qdf_nbuf_is_tso(msdu))
+			ol_free_remaining_tso_segs(
+					vdev, msdu_info, true);
+		TXRX_STATS_MSDU_LIST_INCR(
+				pdev, tx.dropped.host_reject, msdu);
+		return NULL;
+	}
+
+	return tx_desc;
+}
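
A minimal sketch of how the LL helpers compose for a single, non-TSO MSDU (assuming msdu_info has been filled in the same way ol_tx_reinject() does above):

    struct ol_tx_desc_t *tx_desc;

    tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
    if (!tx_desc)
        return msdu;    /* pool exhausted: hand the frame back to the caller */

    ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);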
+
+qdf_nbuf_t
+ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
+		 enum ol_tx_spec tx_spec,
+		 qdf_nbuf_t msdu_list)
+{
+	qdf_nbuf_t msdu = msdu_list;
+	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
+	struct ol_txrx_msdu_info_t msdu_info;
+
+	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+	msdu_info.htt.action.tx_comp_req = 0;
+
+	/*
+	 * The msdu_list variable could be used instead of the msdu var,
+	 * but just to clarify which operations are done on a single MSDU
+	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
+	 * within the list.
+	 */
+	while (msdu) {
+		qdf_nbuf_t next;
+		struct ol_tx_desc_t *tx_desc = NULL;
+
+		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
+		msdu_info.peer = NULL;
+		msdu_info.tso_info.is_tso = 0;
+
+		tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
+		if (!tx_desc)
+			return msdu;
+
+		/*
+		 * The netbuf may get linked into a different list inside the
+		 * ol_tx_send function, so store the next pointer before the
+		 * tx_send call.
+		 */
+		next = qdf_nbuf_next(msdu);
+
+		if (tx_spec != OL_TX_SPEC_STD) {
+			if (tx_spec & OL_TX_SPEC_NO_FREE) {
+				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
+			} else if (tx_spec & OL_TX_SPEC_TSO) {
+				tx_desc->pkt_type = OL_TX_FRM_TSO;
+			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
+				uint8_t sub_type =
+					ol_txrx_tx_raw_subtype(tx_spec);
+				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
+						 htt_pkt_type_native_wifi,
+						 sub_type);
+			} else if (ol_txrx_tx_is_raw(tx_spec)) {
+				/* different types of raw frames */
+				uint8_t sub_type =
+					ol_txrx_tx_raw_subtype(tx_spec);
+				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
+						 htt_pkt_type_raw, sub_type);
+			}
+		}
+		/*
+		 * If debug display is enabled, show the meta-data being
+		 * downloaded to the target via the HTT tx descriptor.
+		 */
+		htt_tx_desc_display(tx_desc->htt_tx_desc);
+		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
+		msdu = next;
+	}
+	return NULL;            /* all MSDUs were accepted */
+}
+
+#if defined(HELIUMPLUS)
+void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
+{
+	uint32_t                *frag_ptr_i_p;
+	int                     i;
+
+	ol_txrx_err("OL TX Descriptor 0x%pK msdu_id %d\n",
+		    tx_desc, tx_desc->id);
+	ol_txrx_err("HTT TX Descriptor vaddr: 0x%pK paddr: %pad",
+		    tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
+	ol_txrx_err("Fragment Descriptor 0x%pK (paddr=%pad)",
+		    tx_desc->htt_frag_desc, &tx_desc->htt_frag_desc_paddr);
+
+	/*
+	 * it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
+	 * is already de-referrable (=> in virtual address space)
+	 */
+	frag_ptr_i_p = tx_desc->htt_frag_desc;
+
+	/* Dump 6 words of TSO flags */
+	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
+		       DUMP_PREFIX_NONE, 8, 4,
+		       frag_ptr_i_p, 24, true);
+
+	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
+
+	i = 0;
+	while (*frag_ptr_i_p) {
+		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
+			       DUMP_PREFIX_NONE, 8, 4,
+			       frag_ptr_i_p, 8, true);
+		i++;
+		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
+			break;
+		/* jump to next  pointer - skip length */
+		frag_ptr_i_p += 2;
+	}
+}
+#endif /* HELIUMPLUS */
+
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+
+	/* For LL tx_comp_req is not used so initialized to 0 */
+	tx_msdu_info->htt.action.tx_comp_req = 0;
+	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+	/* FIX THIS -
+	 * The FW currently has trouble using the host's fragments table
+	 * for management frames.  Until this is fixed, rather than
+	 * specifying the fragment table to the FW, specify just the
+	 * address of the initial fragment.
+	 */
+#if defined(HELIUMPLUS)
+	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
+	 *			  tx_desc);
+	 */
+#endif /* defined(HELIUMPLUS) */
+	if (tx_desc) {
+		/*
+		 * Following the call to ol_tx_desc_ll, frag 0 is the
+		 * HTT tx HW descriptor, and the frame payload is in
+		 * frag 1.
+		 */
+		htt_tx_desc_frags_table_set(
+				pdev->htt_pdev,
+				tx_desc->htt_tx_desc,
+				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
+				0, 0);
+#if defined(HELIUMPLUS) && defined(HELIUMPLUS_DEBUG)
+		ol_txrx_dump_frag_desc(
+				"after htt_tx_desc_frags_table_set",
+				tx_desc);
+#endif /* defined(HELIUMPLUS) */
+	}
+
+	return tx_desc;
+}
+
+int ol_txrx_mgmt_send_frame(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info,
+	uint16_t chanfreq)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
+					QDF_NBUF_TX_PKT_MGMT_TRACK;
+	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
+			  htt_pkt_type_mgmt);
+
+	return 0;
+}
+
+#if defined(FEATURE_TSO)
+void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
+				struct ol_txrx_msdu_info_t *msdu_info,
+				bool is_tso_seg_mapping_done)
+{
+	struct qdf_tso_seg_elem_t *next_seg;
+	struct qdf_tso_seg_elem_t *free_seg = msdu_info->tso_info.curr_seg;
+	struct ol_txrx_pdev_t *pdev;
+	bool is_last_seg = false;
+
+	if (qdf_unlikely(!vdev)) {
+		ol_txrx_err("vdev is null");
+		return;
+	}
+
+	pdev = vdev->pdev;
+	if (qdf_unlikely(!pdev)) {
+		ol_txrx_err("pdev is null");
+		return;
+	}
+
+	/*
+	 * TSO segment are mapped already, therefore,
+	 * 1. unmap the tso segments,
+	 * 2. free tso num segment if it is a last segment, and
+	 * 3. free the tso segments.
+	 */
+
+	if (is_tso_seg_mapping_done) {
+		struct qdf_tso_num_seg_elem_t *tso_num_desc =
+				msdu_info->tso_info.tso_num_seg_list;
+
+		if (qdf_unlikely(!tso_num_desc)) {
+			ol_txrx_err("TSO common info is NULL!");
+			return;
+		}
+
+		while (free_seg) {
+			qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+			tso_num_desc->num_seg.tso_cmn_num_seg--;
+
+			is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg ==
+				       0) ? true : false;
+			qdf_nbuf_unmap_tso_segment(pdev->osdev, free_seg,
+						   is_last_seg);
+			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+
+			if (is_last_seg) {
+				ol_tso_num_seg_free(pdev,
+						    msdu_info->tso_info.
+						    tso_num_seg_list);
+				msdu_info->tso_info.tso_num_seg_list = NULL;
+			}
+
+			next_seg = free_seg->next;
+			free_seg->force_free = 1;
+			ol_tso_free_segment(pdev, free_seg);
+			free_seg = next_seg;
+		}
+	} else {
+		/*
+		 * TSO segment are not mapped therefore,
+		 * free the tso segments only.
+		 */
+		while (free_seg) {
+			next_seg = free_seg->next;
+			free_seg->force_free = 1;
+			ol_tso_free_segment(pdev, free_seg);
+			free_seg = next_seg;
+		}
+	}
+}
+
+/**
+ * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
+ * related information in the msdu_info meta data
+ * @vdev: virtual device handle
+ * @msdu: network buffer
+ * @msdu_info: meta data associated with the msdu
+ *
+ * Return: 0 - success, >0 - error
+ */
+uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
+			  qdf_nbuf_t msdu,
+			  struct ol_txrx_msdu_info_t *msdu_info)
+{
+	msdu_info->tso_info.curr_seg = NULL;
+	if (qdf_nbuf_is_tso(msdu)) {
+		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
+		struct qdf_tso_num_seg_elem_t *tso_num_seg;
+
+		msdu_info->tso_info.tso_num_seg_list = NULL;
+		msdu_info->tso_info.tso_seg_list = NULL;
+		msdu_info->tso_info.num_segs = num_seg;
+		while (num_seg) {
+			struct qdf_tso_seg_elem_t *tso_seg =
+				ol_tso_alloc_segment(vdev->pdev);
+			if (tso_seg) {
+				qdf_tso_seg_dbg_record(tso_seg,
+						       TSOSEG_LOC_PREPARETSO);
+				tso_seg->next =
+					msdu_info->tso_info.tso_seg_list;
+				msdu_info->tso_info.tso_seg_list
+					= tso_seg;
+				num_seg--;
+			} else {
+				/* Free the TSO segments allocated so far */
+				msdu_info->tso_info.curr_seg =
+					msdu_info->tso_info.tso_seg_list;
+				ol_free_remaining_tso_segs(vdev, msdu_info,
+							   false);
+				return 1;
+			}
+		}
+		tso_num_seg = ol_tso_num_seg_alloc(vdev->pdev);
+		if (tso_num_seg) {
+			tso_num_seg->next = msdu_info->tso_info.
+						tso_num_seg_list;
+			msdu_info->tso_info.tso_num_seg_list = tso_num_seg;
+		} else {
+			/* Free the already allocated num of segments */
+			msdu_info->tso_info.curr_seg =
+				msdu_info->tso_info.tso_seg_list;
+			ol_free_remaining_tso_segs(vdev, msdu_info, false);
+			return 1;
+		}
+
+		if (qdf_unlikely(!qdf_nbuf_get_tso_info(vdev->pdev->osdev,
+						msdu, &msdu_info->tso_info))) {
+			/* Free the already allocated num of segments */
+			msdu_info->tso_info.curr_seg =
+				msdu_info->tso_info.tso_seg_list;
+			ol_free_remaining_tso_segs(vdev, msdu_info, false);
+			return 1;
+		}
+
+		msdu_info->tso_info.curr_seg =
+			msdu_info->tso_info.tso_seg_list;
+		num_seg = msdu_info->tso_info.num_segs;
+	} else {
+		msdu_info->tso_info.is_tso = 0;
+		msdu_info->tso_info.num_segs = 1;
+	}
+	return 0;
+}
+
+/**
+ * ol_tx_tso_update_stats() - update TSO stats
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ * @tso_info: TSO info for the msdu
+ * @msdu: TSO msdu for which stats are updated
+ * @tso_msdu_idx: stats index in the global TSO stats array where stats will be
+ *                updated
+ *
+ * Return: None
+ */
+void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
+			    struct qdf_tso_info_t  *tso_info, qdf_nbuf_t msdu,
+			    uint32_t tso_msdu_idx)
+{
+	TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
+	TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
+				       qdf_nbuf_tcp_tso_size(msdu));
+	TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
+					tso_msdu_idx, qdf_nbuf_len(msdu));
+	TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, tso_msdu_idx,
+					qdf_nbuf_get_nr_frags(msdu));
+}
+
+/**
+ * ol_tx_tso_get_stats_idx() - retrieve global TSO stats index and increment it
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ *
+ * Retrieve  the current value of the global variable and increment it. This is
+ * done in a spinlock as the global TSO stats may be accessed in parallel by
+ * multiple TX streams.
+ *
+ * Return: The current value of TSO stats index.
+ */
+uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
+{
+	uint32_t msdu_stats_idx = 0;
+
+	qdf_spin_lock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
+	msdu_stats_idx = pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx;
+	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx++;
+	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx &=
+					NUM_MAX_TSO_MSDUS_MASK;
+	qdf_spin_unlock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
+
+	TXRX_STATS_TSO_RESET_MSDU(pdev, msdu_stats_idx);
+
+	return msdu_stats_idx;
+}
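
A brief note on the wrap-around above: the post-increment mask makes the TSO history behave as a ring buffer, i.e. idx_next = (idx + 1) & NUM_MAX_TSO_MSDUS_MASK, so once the array fills up the oldest record is silently overwritten (assuming, as the mask name suggests, that NUM_MAX_TSO_MSDUS is a power of two).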
+
+/**
+ * ol_tso_seg_list_init() - function to initialise the tso seg freelist
+ * @pdev: the data physical device sending the data
+ * @num_seg: number of segments to be initialised
+ *
+ * Return: none
+ */
+void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
+{
+	int i = 0;
+	struct qdf_tso_seg_elem_t *c_element;
+
+	/* Host should not allocate any c_element. */
+	if (num_seg <= 0) {
+		ol_txrx_err("Pool size passed is 0");
+		QDF_BUG(0);
+		pdev->tso_seg_pool.pool_size = i;
+		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
+		return;
+	}
+
+	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
+	pdev->tso_seg_pool.freelist = c_element;
+	for (i = 0; i < (num_seg - 1); i++) {
+		if (qdf_unlikely(!c_element)) {
+			ol_txrx_err("c_element NULL for seg %d", i);
+			QDF_BUG(0);
+			pdev->tso_seg_pool.pool_size = i;
+			pdev->tso_seg_pool.num_free = i;
+			qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
+			return;
+		}
+		/* set the freelist bit and magic cookie*/
+		c_element->on_freelist = 1;
+		c_element->cookie = TSO_SEG_MAGIC_COOKIE;
+#ifdef TSOSEG_DEBUG
+		c_element->dbg.txdesc = NULL;
+		qdf_atomic_init(&c_element->dbg.cur); /* history empty */
+		qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT1);
+#endif /* TSOSEG_DEBUG */
+		c_element->next =
+			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
+		c_element = c_element->next;
+	}
+	/*
+	 * NULL check for the last c_element of the list or
+	 * first c_element if num_seg is equal to 1.
+	 */
+	if (qdf_unlikely(!c_element)) {
+		ol_txrx_err("c_element NULL for seg %d", i);
+		QDF_BUG(0);
+		pdev->tso_seg_pool.pool_size = i;
+		pdev->tso_seg_pool.num_free = i;
+		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
+		return;
+	}
+	c_element->on_freelist = 1;
+	c_element->cookie = TSO_SEG_MAGIC_COOKIE;
+#ifdef TSOSEG_DEBUG
+	qdf_tso_seg_dbg_init(c_element);
+	qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT2);
+#endif /* TSOSEG_DEBUG */
+	c_element->next = NULL;
+	pdev->tso_seg_pool.pool_size = num_seg;
+	pdev->tso_seg_pool.num_free = num_seg;
+	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
+}
+
+/**
+ * ol_tso_seg_list_deinit() - function to de-initialise the tso seg freelist
+ * @pdev: the data physical device sending the data
+ *
+ * Return: none
+ */
+void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	int i;
+	struct qdf_tso_seg_elem_t *c_element;
+	struct qdf_tso_seg_elem_t *temp;
+
+	/* pool size 0 implies that tso seg list is not initialised*/
+	if (!pdev->tso_seg_pool.freelist &&
+	    pdev->tso_seg_pool.pool_size == 0)
+		return;
+
+	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+	c_element = pdev->tso_seg_pool.freelist;
+	i = pdev->tso_seg_pool.pool_size;
+
+	pdev->tso_seg_pool.freelist = NULL;
+	pdev->tso_seg_pool.num_free = 0;
+	pdev->tso_seg_pool.pool_size = 0;
+
+	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
+
+	while (i-- > 0 && c_element) {
+		temp = c_element->next;
+		if (c_element->on_freelist != 1) {
+			qdf_tso_seg_dbg_bug("seg already freed (double?)");
+			return;
+		} else if (c_element->cookie != TSO_SEG_MAGIC_COOKIE) {
+			qdf_tso_seg_dbg_bug("seg cookie is bad (corruption?)");
+			return;
+		}
+		/* free this seg, so reset the cookie value*/
+		c_element->cookie = 0;
+		qdf_mem_free(c_element);
+		c_element = temp;
+	}
+}
+
+/**
+ * ol_tso_num_seg_list_init() - initialise the freelist of elements used to
+ *				count the number of TSO segments in a jumbo
+ *				skb packet
+ * @pdev: the data physical device sending the data
+ * @num_seg: number of elements to be initialised
+ *
+ * Return: none
+ */
+void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
+{
+	int i = 0;
+	struct qdf_tso_num_seg_elem_t *c_element;
+
+	/* Host should not allocate any c_element. */
+	if (num_seg <= 0) {
+		ol_txrx_err("Pool size passed is 0");
+		QDF_BUG(0);
+		pdev->tso_num_seg_pool.num_seg_pool_size = i;
+		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+		return;
+	}
+
+	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
+	pdev->tso_num_seg_pool.freelist = c_element;
+	for (i = 0; i < (num_seg - 1); i++) {
+		if (qdf_unlikely(!c_element)) {
+			ol_txrx_err("c_element NULL for num of seg %d", i);
+			QDF_BUG(0);
+			pdev->tso_num_seg_pool.num_seg_pool_size = i;
+			pdev->tso_num_seg_pool.num_free = i;
+			qdf_spinlock_create(&pdev->tso_num_seg_pool.
+							tso_num_seg_mutex);
+			return;
+		}
+		c_element->next =
+			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
+		c_element = c_element->next;
+	}
+	/*
+	 * NULL check for the last c_element of the list or
+	 * first c_element if num_seg is equal to 1.
+	 */
+	if (qdf_unlikely(!c_element)) {
+		ol_txrx_err("c_element NULL for num of seg %d", i);
+		QDF_BUG(0);
+		pdev->tso_num_seg_pool.num_seg_pool_size = i;
+		pdev->tso_num_seg_pool.num_free = i;
+		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+		return;
+	}
+	c_element->next = NULL;
+	pdev->tso_num_seg_pool.num_seg_pool_size = num_seg;
+	pdev->tso_num_seg_pool.num_free = num_seg;
+	qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+}
+
+/**
+ * ol_tso_num_seg_list_deinit() - de-initialise the freelist of elements used
+ *				  to count the number of TSO segments in a
+ *				  jumbo skb packet
+ * @pdev: the data physical device sending the data
+ *
+ * Return: none
+ */
+void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	int i;
+	struct qdf_tso_num_seg_elem_t *c_element;
+	struct qdf_tso_num_seg_elem_t *temp;
+
+	/* pool size 0 implies that tso num seg list is not initialised*/
+	if (!pdev->tso_num_seg_pool.freelist &&
+	    pdev->tso_num_seg_pool.num_seg_pool_size == 0)
+		return;
+
+	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	c_element = pdev->tso_num_seg_pool.freelist;
+	i = pdev->tso_num_seg_pool.num_seg_pool_size;
+
+	pdev->tso_num_seg_pool.freelist = NULL;
+	pdev->tso_num_seg_pool.num_free = 0;
+	pdev->tso_num_seg_pool.num_seg_pool_size = 0;
+
+	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	qdf_spinlock_destroy(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+
+	while (i-- > 0 && c_element) {
+		temp = c_element->next;
+		qdf_mem_free(c_element);
+		c_element = temp;
+	}
+}
+#endif /* FEATURE_TSO */
+
+#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
+void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
+{
+	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
+}
+
+void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
+{
+	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
+}
+
+void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
+{
+	int msdu_idx;
+	int seg_idx;
+
+	ol_txrx_info("TSO Statistics:");
+	ol_txrx_info("TSO pkts %lld, bytes %lld\n",
+		     pdev->stats.pub.tx.tso.tso_pkts.pkts,
+		     pdev->stats.pub.tx.tso.tso_pkts.bytes);
+
+	ol_txrx_info("TSO Histogram for numbers of segments:\n"
+		     "Single segment %d\n"
+		     "  2-5 segments %d\n"
+		     " 6-10 segments %d\n"
+		     "11-15 segments %d\n"
+		     "16-20 segments %d\n"
+		     "  20+ segments %d\n",
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_1,
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
+		     pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
+
+	ol_txrx_info("TSO History Buffer: Total size %d, current_index %d",
+		     NUM_MAX_TSO_MSDUS, TXRX_STATS_TSO_MSDU_IDX(pdev));
+
+	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
+		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
+			continue;
+		ol_txrx_info("jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
+			     msdu_idx,
+			     TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
+			     TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
+			     TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
+			     TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
+
+		for (seg_idx = 0;
+			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
+			   msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
+			 seg_idx++) {
+			struct qdf_tso_seg_t tso_seg =
+				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
+
+			ol_txrx_info("seg idx: %d", seg_idx);
+			ol_txrx_info("tso_enable: %d",
+				     tso_seg.tso_flags.tso_enable);
+			ol_txrx_info("fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
+				     tso_seg.tso_flags.fin,
+				     tso_seg.tso_flags.syn,
+				     tso_seg.tso_flags.rst,
+				     tso_seg.tso_flags.psh,
+				     tso_seg.tso_flags.ack,
+				     tso_seg.tso_flags.urg,
+				     tso_seg.tso_flags.ece,
+				     tso_seg.tso_flags.cwr,
+				     tso_seg.tso_flags.ns);
+			ol_txrx_info("tcp_seq_num: 0x%x ip_id: %d",
+				     tso_seg.tso_flags.tcp_seq_num,
+				     tso_seg.tso_flags.ip_id);
+		}
+	}
+}
+
+void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
+{
+	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
+		     sizeof(struct ol_txrx_stats_elem));
+#if defined(FEATURE_TSO)
+	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
+		     sizeof(struct ol_txrx_stats_tso_info));
+	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
+		     sizeof(struct ol_txrx_tso_histogram));
+#endif
+}
+#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */

+ 6 - 8
core/dp/txrx/ol_tx_send.c

@@ -1031,10 +1031,8 @@ ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
 
 	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
 	if (tx_desc == NULL) {
-		ol_txrx_err(
-				"%s: invalid desc_id(%u), ignore it.\n",
-				__func__,
-				tx_desc_id);
+		ol_txrx_err("%s: invalid desc_id(%u), ignore it.\n",
+			    __func__, tx_desc_id);
 		return;
 	}
 
@@ -1297,7 +1295,6 @@ ol_tx_delay_hist(struct cdp_pdev *ppdev,
 }
 
 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
-
 static uint8_t
 ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
 			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
@@ -1358,11 +1355,9 @@ ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
 		return QDF_NBUF_TX_EXT_TID_INVALID;
 	}
 }
-#endif
 
 static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
 {
-#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
 	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
 	uint8_t tid;
 	qdf_nbuf_t msdu = tx_desc->netbuf;
@@ -1379,10 +1374,13 @@ static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
 		}
 	}
 	return tid;
+}
 #else
+static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
+{
 	return 0;
-#endif
 }
+#endif
 
 static inline int
 ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)

+ 2 - 866
core/dp/txrx/ol_txrx.c

@@ -112,117 +112,9 @@ ol_txrx_set_wmm_param(struct cdp_pdev *data_pdev,
 extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid,
 		    uint64_t **last_pn, uint32_t **rmf_pn_replays);
 
-#ifdef QCA_HL_NETDEV_FLOW_CONTROL
-static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
-	[TXRX_FC_5GH_80M_2x2] = 2000,
-	[TXRX_FC_2GH_40M_2x2] = 800,
-};
-#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
-
 /* thresh for peer's cached buf queue beyond which the elements are dropped */
 #define OL_TXRX_CACHED_BUFQ_THRESH 128
 
-#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
-
-/**
- * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
- * @vdev: the data virtual device
- * @bss_addr: bss address
- *
- * Return: None
- */
-static void
-ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t  *)pvdev;
-
-	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
-	if (bss_addr && vdev->last_real_peer &&
-	    !qdf_mem_cmp((u8 *)bss_addr,
-			     vdev->last_real_peer->mac_addr.raw,
-			     IEEE80211_ADDR_LEN))
-		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
-			     vdev->last_real_peer->mac_addr.raw,
-			     OL_TXRX_MAC_ADDR_LEN);
-	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
-}
-
-/**
- * ol_txrx_add_last_real_peer() - add last peer
- * @pdev: the data physical device
- * @vdev: virtual device
- * @peer_id: peer id
- *
- * Return: None
- */
-static void
-ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
-			   struct cdp_vdev *pvdev, uint8_t *peer_id)
-{
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-	ol_txrx_peer_handle peer;
-
-	peer = ol_txrx_find_peer_by_addr(
-		(struct cdp_pdev *)pdev,
-		vdev->hl_tdls_ap_mac_addr.raw,
-		peer_id);
-
-	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
-	if (!vdev->last_real_peer && peer &&
-	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
-		vdev->last_real_peer = peer;
-	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
-}
-
-/**
- * is_vdev_restore_last_peer() - check for vdev last peer
- * @peer: peer object
- *
- * Return: true if last peer is not null
- */
-static bool
-is_vdev_restore_last_peer(void *ppeer)
-{
-	struct ol_txrx_peer_t *peer = ppeer;
-	struct ol_txrx_vdev_t *vdev;
-
-	vdev = peer->vdev;
-	return vdev->last_real_peer && (vdev->last_real_peer == peer);
-}
-
-/**
- * ol_txrx_update_last_real_peer() - check for vdev last peer
- * @pdev: the data physical device
- * @peer: peer device
- * @peer_id: peer id
- * @restore_last_peer: restore last peer flag
- *
- * Return: None
- */
-static void
-ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
-	uint8_t *peer_id, bool restore_last_peer)
-{
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
-	struct ol_txrx_peer_t *peer = ppeer;
-	struct ol_txrx_vdev_t *vdev;
-
-	if (!restore_last_peer)
-		return;
-
-	vdev = peer->vdev;
-	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
-				vdev->hl_tdls_ap_mac_addr.raw, peer_id);
-
-	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
-	if (!vdev->last_real_peer && peer &&
-	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
-		vdev->last_real_peer = peer;
-	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
-}
-#endif
-
 /**
  * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
  *    fw is compatible for marking first packet after wow wakeup
@@ -243,48 +135,6 @@ static void ol_tx_mark_first_wakeup_packet(uint8_t value)
 	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
 }
 
-u_int16_t
-ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
-{
-	u_int16_t desc_pool_size;
-	u_int16_t steady_state_tx_lifetime_ms;
-	u_int16_t safety_factor;
-
-	/*
-	 * Steady-state tx latency:
-	 *     roughly 1-2 ms flight time
-	 *   + roughly 1-2 ms prep time,
-	 *   + roughly 1-2 ms target->host notification time.
-	 * = roughly 6 ms total
-	 * Thus, steady state number of frames =
-	 * steady state max throughput / frame size * tx latency, e.g.
-	 * 1 Gbps / 1500 bytes * 6 ms = 500
-	 *
-	 */
-	steady_state_tx_lifetime_ms = 6;
-
-	safety_factor = 8;
-
-	desc_pool_size =
-		ol_cfg_max_thruput_mbps(ctrl_pdev) *
-		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
-		(8 * OL_TX_AVG_FRM_BYTES) *
-		steady_state_tx_lifetime_ms *
-		safety_factor;
-
-	/* minimum */
-	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
-		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;
-
-	/* maximum */
-	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
-		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;
-
-	return desc_pool_size;
-}
-
-/*=== function definitions ===*/
-
 /**
  * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
  *                                        wmi is enabled or not.
@@ -612,589 +462,6 @@ static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
 #define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
 #endif
 
-#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
-
-/**
- * ol_txrx_update_group_credit() - update group credit for tx queue
- * @group: for which credit needs to be updated
- * @credit: credits
- * @absolute: TXQ group absolute
- *
- * Return: allocated pool size
- */
-void ol_txrx_update_group_credit(
-		struct ol_tx_queue_group_t *group,
-		int32_t credit,
-		u_int8_t absolute)
-{
-	if (absolute)
-		qdf_atomic_set(&group->credit, credit);
-	else
-		qdf_atomic_add(credit, &group->credit);
-}
-
-/**
- * ol_txrx_update_tx_queue_groups() - update vdev tx queue group if
- *				      vdev id mask and ac mask is not matching
- * @pdev: the data physical device
- * @group_id: TXQ group id
- * @credit: TXQ group credit count
- * @absolute: TXQ group absolute
- * @vdev_id_mask: TXQ vdev group id mask
- * @ac_mask: TQX access category mask
- *
- * Return: None
- */
-void ol_txrx_update_tx_queue_groups(
-		ol_txrx_pdev_handle pdev,
-		u_int8_t group_id,
-		int32_t credit,
-		u_int8_t absolute,
-		u_int32_t vdev_id_mask,
-		u_int32_t ac_mask
-		)
-{
-	struct ol_tx_queue_group_t *group;
-	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
-	u_int32_t membership;
-	struct ol_txrx_vdev_t *vdev;
-
-	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
-		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
-			__func__,
-			group_id);
-		return;
-	}
-
-	group = &pdev->txq_grps[group_id];
-
-	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
-
-	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
-	/*
-	 * if the membership (vdev id mask and ac mask)
-	 * matches then no need to update tx qeue groups.
-	 */
-	if (group->membership == membership)
-		/* Update Credit Only */
-		goto credit_update;
-
-	credit += ol_txrx_distribute_group_credits(pdev, group_id,
-						   vdev_id_mask);
-	/*
-	 * The membership (vdev id mask and ac mask) does not match.
-	 * TODO: the ac mask is ignored for now.
-	 */
-	qdf_assert(ac_mask == 0xffff);
-	group_vdev_id_mask =
-		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
-
-	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-		group_vdev_bit_mask =
-			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
-					group_vdev_id_mask, vdev->vdev_id);
-		vdev_bit_mask =
-			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
-					vdev_id_mask, vdev->vdev_id);
-
-		if (group_vdev_bit_mask != vdev_bit_mask) {
-			/*
-			 * Change in vdev tx queue group
-			 */
-			if (!vdev_bit_mask) {
-				/* Set Group Pointer (vdev and peer) to NULL */
-				ol_tx_set_vdev_group_ptr(
-						pdev, vdev->vdev_id, NULL);
-			} else {
-				/* Set Group Pointer (vdev and peer) */
-				ol_tx_set_vdev_group_ptr(
-						pdev, vdev->vdev_id, group);
-			}
-		}
-	}
-	/* Update membership */
-	group->membership = membership;
-credit_update:
-	/* Update Credit */
-	ol_txrx_update_group_credit(group, credit, absolute);
-	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
-}
-#endif
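
A hedged example of driving the group update; the ac mask is 0xffff because the membership-change path above asserts that ACs are not yet filtered:

/*
 * Sketch only: place vdev 0 and vdev 1 in TXQ group 0 with 50 absolute
 * credits.  The one-bit-per-vdev encoding of vdev_id_mask is an
 * assumption about the OL_TXQ_GROUP_* macros, not something defined by
 * this change.
 */
static void example_txq_group_update(ol_txrx_pdev_handle pdev)
{
	ol_txrx_update_tx_queue_groups(pdev,
				       0,	/* group_id     */
				       50,	/* credit       */
				       1,	/* absolute     */
				       0x3,	/* vdev_id_mask */
				       0xffff);	/* ac_mask      */
}
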
-
-#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
-	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
-#define MIN_INIT_GROUP_CREDITS	10
-int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
-				     u8 group_id,
-				     u32 vdevid_mask_new)
-{
-	struct ol_tx_queue_group_t *grp = &pdev->txq_grps[group_id];
-	struct ol_tx_queue_group_t *grp_nxt = &pdev->txq_grps[!group_id];
-	int creds_nxt = qdf_atomic_read(&grp_nxt->credit);
-	int vdevid_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp->membership);
-	int vdevid_mask_othgrp =
-		OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp_nxt->membership);
-	int creds_distribute = 0;
-
-	/* if the vdev being added to the group is the first vdev */
-	if ((vdevid_mask == 0) && (vdevid_mask_new != 0)) {
-		/* if other group has members */
-		if (vdevid_mask_othgrp) {
-			if (creds_nxt < MIN_INIT_GROUP_CREDITS)
-				creds_distribute = creds_nxt / 2;
-			else
-				creds_distribute = MIN_INIT_GROUP_CREDITS;
-
-			ol_txrx_update_group_credit(grp_nxt, -creds_distribute,
-						    0);
-		} else {
-			/*
-			 * Other grp has no members, give all credits to this
-			 * grp.
-			 */
-			creds_distribute =
-				qdf_atomic_read(&pdev->target_tx_credit);
-		}
-	/* if all vdevs are removed from this grp */
-	} else if ((vdevid_mask != 0) && (vdevid_mask_new == 0)) {
-		if (vdevid_mask_othgrp)
-			/* Transfer credits to other grp */
-			ol_txrx_update_group_credit(grp_nxt,
-						    qdf_atomic_read(&grp->
-						    credit),
-						    0);
-		/* Set current grp credits to zero */
-		ol_txrx_update_group_credit(grp, 0, 1);
-	}
-
-	return creds_distribute;
-}
-#endif /*
-	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
-	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
-	*/
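
The initial-credit rule can be illustrated with a small helper; the credit counts in the comments are only examples:

/* Sketch only: mirrors the initial-credit rule above with plain ints. */
static int example_initial_group_credits(int other_group_credits)
{
	if (other_group_credits < MIN_INIT_GROUP_CREDITS)
		return other_group_credits / 2;	/* e.g. 6 credits -> 3 move   */
	return MIN_INIT_GROUP_CREDITS;		/* e.g. 60 credits -> 10 move */
}
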
-#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
-
-/**
- * ol_txrx_rsrc_threshold_lo() - set threshold low - when to start tx desc
- *				 margin replenishment
- * @desc_pool_size: tx desc pool size
- *
- * Return: threshold low
- */
-static inline uint16_t
-ol_txrx_rsrc_threshold_lo(int desc_pool_size)
-{
-	int threshold_low;
-
-	/*
-	 * A 5% margin of unallocated descriptors is too much for the
-	 * per-vdev mechanism, so define the value separately.
-	 */
-	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
-
-	return threshold_low;
-}
-
-/**
- * ol_txrx_rsrc_threshold_hi() - set threshold high - where to stop
- *				 during tx desc margin replenishment
- * @desc_pool_size: tx desc pool size
- *
- * Return: threshold high
- */
-static inline uint16_t
-ol_txrx_rsrc_threshold_hi(int desc_pool_size)
-{
-	int threshold_high;
-	/* when freeing up descriptors,
-	 * keep going until there's a 7.5% margin
-	 */
-	threshold_high = ((15 * desc_pool_size)/100)/2;
-
-	return threshold_high;
-}
-#else
-
-static inline uint16_t
-ol_txrx_rsrc_threshold_lo(int desc_pool_size)
-{
-	int threshold_low;
-	/* always maintain a 5% margin of unallocated descriptors */
-	threshold_low = (5 * desc_pool_size)/100;
-
-	return threshold_low;
-}
-
-static inline uint16_t
-ol_txrx_rsrc_threshold_hi(int desc_pool_size)
-{
-	int threshold_high;
-	/* when freeing up descriptors, keep going until
-	 * there's a 15% margin
-	 */
-	threshold_high = (15 * desc_pool_size)/100;
-
-	return threshold_high;
-}
-#endif
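
A numeric illustration of the two threshold flavours for an assumed pool size:

/* Sketch only: thresholds for an assumed 1024-descriptor pool. */
static void example_rsrc_thresholds(void)
{
	int pool = 1024;
	int lo = (5 * pool) / 100;			/* 51: default low     */
	int hi = (15 * pool) / 100;			/* 153: default high   */
	int hi_per_vdev = ((15 * pool) / 100) / 2;	/* 76: per-vdev high   */

	(void)lo; (void)hi; (void)hi_per_vdev;
}
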
-
-#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
-
-/**
- * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
- * @pdev: the physical device object
- *
- * Return: None
- */
-static void
-ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
-{
-	qdf_spinlock_create(&pdev->txq_log_spinlock);
-	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
-	pdev->txq_log.oldest_record_offset = 0;
-	pdev->txq_log.offset = 0;
-	pdev->txq_log.allow_wrap = 1;
-	pdev->txq_log.wrapped = 0;
-}
-
-/**
- * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
- * @pdev: the physical device object
- *
- * Return: None
- */
-static inline void
-ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
-{
-	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
-}
-
-#else
-
-static inline void
-ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
-{
-}
-
-static inline void
-ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
-{
-}
-
-
-#endif
-
-#if defined(DEBUG_HL_LOGGING)
-
-/**
- * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
- * @pdev: the physical device object
- *
- * Return: None
- */
-static inline void
-ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
-{
-	qdf_spinlock_create(&pdev->grp_stat_spinlock);
-	pdev->grp_stats.last_valid_index = -1;
-	pdev->grp_stats.wrap_around = 0;
-}
-
-/**
- * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
- * @pdev: the physical device object
- *
- * Return: None
- */
-static inline void
-ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
-{
-	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
-}
-#else
-
-static inline void
-ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
-{
-}
-
-static inline void
-ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
-{
-}
-#endif
-
-#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
-
-/**
- * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
- * @pvdev: the virtual device object
- * @flag: new value for the vdev's hlTdlsFlag
- *
- * Return: None
- */
-void
-ol_txrx_hl_tdls_flag_reset(struct cdp_vdev *pvdev, bool flag)
-{
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-
-	vdev->hlTdlsFlag = flag;
-}
-#endif
-
-#if defined(CONFIG_HL_SUPPORT)
-
-/**
- * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
- * @vdev: the virtual device object
- *
- * Return: None
- */
-static void
-ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
-{
-	u_int8_t i;
-
-	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
-		TAILQ_INIT(&vdev->txqs[i].head);
-		vdev->txqs[i].paused_count.total = 0;
-		vdev->txqs[i].frms = 0;
-		vdev->txqs[i].bytes = 0;
-		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
-		vdev->txqs[i].flag = ol_tx_queue_empty;
-		/* aggregation is not applicable for vdev tx queues */
-		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
-		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
-		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
-	}
-}
-
-/**
- * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
- * @vdev: the virtual device object
- *
- * Return: None
- */
-static void
-ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
-{
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	struct ol_tx_frms_queue_t *txq;
-	int i;
-
-	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
-		txq = &vdev->txqs[i];
-		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
-	}
-}
-
-/**
- * ol_txrx_peer_txqs_init() - initialise peer tx queues
- * @pdev: the physical device object
- * @peer: peer object
- *
- * Return: None
- */
-static void
-ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
-		       struct ol_txrx_peer_t *peer)
-{
-	uint8_t i;
-	struct ol_txrx_vdev_t *vdev = peer->vdev;
-
-	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
-	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
-		TAILQ_INIT(&peer->txqs[i].head);
-		peer->txqs[i].paused_count.total = 0;
-		peer->txqs[i].frms = 0;
-		peer->txqs[i].bytes = 0;
-		peer->txqs[i].ext_tid = i;
-		peer->txqs[i].flag = ol_tx_queue_empty;
-		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
-		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
-		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
-	}
-	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
-
-	/* aggregation is not applicable for mgmt and non-QoS tx queues */
-	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
-		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
-
-	ol_txrx_peer_pause(peer);
-}
-
-/**
- * ol_txrx_peer_tx_queue_free() - free peer tx queues
- * @pdev: the physical device object
- * @peer: peer object
- *
- * Return: None
- */
-static void
-ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
-			   struct ol_txrx_peer_t *peer)
-{
-	struct ol_tx_frms_queue_t *txq;
-	uint8_t i;
-
-	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
-		txq = &peer->txqs[i];
-		ol_tx_queue_free(pdev, txq, i, true);
-	}
-}
-#else
-
-static inline void
-ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
-{
-}
-
-static inline void
-ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
-{
-}
-
-static inline void
-ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
-		       struct ol_txrx_peer_t *peer)
-{
-}
-
-static inline void
-ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
-			   struct ol_txrx_peer_t *peer)
-{
-}
-#endif
-
-#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
-static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
-{
-	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
-}
-
-static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
-{
-	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
-}
-
-static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
-{
-	int msdu_idx;
-	int seg_idx;
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-		  "TSO Statistics:");
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-		  "TSO pkts %lld, bytes %lld\n",
-		 pdev->stats.pub.tx.tso.tso_pkts.pkts,
-		 pdev->stats.pub.tx.tso.tso_pkts.bytes);
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-		  "TSO Histogram for numbers of segments:\n"
-		 "Single segment	%d\n"
-		 "  2-5 segments	%d\n"
-		 " 6-10 segments	%d\n"
-		 "11-15 segments	%d\n"
-		 "16-20 segments	%d\n"
-		 "  20+ segments	%d\n",
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_1,
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
-		 pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-		  "TSO History Buffer: Total size %d, current_index %d",
-		 NUM_MAX_TSO_MSDUS,
-		 TXRX_STATS_TSO_MSDU_IDX(pdev));
-
-	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
-		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
-			continue;
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-			  "jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
-			 msdu_idx,
-			 TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
-			 TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
-			 TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
-			 TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
-
-		for (seg_idx = 0;
-			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
-			   msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
-			 seg_idx++) {
-			struct qdf_tso_seg_t tso_seg =
-				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
-
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-				  "seg idx: %d", seg_idx);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-				  "tso_enable: %d",
-				  tso_seg.tso_flags.tso_enable);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-				  "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
-				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
-				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
-				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
-				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
-				  tso_seg.tso_flags.ns);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-				  "tcp_seq_num: 0x%x ip_id: %d",
-				  tso_seg.tso_flags.tcp_seq_num,
-				  tso_seg.tso_flags.ip_id);
-		}
-	}
-}
-
-static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
-{
-	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
-		     sizeof(struct ol_txrx_stats_elem));
-#if defined(FEATURE_TSO)
-	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
-		     sizeof(struct ol_txrx_stats_tso_info));
-	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
-		     sizeof(struct ol_txrx_tso_histogram));
-#endif
-}
-
-#else
-
-static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
-{
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-	 "TSO is not supported\n");
-}
-
-static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
-{
-	/*
-	 * Keep the body empty and do not add an error print, as the print
-	 * would show up every time during driver load if TSO is not enabled.
-	 */
-}
-
-static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
-{
-	/*
-	 * Keep the body empty and do not add an error print, as the print
-	 * would show up every time during driver unload if TSO is not enabled.
-	 */
-}
-
-static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
-{
-	/*
-	 * Keep the body empty and do not add an error print, as the print
-	 * would show up every time during driver unload if TSO is not enabled.
-	 */
-}
-#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */
-
 #if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
 /**
  * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
@@ -1608,27 +875,8 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	 * HL - wait for a HTT target credit initialization
 	 * during htt_attach.
 	 */
-	if (pdev->cfg.is_high_latency) {
-		desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
-		qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
-		qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
-
-		pdev->tx_queue.rsrc_threshold_lo =
-			ol_txrx_rsrc_threshold_lo(desc_pool_size);
-		pdev->tx_queue.rsrc_threshold_hi =
-			ol_txrx_rsrc_threshold_hi(desc_pool_size);
-
-		for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++)
-			qdf_atomic_init(&pdev->txq_grps[i].credit);
-
-		ol_txrx_init_txq_group_limit_lend(pdev);
-
-		ol_tx_target_credit_init(pdev, desc_pool_size);
-	} else {
-		qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
-			       &pdev->target_tx_credit);
-		desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
-	}
+	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
+	ol_tx_init_pdev(pdev);
 
 	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
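
ol_tx_init_pdev() is declared in ol_txrx.h (see the header changes below) and takes over the latency-specific setup removed here. A minimal sketch of what an HL build might do, assembled only from the code removed above; the actual implementation (presumably in the new ol_tx_hl.c/ol_tx_ll.c files) may differ:

/*
 * Sketch only: an HL flavour of ol_tx_init_pdev() that simply absorbs
 * the branch removed above.  Not the real ol_tx_hl.c implementation.
 */
void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
{
	unsigned int desc_pool_size;
	int i;

	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);

	qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
	qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);

	pdev->tx_queue.rsrc_threshold_lo =
		ol_txrx_rsrc_threshold_lo(desc_pool_size);
	pdev->tx_queue.rsrc_threshold_hi =
		ol_txrx_rsrc_threshold_hi(desc_pool_size);

	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++)
		qdf_atomic_init(&pdev->txq_grps[i].credit);

	ol_txrx_init_txq_group_limit_lend(pdev);
	ol_tx_target_credit_init(pdev, desc_pool_size);
}
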
 
@@ -2481,22 +1729,6 @@ static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
 	txrx_ops->tx.tx = ol_tx_data;
 }
 
-#ifdef currently_unused
-/**
- * ol_txrx_set_curchan - Setup the current operating channel of
- * the device
- * @pdev - the data physical device object
- * @chan_mhz - the channel frequency (MHz) that packets are sent on
- *
- * Mainly used when populating monitor mode status that requires
- * the current operating channel
- *
- */
-void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
-{
-}
-#endif
-
 void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
 {
 	vdev->safemode = val;
@@ -5208,102 +4440,6 @@ static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
 	return num_dropped;
 }
 
-#ifdef QCA_HL_NETDEV_FLOW_CONTROL
-/**
- * ol_txrx_register_hl_flow_control() -register hl netdev flow control callback
- * @vdev_id: vdev_id
- * @flowControl: flow control callback
- *
- * Return: 0 for success or error code
- */
-static int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
-					    tx_pause_callback flowcontrol)
-{
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-	u32 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
-
-	/*
-	 * Assert that the tx descriptor pool size meets the requirement:
-	 * a maximum of 2 sessions are allowed on a band.
-	 */
-	QDF_ASSERT((2 * ol_txrx_tx_desc_alloc_table[TXRX_FC_5GH_80M_2x2] +
-		    ol_txrx_tx_desc_alloc_table[TXRX_FC_2GH_40M_2x2])
-		    <= desc_pool_size);
-
-	if (!pdev || !flowcontrol) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "pdev or pause_cb is NULL");
-		return QDF_STATUS_E_INVAL;
-	}
-
-	pdev->pause_cb = flowcontrol;
-	return 0;
-}
-
-static int ol_txrx_set_vdev_os_queue_status(u8 vdev_id,
-					    enum netif_action_type action)
-{
-	struct ol_txrx_vdev_t *vdev =
-	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
-
-	if (!vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid vdev_id %d", __func__, vdev_id);
-		return -EINVAL;
-	}
-
-	switch (action) {
-	case WLAN_NETIF_PRIORITY_QUEUE_ON:
-		qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
-		vdev->prio_q_paused = 0;
-		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
-		break;
-	case WLAN_WAKE_NON_PRIORITY_QUEUE:
-		qdf_atomic_set(&vdev->os_q_paused, 0);
-		break;
-	default:
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid action %d", __func__, action);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/**
- * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
- * @vdev_id: vdev id for the vdev under consideration.
- * @chan: Channel on which the vdev has been started.
- */
-static int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan)
-{
-	struct ol_txrx_vdev_t *vdev =
-	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
-	enum ol_txrx_fc_limit_id fc_limit_id;
-	u32 td_limit;
-
-	if (!vdev) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid vdev_id %d", __func__, vdev_id);
-		return -EINVAL;
-	}
-
-	/* TODO: Handle no of spatial streams and channel BW */
-	if (WLAN_REG_IS_5GHZ_CH(chan))
-		fc_limit_id = TXRX_FC_5GH_80M_2x2;
-	else
-		fc_limit_id = TXRX_FC_2GH_40M_2x2;
-
-	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
-	td_limit = ol_txrx_tx_desc_alloc_table[fc_limit_id];
-	vdev->tx_desc_limit = td_limit;
-	vdev->queue_stop_th = td_limit - TXRX_HL_TX_DESC_HI_PRIO_RESERVED;
-	vdev->queue_restart_th = td_limit - TXRX_HL_TX_DESC_QUEUE_RESTART_TH;
-	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
-
-	return 0;
-}
-#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
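
For context, the resulting per-vdev thresholds behave as follows; the numbers are placeholders, not the driver's real table values:

/*
 * Sketch only, with a placeholder limit (the real
 * ol_txrx_tx_desc_alloc_table entries and reserved counts are defined
 * elsewhere in the driver):
 *   td_limit         = 900
 *   queue_stop_th    = 900 - TXRX_HL_TX_DESC_HI_PRIO_RESERVED
 *   queue_restart_th = 900 - TXRX_HL_TX_DESC_QUEUE_RESTART_TH
 * The HL tx path is expected to pause the vdev's netdev queues once
 * descriptor usage crosses queue_stop_th and resume them at
 * queue_restart_th.
 */
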
-
 /**
  * ol_rx_data_cb() - data rx callback
  * @peer: peer

+ 113 - 14
core/dp/txrx/ol_txrx.h

@@ -24,6 +24,8 @@
 #include "cds_sched.h"
 #include <cdp_txrx_handle.h>
 #include <ol_txrx_types.h>
+#include <ol_txrx_internal.h>
+
 /*
  * Pool of tx descriptors reserved for
  * high-priority traffic, such as ARP/EAPOL etc
@@ -170,15 +172,118 @@ bool ol_txrx_get_tx_resource(uint8_t sta_id,
 int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
 #endif
 
+void ol_tx_init_pdev(ol_txrx_pdev_handle pdev);
+
+#ifdef CONFIG_HL_SUPPORT
+void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev);
+void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev);
+void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
+			    struct ol_txrx_peer_t *peer);
+void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
+				struct ol_txrx_peer_t *peer);
+#else
+static inline void
+ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev) {}
+
+static inline void
+ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev) {}
+
+static inline void
+ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
+		       struct ol_txrx_peer_t *peer) {}
+
+static inline void
+ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
+			   struct ol_txrx_peer_t *peer) {}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
+void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev);
+void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev);
+void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev);
+void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev);
+#else
+static inline void
+ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev) {}
+
+static inline void
+ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev) {}
+
+static inline void
+ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev) {}
+
+static inline void
+ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev) {}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+void ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr);
+void ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
+				struct cdp_vdev *pvdev, uint8_t *peer_id);
+bool is_vdev_restore_last_peer(void *ppeer);
+void ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
+				   uint8_t *peer_id, bool restore_last_peer);
+#endif
+
+#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
+void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev);
+void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev);
+void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev);
+void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev);
+#else
+static inline
+void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
+{
+	ol_txrx_err("TSO is not supported\n");
+}
+
+static inline
+void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev) {}
+
+static inline
+void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev) {}
+
+static inline
+void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev) {}
+#endif
+
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
+			   struct ol_txrx_vdev_t *vdev,
+			   qdf_nbuf_t tx_mgmt_frm,
+			   struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+int ol_txrx_mgmt_send_frame(struct ol_txrx_vdev_t *vdev,
+			    struct ol_tx_desc_t *tx_desc,
+			    qdf_nbuf_t tx_mgmt_frm,
+			    struct ol_txrx_msdu_info_t *tx_msdu_info,
+			    uint16_t chanfreq);
+
+#ifdef CONFIG_HL_SUPPORT
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+	return ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
+}
+#else
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
-void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
-uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);
 static inline
 uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
 {
 	return pdev->num_msdu_desc;
 }
+#else
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
+}
+#endif
+#endif
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);
 QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
 				     tx_pause_callback pause_cb);
 /**
@@ -202,18 +307,6 @@ QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
  */
 bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *vdev);
 #else
-/**
- * ol_tx_get_desc_global_pool_size() - get global pool size
- * @pdev: pdev handle
- *
- * Return: global pool size
- */
-static inline
-uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
-{
-	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
-}
-
 /**
  * ol_tx_get_total_free_desc() - get total free descriptors
  * @pdev: pdev handle
@@ -259,4 +352,10 @@ struct ol_txrx_stats_req_internal
 	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
 				       uint8_t desc_id);
 
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
+int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
+				     tx_pause_callback flowcontrol);
+int ol_txrx_set_vdev_os_queue_status(u8 vdev_id, enum netif_action_type action);
+int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan);
+#endif
 #endif /* _OL_TXRX__H_ */