
qcacld-3.0: Add txrx apis for High Latency systems (Part 2 - HL Datapath)

Add tx scheduler module, tx classify module within the data SW,
tx frame queue logging, group credit support and
send/receive tx frame instances for HL systems.

CRs-Fixed: 975526
Change-Id: If1655d4d832f88e565ab946ef9e9719f256ab7b1
Siddarth Poddar, 9 years ago
parent
commit
b2011f6435

+ 133 - 24
core/dp/ol/inc/ol_txrx_ctrl_api.h

@@ -68,6 +68,10 @@
 #define WLAN_HDD_NETIF_OPER_HISTORY 4
 #define WLAN_DUMP_TX_FLOW_POOL_INFO 5
 #define WLAN_TXRX_DESC_STATS  6
+#define WLAN_SCHEDULER_STATS        21
+#define WLAN_TX_QUEUE_STATS         22
+#define WLAN_BUNDLE_STATS           23
+#define WLAN_CREDIT_STATS           24
 
 /**
  * @brief Set up the data SW subsystem.
@@ -156,6 +160,7 @@ struct ol_tx_wmm_param_t {
 	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
 };
 
+#if defined(CONFIG_HL_SUPPORT)
 /**
  * @brief Set parameters of WMM scheduler per AC settings.
  * @details
@@ -164,23 +169,9 @@ struct ol_tx_wmm_param_t {
  * @param data_pdev - the physical device being paused
  * @param wmm_param - the wmm parameters
  */
-#define ol_txrx_set_wmm_param(data_pdev, wmm_param)     /* no-op */
-
-/**
- * @brief notify tx data SW that a peer's transmissions are suspended.
- * @details
- *  This function applies only to HL systems - in LL systems, tx flow control
- *  is handled entirely within the target FW.
- *  The HL host tx data SW is doing tx classification and tx download
- *  scheduling, and therefore also needs to actively participate in tx
- *  flow control.  Specifically, the HL tx data SW needs to check whether a
- *  given peer is available to transmit to, or is paused.
- *  This function is used to tell the HL tx data SW when a peer is paused,
- *  so the host tx data SW can hold the tx frames for that SW.
- *
- * @param data_peer - which peer is being paused
- */
-#define ol_txrx_peer_pause(data_peer)   /* no-op */
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+		      struct ol_tx_wmm_param_t wmm_param);
 
 /**
  * @brief notify tx data SW that a peer-TID is ready to transmit to.
@@ -204,7 +195,9 @@ struct ol_tx_wmm_param_t {
  * @param tid - which TID within the peer is being unpaused, or -1 as a
  *      wildcard to unpause all TIDs within the peer
  */
-#define ol_txrx_peer_tid_unpause(data_peer, tid)        /* no-op */
+void
+ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid);
+
 
 /**
  * @brief Tell a paused peer to release a specified number of tx frames.
@@ -230,8 +223,9 @@ struct ol_tx_wmm_param_t {
  * @param max_frms - limit on the number of tx frames to release from the
  *      specified TID's queues within the specified peer
  */
-#define ol_txrx_tx_release(peer, tid_mask, max_frms)    /* no-op */
-
+void ol_txrx_tx_release(ol_txrx_peer_handle peer,
+			u_int32_t tid_mask,
+			int max_frms);
 
 /**
  * @brief Suspend all tx data per thermal event/timer for the
@@ -240,7 +234,9 @@ struct ol_tx_wmm_param_t {
  *  This function applies only to HL systems, and it makes pause and
  * unpause operations happen in pairs.
  */
-#define ol_txrx_throttle_pause(data_pdev)       /* no-op */
+void
+ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev);
+
 
 /**
  * @brief Resume all tx data per thermal event/timer for the
@@ -249,7 +245,64 @@ struct ol_tx_wmm_param_t {
  *  This function applies only to HL systems, and it makes pause and
  * unpause operations happen in pairs.
  */
-#define ol_txrx_throttle_unpause(data_pdev)     /* no-op */
+void
+ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
+
+#else
+
+static inline
+void ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+		      struct ol_tx_wmm_param_t wmm_param)
+{
+	return;
+}
+
+static inline void
+ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid)
+{
+	return;
+}
+
+static inline void
+ol_txrx_tx_release(ol_txrx_peer_handle peer,
+		   u_int32_t tid_mask,
+		   int max_frms)
+{
+	return;
+}
+
+static inline void
+ol_txrx_throttle_pause(ol_txrx_pdev_handle data_pdev)
+{
+	return;
+}
+
+static inline void
+ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev)
+{
+	return;
+}
+
+#endif /* CONFIG_HL_SUPPORT */
+
+/**
+ * @brief notify tx data SW that a peer's transmissions are suspended.
+ * @details
+ *  This function applies only to HL systems - in LL systems, tx flow control
+ *  is handled entirely within the target FW.
+ *  The HL host tx data SW is doing tx classification and tx download
+ *  scheduling, and therefore also needs to actively participate in tx
+ *  flow control.  Specifically, the HL tx data SW needs to check whether a
+ *  given peer is available to transmit to, or is paused.
+ *  This function is used to tell the HL tx data SW when a peer is paused,
+ *  so the host tx data SW can hold the tx frames for that peer.
+ *
+ * @param data_peer - which peer is being paused
+ */
+static inline void ol_txrx_peer_pause(struct ol_txrx_peer_t *data_peer)
+{
+	return;
+}
 
 /**
  * @brief Suspend all tx data for the specified physical device.
@@ -263,7 +316,9 @@ struct ol_tx_wmm_param_t {
  *
  * @param data_pdev - the physical device being paused
  */
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
+		defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+
 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
 #else
 static inline
@@ -281,7 +336,9 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
  *
  * @param data_pdev - the physical device being unpaused
  */
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
+		defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+
 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
 #else
 static inline
@@ -488,6 +545,58 @@ int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
  */
 #define QCA_TX_DELAY_HIST_REPORT_BINS 6
 
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+/**
+ * @brief Configure the bad peer tx limit setting.
+ * @details
+ *
+ * @param pdev - the physical device
+ */
+void
+ol_txrx_bad_peer_txctl_set_setting(
+	struct ol_txrx_pdev_t *pdev,
+	int enable,
+	int period,
+	int txq_limit);
+
+/**
+ * @brief Configure the bad peer tx threshold limit
+ * @details
+ *
+ * @param pdev - the physical device
+ */
+void
+ol_txrx_bad_peer_txctl_update_threshold(
+	struct ol_txrx_pdev_t *pdev,
+	int level,
+	int tput_thresh,
+	int tx_limit);
+
+#else
+
+static inline void
+ol_txrx_bad_peer_txctl_set_setting(
+	struct ol_txrx_pdev_t *pdev,
+	int enable,
+	int period,
+	int txq_limit)
+{
+	return;
+}
+
+static inline void
+ol_txrx_bad_peer_txctl_update_threshold(
+	struct ol_txrx_pdev_t *pdev,
+	int level,
+	int tput_thresh,
+	int tx_limit)
+{
+	return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
+
+
 void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
 			  struct ol_txrx_peer_t *peer);
 

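The throttle API above is documented to operate in pause/unpause pairs, with the #else branch providing inline no-op stubs so non-HL builds compile unchanged. A minimal sketch of a hypothetical caller (the function name and the too_hot flag are illustrative, not part of this change):

#include <ol_txrx_ctrl_api.h>

/* Hypothetical thermal handler: every pause is paired with a later
 * unpause. Without CONFIG_HL_SUPPORT both calls resolve to the empty
 * inline stubs declared above.
 */
static void example_thermal_throttle(ol_txrx_pdev_handle data_pdev, int too_hot)
{
	if (too_hot)
		ol_txrx_throttle_pause(data_pdev);
	else
		ol_txrx_throttle_unpause(data_pdev);
}
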
+ 20 - 1
core/dp/ol/inc/ol_txrx_dbg.h

@@ -134,7 +134,26 @@ void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once);
 /* uncomment this to enable the tx queue log feature */
 /* #define ENABLE_TX_QUEUE_LOG 1 */
 
-#define ol_tx_queue_log_display(pdev)
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+
+void
+ol_tx_queue_log_display(ol_txrx_pdev_handle pdev);
+void ol_tx_queue_log_clear(ol_txrx_pdev_handle pdev);
+#else
+
+static inline void
+ol_tx_queue_log_display(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+
+static inline
+void ol_tx_queue_log_clear(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+#endif /* defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) */
+
 
 /*----------------------------------------*/
 

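ol_tx_queue_log_display() and ol_tx_queue_log_clear() only become real functions when both DEBUG_HL_LOGGING and CONFIG_HL_SUPPORT are defined; otherwise they collapse to the inline stubs above. A minimal sketch of a hypothetical debug hook built on them (the wrapper name is illustrative):

#include <ol_txrx_dbg.h>

/* Hypothetical debug hook: dump the HL tx queue log, then clear it so
 * the next dump covers a fresh window. Compiles to an empty shell when
 * the logging feature is disabled.
 */
static void example_dump_tx_queue_log(ol_txrx_pdev_handle pdev)
{
	ol_tx_queue_log_display(pdev);
	ol_tx_queue_log_clear(pdev);
}
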
+ 50 - 5
core/dp/txrx/ol_cfg.c

@@ -57,6 +57,36 @@ void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
 }
 #endif
 
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_pdev_cfg_param_update() - assign download size of tx frame for txrx
+ *				    pdev that will be used across datapath
+ * @cfg_ctx: ptr to config parameter for txrx pdev
+ *
+ * Return: None
+ */
+static inline
+void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
+{
+	cfg_ctx->is_high_latency = 1;
+	/* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
+	cfg_ctx->tx_download_size = 1500;
+	cfg_ctx->tx_free_at_download = 0;
+}
+#else
+
+static inline
+void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
+{
+	/*
+	 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
+	 * Include payload, up to the end of UDP header for IPv4 case
+	 */
+	cfg_ctx->tx_download_size = 16;
+}
+#endif
+
 #if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
 static inline
 uint8_t ol_defrag_timeout_check(void)
@@ -86,6 +116,7 @@ uint8_t ol_defrag_timeout_check(void)
  *
  * Return: the control device object
  */
+
 ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
 				  struct txrx_pdev_cfg_param_t cfg_param)
 {
@@ -97,11 +128,8 @@ ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
 		return NULL;
 	}
 
-	/*
-	 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
-	 * Include payload, up to the end of UDP header for IPv4 case
-	 */
-	cfg_ctx->tx_download_size = 16;
+	ol_pdev_cfg_param_update(cfg_ctx);
+
 	/* temporarily disabled PN check for Riva/Pronto */
 	cfg_ctx->rx_pn_check = 1;
 	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
@@ -248,6 +276,21 @@ int ol_cfg_tx_free_at_download(ol_pdev_handle pdev)
 	return cfg->tx_free_at_download;
 }
 
+void ol_cfg_set_tx_free_at_download(ol_pdev_handle pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	cfg->tx_free_at_download = 1;
+}
+
+
+#ifdef CONFIG_HL_SUPPORT
+uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	return cfg->target_tx_credit;
+}
+#else
+
 uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
@@ -259,6 +302,7 @@ uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
 
 	return rc;
 }
+#endif
 
 int ol_cfg_tx_download_size(ol_pdev_handle pdev)
 {
@@ -326,6 +370,7 @@ int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev)
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	return cfg->tx_flow_start_queue_offset;
 }
+
 #endif
 
 #ifdef IPA_OFFLOAD

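ol_pdev_cfg_param_update() is where the HL/LL split is decided at attach time: HL marks the pdev as high latency and downloads the full MSDU (1500 bytes), while LL keeps the 16-byte header-only download, and on HL builds ol_cfg_target_tx_credit() now returns the configured credit directly. A minimal sketch of reading those values back after ol_pdev_cfg_attach(), assuming it sits next to the other accessors in ol_cfg.c (the helper name is illustrative):

/* Hypothetical sanity check of the attach-time tx configuration. */
static void example_log_pdev_cfg(ol_pdev_handle cfg_pdev)
{
	int dl_size = ol_cfg_tx_download_size(cfg_pdev);
	uint16_t credit = ol_cfg_target_tx_credit(cfg_pdev);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "tx_download_size=%d target_tx_credit=%u",
		  dl_size, credit);
}
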
+ 4 - 0
core/dp/txrx/ol_rx.c

@@ -1019,6 +1019,10 @@ ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
 		qdf_nbuf_t next = qdf_nbuf_next(msdu);
 
 		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
+		/* for HL, point to payload right now*/
+		if (pdev->cfg.is_high_latency)
+			qdf_nbuf_pull_head(msdu,
+				htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc));
 
 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 		info.is_msdu_cmpl_mpdu =

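On HL targets the HTT rx descriptor travels at the head of the netbuf, so ol_rx_deliver() now pulls it off before the payload is processed. The same two calls, shown in isolation (the helper name is illustrative, not part of this change):

/* Hypothetical helper mirroring the hunk above: advance the netbuf
 * data pointer past the HL rx descriptor so it points at the payload.
 */
static void example_hl_strip_rx_desc(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
{
	void *rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, msdu);

	qdf_nbuf_pull_head(msdu,
			   htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc));
}
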
+ 228 - 5
core/dp/txrx/ol_rx_defrag.c

@@ -104,16 +104,178 @@ const struct ol_rx_defrag_cipher f_wep = {
 	0,
 };
 
-inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_rx_frag_get_mac_hdr() - retrieve mac header
+ * @htt_pdev: pointer to htt pdev handle
+ * @frag: rx fragment
+ *
+ * Return: pointer to ieee mac header of frag
+ */
+static struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
+	htt_pdev_handle htt_pdev, qdf_nbuf_t frag)
+{
+	void *rx_desc;
+	int rx_desc_len;
+
+	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
+	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
+	return (struct ieee80211_frame *)(qdf_nbuf_data(frag) + rx_desc_len);
+}
+
+/**
+ * ol_rx_frag_pull_hdr() - point to payload of rx frag
+ * @htt_pdev: pointer to htt pdev handle
+ * @frag: rx fragment
+ * @hdrsize: header size
+ *
+ * Return: None
+ */
+static void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
+	qdf_nbuf_t frag, int hdrsize)
+{
+	void *rx_desc;
+	int rx_desc_len;
+
+	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag);
+	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc);
+	qdf_nbuf_pull_head(frag, rx_desc_len + hdrsize);
+}
+
+/**
+ * ol_rx_frag_clone() - clone the rx frag
+ * @frag: rx fragment to clone from
+ *
+ * Return: cloned buffer
+ */
+static inline qdf_nbuf_t
+ol_rx_frag_clone(qdf_nbuf_t frag)
+{
+	return qdf_nbuf_clone(frag);
+}
+
+/**
+ * ol_rx_frag_desc_adjust() - adjust rx frag descriptor position
+ * @pdev: pointer to txrx handle
+ * @msdu: msdu
+ * @rx_desc_old_position: rx descriptor old position
+ * @ind_old_position: index of old position
+ * @rx_desc_len: rx descriptor length
+ *
+ * Return: None
+ */
+static void
+ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
+		       qdf_nbuf_t msdu,
+			void **rx_desc_old_position,
+			void **ind_old_position, int *rx_desc_len)
+{
+	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
+									msdu);
+	*ind_old_position = *rx_desc_old_position - HTT_RX_IND_HL_BYTES;
+	*rx_desc_len = htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
+			*rx_desc_old_position);
+}
+
+/**
+ * ol_rx_frag_restructure() - shift the rx descriptor and indication for HL
+ * @pdev: physical device object
+ * @msdu: the buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ * @ind_old_position: rx msdu indication
+ * @f_type: pointer to the rx defrag cipher
+ * @rx_desc_len: rx descriptor length (number of bytes to move)
+ *
+ * Return: None
+ */
+static void
+ol_rx_frag_restructure(
+	ol_txrx_pdev_handle pdev,
+	qdf_nbuf_t msdu,
+	void *rx_desc_old_position,
+	void *ind_old_position,
+	const struct ol_rx_defrag_cipher *f_type,
+	int rx_desc_len)
+{
+	if ((ind_old_position == NULL) || (rx_desc_old_position == NULL)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "ind_old_position,rx_desc_old_position is NULL\n");
+		ASSERT(0);
+		return;
+	}
+	/* move rx descriptor */
+	qdf_mem_move(rx_desc_old_position + f_type->ic_header,
+		     rx_desc_old_position, rx_desc_len);
+	/* move rx indication*/
+	qdf_mem_move(ind_old_position + f_type->ic_header, ind_old_position,
+		     HTT_RX_IND_HL_BYTES);
+}
+
+/**
+ * ol_rx_get_desc_len() - retrieve the HL rx descriptor length
+ * @htt_pdev: the HTT instance the rx data was received on
+ * @wbuf: buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ *
+ * Return: the HL rx desc size
+ */
+static
+int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
+			qdf_nbuf_t wbuf,
+			void **rx_desc_old_position)
+{
+	int rx_desc_len = 0;
+	*rx_desc_old_position = htt_rx_msdu_desc_retrieve(htt_pdev, wbuf);
+	rx_desc_len = htt_rx_msdu_rx_desc_size_hl(htt_pdev,
+			*rx_desc_old_position);
+
+	return rx_desc_len;
+}
+
+/**
+ * ol_rx_defrag_push_rx_desc() - restore the rx descriptor in front of the payload
+ * @nbuf: buffer containing the MSDU payload
+ * @rx_desc_old_position: rx MSDU descriptor
+ * @ind_old_position: rx msdu indication
+ * @rx_desc_len: HL rx desc size
+ *
+ * Return: None
+ */
+static
+void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
+				void *rx_desc_old_position,
+				void *ind_old_position,
+				int rx_desc_len)
+{
+	qdf_nbuf_push_head(nbuf, rx_desc_len);
+	qdf_mem_move(
+		qdf_nbuf_data(nbuf), rx_desc_old_position, rx_desc_len);
+	qdf_mem_move(
+		qdf_nbuf_data(nbuf) - HTT_RX_IND_HL_BYTES, ind_old_position,
+		HTT_RX_IND_HL_BYTES);
+}
+#else
+
+static inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
 	htt_pdev_handle htt_pdev,
 	qdf_nbuf_t frag)
 {
 	return
 		(struct ieee80211_frame *) qdf_nbuf_data(frag);
 }
-#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
+
+static inline void ol_rx_frag_pull_hdr(htt_pdev_handle htt_pdev,
+	qdf_nbuf_t frag, int hdrsize)
+{
 	qdf_nbuf_pull_head(frag, hdrsize);
-#define OL_RX_FRAG_CLONE(frag) NULL     /* no-op */
+}
+
+static inline qdf_nbuf_t
+ol_rx_frag_clone(qdf_nbuf_t frag)
+{
+	return NULL;
+}
 
 static inline void
 ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
@@ -126,6 +288,38 @@ ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
 	*rx_desc_len = 0;
 }
 
+static inline void
+ol_rx_frag_restructure(
+		ol_txrx_pdev_handle pdev,
+		qdf_nbuf_t msdu,
+		void *rx_desc_old_position,
+		void *ind_old_position,
+		const struct ol_rx_defrag_cipher *f_type,
+		int rx_desc_len)
+{
+	/* no op */
+	return;
+}
+
+static inline
+int ol_rx_get_desc_len(htt_pdev_handle htt_pdev,
+			qdf_nbuf_t wbuf,
+			void **rx_desc_old_position)
+{
+	return 0;
+}
+
+static inline
+void ol_rx_defrag_push_rx_desc(qdf_nbuf_t nbuf,
+			void *rx_desc_old_position,
+			void *ind_old_position,
+			int rx_desc_len)
+{
+	return;
+}
+#endif /* CONFIG_HL_SUPPORT */
+
+
 /*
  * Process incoming fragments
  */
@@ -302,7 +496,7 @@ ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
 	qdf_nbuf_t frag_clone;
 
 	qdf_assert(frag);
-	frag_clone = OL_RX_FRAG_CLONE(frag);
+	frag_clone = ol_rx_frag_clone(frag);
 	frag = frag_clone ? frag_clone : frag;
 
 	mac_hdr = (struct ieee80211_frame *)
@@ -608,6 +802,13 @@ ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
 		return OL_RX_DEFRAG_ERR;
 
 	qdf_mem_move(origHdr + f_tkip.ic_header, origHdr, hdrlen);
+	ol_rx_frag_restructure(
+			pdev,
+			msdu,
+			rx_desc_old_position,
+			ind_old_position,
+			&f_tkip,
+			rx_desc_len);
 	qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
 	qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
 	return OL_RX_DEFRAG_OK;
@@ -630,6 +831,13 @@ ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t hdrlen)
 			       &ind_old_position, &rx_desc_len);
 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
 	qdf_mem_move(origHdr + f_wep.ic_header, origHdr, hdrlen);
+	ol_rx_frag_restructure(
+			pdev,
+			msdu,
+			rx_desc_old_position,
+			ind_old_position,
+			&f_wep,
+			rx_desc_len);
 	qdf_nbuf_pull_head(msdu, f_wep.ic_header);
 	qdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
 	return OL_RX_DEFRAG_OK;
@@ -694,6 +902,13 @@ ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
 		return OL_RX_DEFRAG_ERR;
 
 	qdf_mem_move(origHdr + f_ccmp.ic_header, origHdr, hdrlen);
+	ol_rx_frag_restructure(
+			pdev,
+			nbuf,
+			rx_desc_old_position,
+			ind_old_position,
+			&f_ccmp,
+			rx_desc_len);
 	qdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
 
 	return OL_RX_DEFRAG_OK;
@@ -788,6 +1003,7 @@ ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
 	void *rx_desc_old_position = NULL;
 	void *ind_old_position = NULL;
 	int rx_desc_len = 0;
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
 
 	ol_rx_frag_desc_adjust(pdev,
 			       wbuf,
@@ -831,7 +1047,8 @@ ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
 		if (wbuf == NULL)
 			return OL_RX_DEFRAG_ERR;
 
-		rx_desc_len = 0;
+		rx_desc_len = ol_rx_get_desc_len(htt_pdev, wbuf,
+						 &rx_desc_old_position);
 
 		if (space != 0) {
 			const uint8_t *data_next;
@@ -1008,6 +1225,9 @@ void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
 
 	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
 		     sizeof(llchdr.ethertype));
+
+	ol_rx_defrag_push_rx_desc(msdu, rx_desc_old_position,
+					  ind_old_position, rx_desc_len);
 }
 
 /*
@@ -1058,5 +1278,8 @@ ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
 		if (wh)
 			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
 
+		ol_rx_defrag_push_rx_desc(nbuf, rx_desc_old_position,
+					  ind_old_position, rx_desc_len);
+
 	}
 }

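For HL, the defrag path has to keep the prepended rx descriptor and rx indication consistent while cipher headers are stripped: record where they sit, shift them once the 802.11 header has been moved over the cipher header, and push the descriptor back in front of the payload after 802.3 conversion. A condensed sketch of that ordering, modeled on ol_rx_frag_tkip_decap() above; the helper name is illustrative, the qdf_mem_move of the 802.11 header is elided, and it assumes it lives in ol_rx_defrag.c so the static helpers and f_tkip are visible:

static void example_hl_decap_order(ol_txrx_pdev_handle pdev, qdf_nbuf_t msdu)
{
	void *rx_desc_old_position = NULL;
	void *ind_old_position = NULL;
	int rx_desc_len = 0;

	/* 1. record where the HL rx descriptor and rx indication sit */
	ol_rx_frag_desc_adjust(pdev, msdu, &rx_desc_old_position,
			       &ind_old_position, &rx_desc_len);
	/* ... the 802.11 header is moved over the cipher header here ... */
	/* 2. shift descriptor and indication by the cipher header size */
	ol_rx_frag_restructure(pdev, msdu, rx_desc_old_position,
			       ind_old_position, &f_tkip, rx_desc_len);
	/* 3. drop the cipher header and trailer from the payload */
	qdf_nbuf_pull_head(msdu, f_tkip.ic_header);
	qdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
}
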
+ 14 - 1
core/dp/txrx/ol_rx_fwd.c

@@ -121,7 +121,18 @@ static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
 	 */
 	qdf_nbuf_set_next(msdu, NULL);  /* add NULL terminator */
 
-	msdu = OL_TX_LL(vdev, msdu);
+	/* for HL, point to payload before send to tx again.*/
+		if (pdev->cfg.is_high_latency) {
+			void *rx_desc;
+
+			rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
+							    msdu);
+			qdf_nbuf_pull_head(msdu,
+				htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
+							    rx_desc));
+		}
+
+	msdu = OL_TX_SEND(vdev, msdu);
 
 	if (msdu) {
 		/*
@@ -131,6 +142,7 @@ static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
 		 */
 		qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR);
 	}
+	return;
 }
 
 void
@@ -232,6 +244,7 @@ ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
 			ol_rx_deliver(vdev, peer, tid, deliver_list_head);
 		}
 	}
+	return;
 }
 
 /*

+ 459 - 49
core/dp/txrx/ol_tx.c

@@ -41,7 +41,10 @@
 #include <ol_txrx.h>
 
 /* internal header files relevant only for HL systems */
+#include <ol_tx_classify.h>   /* ol_tx_classify, ol_tx_classify_mgmt */
 #include <ol_tx_queue.h>        /* ol_tx_enqueue */
+#include <ol_tx_sched.h>      /* ol_tx_sched */
+
 
 /* internal header files relevant only for specific systems (Pronto) */
 #include <ol_txrx_encap.h>      /* OL_TX_ENCAP, etc */
@@ -165,7 +168,7 @@ qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
 
 	/* Terminate the (single-element) list of tx frames */
 	qdf_nbuf_set_next(skb, NULL);
-	ret = OL_TX_LL(vdev, skb);
+	ret = OL_TX_SEND(vdev, skb);
 	if (ret) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
 			"%s: Failed to tx", __func__);
@@ -202,7 +205,7 @@ qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
 
 	/* Terminate the (single-element) list of tx frames */
 	qdf_nbuf_set_next(skb, NULL);
-	ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
+	ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
 	if (ret) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
 			"%s: Failed to tx", __func__);
@@ -1041,7 +1044,8 @@ static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
 
 qdf_nbuf_t
 ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
-		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
+		 enum ol_tx_spec tx_spec,
+		 qdf_nbuf_t msdu_list)
 {
 	qdf_nbuf_t msdu = msdu_list;
 	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
@@ -1082,14 +1086,14 @@ ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
 				uint8_t sub_type =
 					ol_txrx_tx_raw_subtype(tx_spec);
 				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
-						 htt_pkt_type_native_wifi,
-						 sub_type);
+						htt_pkt_type_native_wifi,
+						sub_type);
 			} else if (ol_txrx_tx_is_raw(tx_spec)) {
 				/* different types of raw frames */
 				uint8_t sub_type =
 					ol_txrx_tx_raw_subtype(tx_spec);
 				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
-						 htt_pkt_type_raw, sub_type);
+						htt_pkt_type_raw, sub_type);
 			}
 		}
 		/*
@@ -1125,9 +1129,10 @@ ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
 
 /**
  * parse_ocb_tx_header() - Function to check for OCB
- * TX control header on a packet and extract it if present
- *
  * @msdu:   Pointer to OS packet (qdf_nbuf_t)
+ * @tx_ctrl: TX control header extracted from the packet, if present
+ *
+ * Return: true if OCB parsing is successful
  */
 #define OCB_HEADER_VERSION     1
 bool parse_ocb_tx_header(qdf_nbuf_t msdu,
@@ -1137,7 +1142,7 @@ bool parse_ocb_tx_header(qdf_nbuf_t msdu,
 	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
 
 	/* Check if TX control header is present */
-	eth_hdr_p = (struct ether_header *) qdf_nbuf_data(msdu);
+	eth_hdr_p = (struct ether_header *)qdf_nbuf_data(msdu);
 	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
 		/* TX control header is not present. Nothing to do.. */
 		return true;
@@ -1146,12 +1151,12 @@ bool parse_ocb_tx_header(qdf_nbuf_t msdu,
 	qdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
 
 	/* Parse the TX control header */
-	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) qdf_nbuf_data(msdu);
+	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);
 
 	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
 		if (tx_ctrl)
 			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
-				sizeof(*tx_ctrl_hdr));
+				     sizeof(*tx_ctrl_hdr));
 	} else {
 		/* The TX control header is invalid. */
 		return false;
@@ -1162,6 +1167,440 @@ bool parse_ocb_tx_header(qdf_nbuf_t msdu,
 	return true;
 }
 
+
+#if defined(CONFIG_HL_SUPPORT) && defined(CONFIG_TX_DESC_HI_PRIO_RESERVE)
+
+/**
+ * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
+ *			   for an HL system.
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @msdu: the tx frame
+ * @msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t msdu,
+	struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc = NULL;
+
+	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
+			TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
+		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+	} else if (qdf_nbuf_is_ipv4_pkt(msdu) == true) {
+		if ((qdf_nbuf_is_ipv4_dhcp_pkt(msdu) == true) ||
+		    (qdf_nbuf_is_ipv4_eapol_pkt(msdu) == true)) {
+			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "Provided tx descriptor from reserve pool for DHCP/EAPOL\n");
+		}
+	}
+	return tx_desc;
+}
+#else
+
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t msdu,
+	struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc = NULL;
+	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+	return tx_desc;
+}
+#endif
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
+ *				 for management frame
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @tx_mgmt_frm: the tx management frame
+ * @tx_msdu_info: the tx meta data
+ *
+ * Return: the tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+	tx_msdu_info->htt.action.tx_comp_req = 1;
+	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+	return tx_desc;
+}
+
+/**
+ * ol_txrx_mgmt_send_frame() - send a management frame
+ * @vdev: virtual device sending the frame
+ * @tx_desc: tx desc
+ * @tx_mgmt_frm: management frame to send
+ * @tx_msdu_info: the tx meta data
+ * @chanfreq: channel frequency to transmit the frame on
+ *
+ * Return:
+ *      0 -> the frame is accepted for transmission, -OR-
+ *      1 -> the frame was not accepted
+ */
+static inline
+int ol_txrx_mgmt_send_frame(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info,
+	uint16_t chanfreq)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_tx_frms_queue_t *txq;
+	/*
+	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
+	 * 2.  Invoke the download scheduler.
+	 */
+	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
+	if (!txq) {
+		/*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
+								msdu);*/
+		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
+					     1 /* error */);
+		if (tx_msdu_info->peer) {
+			/* remove the peer reference added above */
+			ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+		}
+		return 1; /* can't accept the tx mgmt frame */
+	}
+	/* Initialize the HTT tx desc l2 header offset field.
+	 * Even though tx encap does not apply to mgmt frames,
+	 * htt_tx_desc_mpdu_header still needs to be called,
+	 * to specify that there was no L2 header added by tx encap,
+	 * so the frame's length does not need to be adjusted to account for
+	 * an added L2 header.
+	 */
+	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+	htt_tx_desc_init(
+			pdev->htt_pdev, tx_desc->htt_tx_desc,
+			tx_desc->htt_tx_desc_paddr,
+			ol_tx_desc_id(pdev, tx_desc),
+			tx_mgmt_frm,
+			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0);
+	htt_tx_desc_display(tx_desc->htt_tx_desc);
+	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+
+	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
+	if (tx_msdu_info->peer) {
+		/* remove the peer reference added above */
+		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+	}
+	ol_tx_sched(vdev->pdev);
+
+	return 0;
+}
+
+#else
+
+static inline
+struct ol_tx_desc_t *
+ol_txrx_mgmt_tx_desc_alloc(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+	/* For LL tx_comp_req is not used so initialized to 0 */
+	tx_msdu_info->htt.action.tx_comp_req = 0;
+	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
+	/* FIX THIS -
+	 * The FW currently has trouble using the host's fragments table
+	 * for management frames.  Until this is fixed, rather than
+	 * specifying the fragment table to the FW, specify just the
+	 * address of the initial fragment.
+	 */
+#if defined(HELIUMPLUS_PADDR64)
+	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
+	   tx_desc); */
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	if (tx_desc) {
+		/*
+		 * Following the call to ol_tx_desc_ll, frag 0 is the
+		 * HTT tx HW descriptor, and the frame payload is in
+		 * frag 1.
+		 */
+		htt_tx_desc_frags_table_set(
+				pdev->htt_pdev,
+				tx_desc->htt_tx_desc,
+				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
+				0, 0);
+#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
+		dump_frag_desc(
+				"after htt_tx_desc_frags_table_set",
+				tx_desc);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	}
+
+	return tx_desc;
+}
+
+static inline
+int ol_txrx_mgmt_send_frame(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_mgmt_frm,
+	struct ol_txrx_msdu_info_t *tx_msdu_info,
+	uint16_t chanfreq)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
+	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
+					QDF_NBUF_TX_PKT_MGMT_TRACK;
+	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
+			  htt_pkt_type_mgmt);
+
+	return 0;
+}
+#endif
+
+/**
+ * ol_tx_hl_base() - send tx frames for an HL system.
+ * @vdev: the virtual device sending the data
+ * @tx_spec: indicate what non-standard transmission actions to apply
+ * @msdu_list: the tx frames to send
+ * @tx_comp_req: tx completion req
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+static inline qdf_nbuf_t
+ol_tx_hl_base(
+	ol_txrx_vdev_handle vdev,
+	enum ol_tx_spec tx_spec,
+	qdf_nbuf_t msdu_list,
+	int tx_comp_req)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	qdf_nbuf_t msdu = msdu_list;
+	struct ol_txrx_msdu_info_t tx_msdu_info;
+	struct ocb_tx_ctrl_hdr_t tx_ctrl;
+
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+	tx_msdu_info.peer = NULL;
+	tx_msdu_info.tso_info.is_tso = 0;
+
+	/*
+	 * The msdu_list variable could be used instead of the msdu var,
+	 * but just to clarify which operations are done on a single MSDU
+	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
+	 * within the list.
+	 */
+	while (msdu) {
+		qdf_nbuf_t next;
+		struct ol_tx_frms_queue_t *txq;
+		struct ol_tx_desc_t *tx_desc = NULL;
+
+		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
+
+		/*
+		 * The netbuf will get stored into a (peer-TID) tx queue list
+		 * inside the ol_tx_classify_store function or else dropped,
+		 * so store the next pointer immediately.
+		 */
+		next = qdf_nbuf_next(msdu);
+
+		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);
+
+		if (!tx_desc) {
+			/*
+			 * If we're out of tx descs, there's no need to try
+			 * to allocate tx descs for the remaining MSDUs.
+			 */
+			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
+						  msdu);
+			return msdu; /* the list of unaccepted MSDUs */
+		}
+
+		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu);*/
+
+		if (tx_spec != OL_TX_SPEC_STD) {
+#if defined(FEATURE_WLAN_TDLS)
+			if (tx_spec & OL_TX_SPEC_NO_FREE) {
+				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
+			} else if (tx_spec & OL_TX_SPEC_TSO) {
+#else
+				if (tx_spec & OL_TX_SPEC_TSO) {
+#endif
+					tx_desc->pkt_type = OL_TX_FRM_TSO;
+				}
+				if (ol_txrx_tx_is_raw(tx_spec)) {
+					/* CHECK THIS: does this need
+					 * to happen after htt_tx_desc_init?
+					 */
+					/* different types of raw frames */
+					u_int8_t sub_type =
+						ol_txrx_tx_raw_subtype(
+								tx_spec);
+					htt_tx_desc_type(htt_pdev,
+							 tx_desc->htt_tx_desc,
+							 htt_pkt_type_raw,
+							 sub_type);
+				}
+			}
+
+			tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
+			tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
+			tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
+			tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;
+			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
+
+			/* If the vdev is in OCB mode,
+			 * parse the tx control header.
+			 */
+			if (vdev->opmode == wlan_op_mode_ocb) {
+				if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
+					/* There was an error parsing
+					 * the header. Skip this packet.
+					 */
+					goto MSDU_LOOP_BOTTOM;
+				}
+			}
+
+			txq = ol_tx_classify(vdev, tx_desc, msdu,
+							&tx_msdu_info);
+
+			if ((!txq) || TX_FILTER_CHECK(&tx_msdu_info)) {
+				/* drop this frame,
+				 * but try sending subsequent frames
+				 */
+				/*TXRX_STATS_MSDU_LIST_INCR(pdev,
+							tx.dropped.no_txq,
+							msdu);*/
+				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+				if (tx_msdu_info.peer) {
+					/* remove the peer reference
+					 * added above */
+					ol_txrx_peer_unref_delete(
+							tx_msdu_info.peer);
+				}
+				goto MSDU_LOOP_BOTTOM;
+			}
+
+			if (tx_msdu_info.peer) {
+				/*If the state is not associated then drop all
+				 *the data packets received for that peer*/
+				if (tx_msdu_info.peer->state ==
+						OL_TXRX_PEER_STATE_DISC) {
+					qdf_atomic_inc(
+						&pdev->tx_queue.rsrc_cnt);
+					ol_tx_desc_frame_free_nonstd(pdev,
+								     tx_desc,
+								     1);
+					ol_txrx_peer_unref_delete(
+							tx_msdu_info.peer);
+					msdu = next;
+					continue;
+				} else if (tx_msdu_info.peer->state !=
+						OL_TXRX_PEER_STATE_AUTH) {
+					if (tx_msdu_info.htt.info.ethertype !=
+						ETHERTYPE_PAE &&
+						tx_msdu_info.htt.info.ethertype
+							!= ETHERTYPE_WAI) {
+						qdf_atomic_inc(
+							&pdev->tx_queue.
+								rsrc_cnt);
+						ol_tx_desc_frame_free_nonstd(
+								pdev,
+								tx_desc, 1);
+						ol_txrx_peer_unref_delete(
+							tx_msdu_info.peer);
+						msdu = next;
+						continue;
+					}
+				}
+			}
+			/*
+			 * Initialize the HTT tx desc l2 header offset field.
+			 * htt_tx_desc_mpdu_header  needs to be called to
+			 * make sure, the l2 header size is initialized
+			 * correctly to handle cases where TX ENCAP is disabled
+			 * or Tx Encap fails to perform Encap
+			 */
+			htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
+
+			/*
+			 * Note: when the driver is built without support for
+			 * SW tx encap, the following macro is a no-op.
+			 * When the driver is built with support for SW tx
+			 * encap, it performs encap, and if an error is
+			 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
+			 */
+			OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
+					    tx_msdu_info);
+
+			/* initialize the HW tx descriptor */
+			htt_tx_desc_init(
+					pdev->htt_pdev, tx_desc->htt_tx_desc,
+					tx_desc->htt_tx_desc_paddr,
+					ol_tx_desc_id(pdev, tx_desc),
+					msdu,
+					&tx_msdu_info.htt,
+					&tx_msdu_info.tso_info,
+					&tx_ctrl,
+					vdev->opmode == wlan_op_mode_ocb);
+			/*
+			 * If debug display is enabled, show the meta-data
+			 * being downloaded to the target via the
+			 * HTT tx descriptor.
+			 */
+			htt_tx_desc_display(tx_desc->htt_tx_desc);
+
+			ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
+			if (tx_msdu_info.peer) {
+				OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer,
+							msdu);
+				/* remove the peer reference added above */
+				ol_txrx_peer_unref_delete(tx_msdu_info.peer);
+			}
+MSDU_LOOP_BOTTOM:
+			msdu = next;
+		}
+		ol_tx_sched(pdev);
+		return NULL; /* all MSDUs were accepted */
+}
+
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req;
+	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
+}
+
+qdf_nbuf_t
+ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
+		 enum ol_tx_spec tx_spec,
+		 qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req;
+
+	if (!tx_comp_req) {
+		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
+		    (pdev->tx_data_callback.func))
+			tx_comp_req = 1;
+	}
+	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req);
+}
+
 /**
  * ol_tx_non_std - Allow the control-path SW to send data frames
  *
@@ -1188,7 +1627,10 @@ qdf_nbuf_t
 ol_tx_non_std(ol_txrx_vdev_handle vdev,
 	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
 {
-	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
+	if (vdev->pdev->cfg.is_high_latency)
+		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
+	else
+		return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
 }
 
 void
@@ -1297,7 +1739,7 @@ ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_tx_desc_t *tx_desc;
 	struct ol_txrx_msdu_info_t tx_msdu_info;
-
+	int result = 0;
 	tx_msdu_info.tso_info.is_tso = 0;
 
 	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
@@ -1348,37 +1790,8 @@ ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
 
 	tx_msdu_info.peer = NULL;
 
-
-	/* For LL tx_comp_req is not used so initialized to 0 */
-	tx_msdu_info.htt.action.tx_comp_req = 0;
-	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
-	/* FIX THIS -
-	 * The FW currently has trouble using the host's fragments table
-	 * for management frames.  Until this is fixed, rather than
-	 * specifying the fragment table to the FW, specify just the
-	 * address of the initial fragment.
-	 */
-#if defined(HELIUMPLUS_PADDR64)
-	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
-	   tx_desc); */
-#endif /* defined(HELIUMPLUS_PADDR64) */
-	if (tx_desc) {
-		/*
-		 * Following the call to ol_tx_desc_ll, frag 0 is the
-		 * HTT tx HW descriptor, and the frame payload is in
-		 * frag 1.
-		 */
-		htt_tx_desc_frags_table_set(
-			pdev->htt_pdev,
-			tx_desc->htt_tx_desc,
-			qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
-			0, 0);
-#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
-		dump_frag_desc(
-			"after htt_tx_desc_frags_table_set",
-			tx_desc);
-#endif /* defined(HELIUMPLUS_PADDR64) */
-	}
+	tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm,
+							&tx_msdu_info);
 	if (!tx_desc)
 		return -EINVAL;       /* can't accept the tx mgmt frame */
 
@@ -1386,11 +1799,8 @@ ol_txrx_mgmt_send_ext(ol_txrx_vdev_handle vdev,
 	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
 	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
 
-	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
-	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
-				QDF_NBUF_TX_PKT_MGMT_TRACK;
-	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
-			  htt_pkt_type_mgmt);
+	result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm,
+						&tx_msdu_info, chanfreq);
 
 	return 0;               /* accepted the tx mgmt frame */
 }

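With the HL base path in place, a frame handed to ol_tx_hl() or ol_tx_non_std_hl() is classified into a peer-TID queue, enqueued, and the download scheduler is kicked; anything that cannot be accepted is handed back to the caller. A minimal sketch of a hypothetical non-standard send on an HL build (the wrapper name is illustrative):

#include <ol_tx.h>

/* Hypothetical caller: send one frame that the driver must not free on
 * completion. ol_tx_non_std_hl() turns on the tx completion request for
 * OL_TX_SPEC_NO_FREE when a tx_data_callback is registered.
 */
static qdf_nbuf_t example_send_no_free(ol_txrx_vdev_handle vdev,
				       qdf_nbuf_t msdu)
{
	qdf_nbuf_set_next(msdu, NULL);  /* single-frame list */
	/* a non-NULL return is the list of MSDUs that were not accepted */
	return ol_tx_non_std_hl(vdev, OL_TX_SPEC_NO_FREE, msdu);
}
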
+ 29 - 0
core/dp/txrx/ol_tx.h

@@ -47,6 +47,12 @@ qdf_nbuf_t ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
 
 qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
 
+#ifdef CONFIG_HL_SUPPORT
+#define OL_TX_SEND ol_tx_hl
+#else
+#define OL_TX_SEND OL_TX_LL
+#endif
+
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 #define OL_TX_LL ol_tx_ll_queue
 #else
@@ -67,6 +73,29 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
 	return;
 }
 #endif
+
+/**
+ * ol_tx_non_std_hl() - send non-standard tx frames.
+ * @vdev: the virtual device sending the data
+ * @tx_spec: indicate what non-standard transmission actions to apply
+ * @msdu_list: the tx frames to send
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+qdf_nbuf_t
+ol_tx_non_std_hl(ol_txrx_vdev_handle data_vdev,
+		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
+
+/**
+ * ol_tx_hl() - transmit tx frames for an HL system.
+ * @vdev: the virtual device transmitting the data
+ * @msdu_list: the tx frames to send
+ *
+ * Return: NULL if all MSDUs are accepted
+ */
+qdf_nbuf_t
+ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
+
 qdf_nbuf_t
 ol_tx_non_std_ll(ol_txrx_vdev_handle data_vdev,
 		 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);

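OL_TX_SEND gives the rest of the driver a single data-send entry point: it expands to ol_tx_hl() when CONFIG_HL_SUPPORT is set and falls back to the existing OL_TX_LL alias otherwise, which is why ol_tx_data(), ol_tx_send_ipa_data_frame() and ol_rx_fwd_to_tx() could switch over without per-build branching. A minimal sketch of what a call site looks like (everything except OL_TX_SEND and the qdf helpers is illustrative):

/* Hypothetical call site: identical source for HL and LL builds. */
static void example_xmit(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
{
	qdf_nbuf_t rejected;

	qdf_nbuf_set_next(skb, NULL);      /* terminate the frame list */
	rejected = OL_TX_SEND(vdev, skb);  /* ol_tx_hl or the LL variant */
	if (rejected)
		qdf_nbuf_tx_free(rejected, QDF_NBUF_PKT_ERROR);
}
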
+ 888 - 0
core/dp/txrx/ol_tx_classify.c

@@ -0,0 +1,888 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_nbuf.h>         /* qdf_nbuf_t, etc. */
+#include <htt.h>              /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h>    /* htt_tx_desc_tid */
+#include <ol_txrx_api.h>      /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+#include <ol_txrx.h>
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h>    /* pdev stats */
+#include <ol_tx_desc.h>       /* ol_tx_desc */
+#include <ol_tx_send.h>       /* ol_tx_send */
+#include <ol_txrx_peer_find.h>
+#include <ol_tx_classify.h>
+#include <ol_tx_queue.h>
+#include <ipv4.h>
+#include <ipv6_defs.h>
+#include <ip_prot.h>
+#include <enet.h>             /* ETHERTYPE_VLAN, etc. */
+#include <cds_ieee80211_common.h>        /* ieee80211_frame */
+
+/*
+ * In theory, this tx classify code could be used on the host or in the target.
+ * Thus, this code uses generic OS primitives, that can be aliased to either
+ * the host's OS primitives or the target's OS primitives.
+ * For now, the following #defines set up these host-specific or
+ * target-specific aliases.
+ */
+
+#if defined(CONFIG_HL_SUPPORT)
+
+#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
+#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
+
+#ifdef QCA_TX_HTT2_SUPPORT
+static void
+ol_tx_classify_htt2_frm(
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t tx_nbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
+	A_UINT8 candi_frm = 0;
+
+	/*
+	 * Offload the frame re-order to L3 protocol and ONLY support
+	 * TCP protocol now.
+	 */
+	if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
+	    (htt->info.frame_type == htt_frm_type_data) &&
+	    htt->info.is_unicast &&
+	    (htt->info.ethertype == ETHERTYPE_IPV4)) {
+		struct ipv4_hdr_t *ipHdr;
+
+		ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
+			htt->info.l3_hdr_offset);
+		if (ipHdr->protocol == IP_PROTOCOL_TCP)
+			candi_frm = 1;
+	}
+
+	qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
+}
+
+#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info)      \
+	ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info);
+#else
+#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info)      /* no-op */
+#endif /* QCA_TX_HTT2_SUPPORT */
+/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
+#define TX_DHCP_TID  6
+
+#if defined(QCA_BAD_PEER_TX_FLOW_CL)
+static inline A_BOOL
+ol_if_tx_bad_peer_txq_overflow(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_peer_t *peer,
+	struct ol_tx_frms_queue_t *txq)
+{
+	if (peer && pdev && txq && (peer->tx_limit_flag) &&
+	    (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
+		return true;
+	else
+		return false;
+}
+#else
+static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_peer_t *peer,
+	struct ol_tx_frms_queue_t *txq)
+{
+	return false;
+}
+#endif
+
+/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
+#define TX_EAPOL_TID  6
+
+/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
+#define TX_ARP_TID  6
+
+/* For non-IP case, use default TID */
+#define TX_DEFAULT_TID  0
+
+/*
+ * Determine IP TOS priority
+ * IP Tos format :
+ *        (Refer Pg 57 WMM-test-plan-v1.2)
+ * IP-TOS - 8bits
+ *            : DSCP(6-bits) ECN(2-bits)
+ *            : DSCP - P2 P1 P0 X X X
+ *                where (P2 P1 P0) form 802.1D
+ */
+static inline A_UINT8
+ol_tx_tid_by_ipv4(A_UINT8 *pkt)
+{
+	A_UINT8 ipPri, tid;
+	struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
+
+	ipPri = ipHdr->tos >> 5;
+	tid = ipPri & 0x7;
+
+	return tid;
+}
+
+static inline A_UINT8
+ol_tx_tid_by_ipv6(A_UINT8 *pkt)
+{
+	return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
+}
+
+static inline void
+ol_tx_set_ether_type(
+	A_UINT8 *datap,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	A_UINT16 typeorlength;
+	A_UINT8 *ptr;
+	A_UINT8 *l3_data_ptr;
+
+	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
+		/* adjust hdr_ptr to RA */
+		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+
+		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
+					IEEE80211_FC0_TYPE_DATA) {
+			struct llc_snap_hdr_t *llc;
+			/* dot11 encapsulated frame */
+			struct ieee80211_qosframe *whqos =
+					(struct ieee80211_qosframe *)datap;
+			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+				tx_msdu_info->htt.info.l3_hdr_offset =
+					sizeof(struct ieee80211_qosframe);
+			} else {
+				tx_msdu_info->htt.info.l3_hdr_offset =
+					sizeof(struct ieee80211_frame);
+			}
+			llc = (struct llc_snap_hdr_t *)
+				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
+			tx_msdu_info->htt.info.ethertype =
+				(llc->ethertype[0] << 8) | llc->ethertype[1];
+			/*
+			 * l3_hdr_offset refers to the end of the 802.3 or
+			 * 802.11 header, which may be a LLC/SNAP header rather
+			 * than the IP header.
+			 * rather, leave it as is.
+			 * rather,leave it as is.
+			 */
+		} else {
+			/*
+			 * This function should only be applied to data frames.
+			 * For management frames, we already know to use
+			 * HTT_TX_EXT_TID_MGMT.
+			 */
+			TXRX_ASSERT2(0);
+		}
+	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
+					htt_pkt_type_ethernet) {
+		ptr = (datap + ETHERNET_ADDR_LEN * 2);
+		typeorlength = (ptr[0] << 8) | ptr[1];
+		/*ETHERNET_HDR_LEN;*/
+		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);
+
+		if (typeorlength == ETHERTYPE_VLAN) {
+			ptr = (datap + ETHERNET_ADDR_LEN * 2
+					+ ETHERTYPE_VLAN_LEN);
+			typeorlength = (ptr[0] << 8) | ptr[1];
+			l3_data_ptr += ETHERTYPE_VLAN_LEN;
+		}
+
+		if (!IS_ETHERTYPE(typeorlength)) {
+			/* 802.3 header*/
+			struct llc_snap_hdr_t *llc_hdr =
+				(struct llc_snap_hdr_t *)l3_data_ptr;
+			typeorlength = (llc_hdr->ethertype[0] << 8) |
+							llc_hdr->ethertype[1];
+			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
+		}
+
+		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
+									datap);
+		tx_msdu_info->htt.info.ethertype = typeorlength;
+	}
+}
+
+static inline A_UINT8
+ol_tx_tid_by_ether_type(
+	A_UINT8 *datap,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	A_UINT8 tid;
+	A_UINT8 *l3_data_ptr;
+	A_UINT16 typeorlength;
+
+	l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
+	typeorlength = tx_msdu_info->htt.info.ethertype;
+
+	/* IP packet, do packet inspection for TID */
+	if (typeorlength == ETHERTYPE_IPV4) {
+		tid = ol_tx_tid_by_ipv4(l3_data_ptr);
+	} else if (typeorlength == ETHERTYPE_IPV6) {
+		tid = ol_tx_tid_by_ipv6(l3_data_ptr);
+	} else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
+		/* EAPOL go with voice priority*/
+		tid = TX_EAPOL_TID;
+	} else if (typeorlength == ETHERTYPE_ARP) {
+		tid = TX_ARP_TID;
+	} else {
+		/* For non-IP case, use default TID */
+		tid = TX_DEFAULT_TID;
+	}
+	return tid;
+}
+
+static inline A_UINT8
+ol_tx_tid_by_raw_type(
+	A_UINT8 *datap,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	/* adjust hdr_ptr to RA */
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+
+	/* FIXME: This code does not handle the 4-address frame format. The
+	 * QOS field is not at the usual location.
+	 */
+	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
+					IEEE80211_FC0_TYPE_DATA) {
+		/* dot11 encapsulated frame */
+		struct ieee80211_qosframe *whqos =
+					(struct ieee80211_qosframe *)datap;
+		if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
+			tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
+		else
+			tid = HTT_NON_QOS_TID;
+	} else {
+		/*
+		 * This function should only be applied to data frames.
+		 * For management frames, we already know to use
+		 * HTT_TX_EXT_TID_MGMT.
+		 */
+		qdf_assert(0);
+	}
+	return tid;
+}
+
+static A_UINT8
+ol_tx_tid(
+	struct ol_txrx_pdev_t *pdev,
+	qdf_nbuf_t tx_nbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
+	A_UINT8 tid;
+
+	if (pdev->frame_format == wlan_frm_fmt_raw) {
+		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
+
+		ol_tx_set_ether_type(datap, tx_msdu_info);
+		tid = tx_msdu_info->htt.info.ext_tid ==
+					QDF_NBUF_TX_EXT_TID_INVALID ?
+			ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
+			tx_msdu_info->htt.info.ext_tid;
+	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
+
+		ol_tx_set_ether_type(datap, tx_msdu_info);
+		tid =
+			tx_msdu_info->htt.info.ext_tid ==
+					QDF_NBUF_TX_EXT_TID_INVALID ?
+				ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
+				tx_msdu_info->htt.info.ext_tid;
+	} else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+		struct llc_snap_hdr_t *llc;
+
+		tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
+		tx_msdu_info->htt.info.l3_hdr_offset =
+						sizeof(struct ieee80211_frame);
+		llc = (struct llc_snap_hdr_t *)
+			(datap + tx_msdu_info->htt.info.l3_hdr_offset);
+		tx_msdu_info->htt.info.ethertype =
+			(llc->ethertype[0] << 8) | llc->ethertype[1];
+		/*
+		 * Native WiFi is a special case of "raw" 802.11 header format.
+		 * However, we expect that for all cases that use native WiFi,
+		 * the TID will be directly specified out of band.
+		 */
+		tid = tx_msdu_info->htt.info.ext_tid;
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
+			  "Invalid standard frame type: %d\n",
+			  pdev->frame_format);
+		qdf_assert(0);
+		tid = HTT_TX_EXT_TID_INVALID;
+	}
+	return tid;
+}
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+static inline
+struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
+						struct ol_txrx_vdev_t *vdev,
+						uint8_t *peer_id)
+{
+	struct ol_txrx_peer_t *peer = NULL;
+
+	if (vdev->hlTdlsFlag) {
+		peer = ol_txrx_find_peer_by_addr(pdev,
+						vdev->hl_tdls_ap_mac_addr.raw,
+						peer_id);
+		if (peer &&  (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
+			peer = NULL;
+		} else {
+			if (peer)
+				qdf_atomic_inc(&peer->ref_cnt);
+		}
+	}
+	if (!peer)
+		peer = ol_txrx_assoc_peer_find(vdev);
+
+	return peer;
+}
+
+#else
+struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
+						struct ol_txrx_vdev_t *vdev,
+						uint8_t *peer_id)
+{
+	struct ol_txrx_peer_t *peer = NULL;
+	peer = ol_txrx_assoc_peer_find(vdev);
+
+	return peer;
+}
+
+
+#endif
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_nbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_txrx_peer_t *peer = NULL;
+	struct ol_tx_frms_queue_t *txq = NULL;
+	A_UINT8 *dest_addr;
+	A_UINT8 tid;
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+	u_int8_t peer_id;
+#endif
+
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
+	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
+	    (vdev->opmode == wlan_op_mode_ocb)) {
+		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
+		tx_msdu_info->htt.info.ext_tid =
+					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+		if (vdev->opmode == wlan_op_mode_sta) {
+			/*
+			 * The STA sends a frame with a broadcast
+			 * dest addr (DA) as a
+			 * unicast frame to the AP's receive addr (RA).
+			 * Find the peer object that represents the AP
+			 * that the STA is associated with.
+			 */
+			peer = ol_txrx_assoc_peer_find(vdev);
+			if (!peer) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					  QDF_TRACE_LEVEL_ERROR,
+					  "Error: STA %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
+					  vdev,
+					  vdev->mac_addr.raw[0],
+					  vdev->mac_addr.raw[1],
+					  vdev->mac_addr.raw[2],
+					  vdev->mac_addr.raw[3],
+					  vdev->mac_addr.raw[4],
+					  vdev->mac_addr.raw[5]);
+				return NULL; /* error */
+			} else if ((peer->security[
+				OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
+						!= htt_sec_type_wapi) &&
+				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
+				if (true == qdf_nbuf_is_ipv4_dhcp_pkt(
+								tx_nbuf)) {
+					/* DHCP frame to go with
+					 * voice priority
+					 */
+					txq = &peer->txqs[TX_DHCP_TID];
+					tx_msdu_info->htt.info.ext_tid =
+								TX_DHCP_TID;
+				}
+			}
+			/*
+			 * The following line assumes each peer object has a
+			 * single ID. This is currently true, and is expected
+			 * to remain true.
+			 */
+			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+		} else if (vdev->opmode == wlan_op_mode_ocb) {
+			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+			/* In OCB mode, don't worry about the peer;
+			 * we don't need it. */
+			peer = NULL;
+		} else {
+			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+			/*
+			 * Look up the vdev's BSS peer, so that the
+			 * classify_extension function can check whether to
+			 * encrypt multicast / broadcast frames.
+			 */
+			peer = ol_txrx_peer_find_hash_find(pdev,
+							vdev->mac_addr.raw,
+							0, 1);
+			if (!peer) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					  QDF_TRACE_LEVEL_ERROR,
+					  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
+					  vdev,
+					  vdev->mac_addr.raw[0],
+					  vdev->mac_addr.raw[1],
+					  vdev->mac_addr.raw[2],
+					  vdev->mac_addr.raw[3],
+					  vdev->mac_addr.raw[4],
+					  vdev->mac_addr.raw[5]);
+				return NULL; /* error */
+			}
+		}
+		tx_msdu_info->htt.info.is_unicast = false;
+	} else {
+		/* tid will be overwritten for the non-QoS case */
+		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
+		if ((HTT_TX_EXT_TID_INVALID == tid) ||
+		    (tid >= OL_TX_NUM_TIDS)) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX,
+				  QDF_TRACE_LEVEL_ERROR,
+				  "%s Error: could not classify packet into valid TID(%d).\n",
+				  __func__, tid);
+			return NULL;
+		}
+#ifdef ATH_SUPPORT_WAPI
+		/* Check to see if a frame is a WAI frame */
+		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
+			/* WAI frames should not be encrypted */
+			tx_msdu_info->htt.action.do_encrypt = 0;
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+				  "Tx Frame is a WAI frame\n");
+		}
+#endif /* ATH_SUPPORT_WAPI */
+
+		/*
+		 * Find the peer and increment its reference count.
+		 * If this vdev is an AP, use the dest addr (DA) to determine
+		 * which peer STA this unicast data frame is for.
+		 * If this vdev is a STA, the unicast data frame is for the
+		 * AP the STA is associated with.
+		 */
+		if (vdev->opmode == wlan_op_mode_sta) {
+			/*
+			 * TO DO:
+			 * To support TDLS, first check if there is a TDLS
+			 * peer STA,
+			 * and if so, check if the DA matches the TDLS peer
+			 * STA's MAC address. If there is no peer TDLS STA,
+			 * or if the DA is not the TDLS STA's address,
+			 * then the frame is either for the AP itself, or is
+			 * supposed to be sent to the AP for forwarding.
+			 */
+#if 0
+			if (vdev->num_tdls_peers > 0) {
+				peer = NULL;
+				for (i = 0; i < vdev->num_tdls_peers; i++) {
+					int differs = adf_os_mem_cmp(
+							vdev->tdls_peers[i]->
+							mac_addr.raw,
+							dest_addr,
+							OL_TXRX_MAC_ADDR_LEN);
+					if (!differs) {
+						peer = vdev->tdls_peers[i];
+						break;
+					}
+				}
+			} else {
+				/* send to AP */
+				peer = ol_txrx_assoc_peer_find(vdev);
+			}
+#endif
+
+			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
+		} else {
+			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
+									0, 1);
+		}
+		tx_msdu_info->htt.info.is_unicast = true;
+		if (!peer) {
+			/*
+			 * Unicast data xfer can only happen to an
+			 * associated peer. It is illegitimate to send unicast
+			 * data if there is no peer to send it to.
+			 */
+			QDF_TRACE(QDF_MODULE_ID_TXRX,
+				  QDF_TRACE_LEVEL_ERROR,
+				  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
+				  vdev,
+				  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+				  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+				  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+			return NULL; /* error */
+		}
+		TX_SCHED_DEBUG_PRINT("Peer found\n");
+		if (!peer->qos_capable) {
+			tid = OL_TX_NON_QOS_TID;
+		} else if ((peer->security[
+				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
+					!= htt_sec_type_wapi) &&
+			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
+			if (true == qdf_nbuf_is_ipv4_dhcp_pkt(tx_nbuf))
+				/* DHCP frame to go with voice priority */
+				tid = TX_DHCP_TID;
+		}
+
+		/* Only allow encryption when in authenticated state */
+		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
+			tx_msdu_info->htt.action.do_encrypt = 0;
+
+		txq = &peer->txqs[tid];
+		tx_msdu_info->htt.info.ext_tid = tid;
+		/*
+		 * The following line assumes each peer object has a single ID.
+		 * This is currently true, and is expected to remain true.
+		 */
+		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+		/*
+		 * WORKAROUND - check that the peer ID is valid.
+		 * If tx data is provided before ol_rx_peer_map_handler is
+		 * called to record the peer ID specified by the target,
+		 * then we could end up here with an invalid peer ID.
+		 * TO DO: rather than dropping the tx frame, pause the txq it
+		 * goes into, then fill in the peer ID for the entries in the
+		 * txq when the peer_map event provides the peer ID, and then
+		 * unpause the txq.
+		 */
+		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
+			if (peer) {
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					   "%s: remove the peer for invalid peer_id %p\n",
+					   __func__, peer);
+				/* remove the peer reference added above */
+				ol_txrx_peer_unref_delete(peer);
+				tx_msdu_info->peer = NULL;
+			}
+			return NULL;
+		}
+	}
+	tx_msdu_info->peer = peer;
+	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
+		return NULL;
+	/*
+	 * If relevant, do a deeper inspection to determine additional
+	 * characteristics of the tx frame.
+	 * If the frame is invalid, then the txq will be set to NULL to
+	 * indicate an error.
+	 */
+	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
+	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
+				wlan_op_mode_sta && tx_msdu_info->peer !=
+								NULL) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+			   "%s: remove the peer reference %p\n",
+			   __func__, peer);
+		/* remove the peer reference added above */
+		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
+		/* set peer to NULL for multicast frames in non-STA mode */
+		tx_msdu_info->peer = NULL;
+	}
+
+	/* Check whether this frame can be downloaded through the HTT2
+	 * data pipe or not.
+	 */
+	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+
+	/* Update Tx Queue info */
+	tx_desc->txq = txq;
+
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+	return txq;
+}
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify_mgmt(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_nbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_txrx_peer_t *peer = NULL;
+	struct ol_tx_frms_queue_t *txq = NULL;
+	A_UINT8 *dest_addr;
+	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
+
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
+	if (IEEE80211_IS_MULTICAST(dest_addr)) {
+		/*
+		 * AP:  beacons are broadcast,
+		 *      public action frames (e.g. extended channel
+		 *      switch announce) may be broadcast
+		 * STA: probe requests can be either broadcast or unicast
+		 */
+		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
+		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+		tx_msdu_info->peer = NULL;
+		tx_msdu_info->htt.info.is_unicast = 0;
+	} else {
+		/*
+		 * Find the peer and increment its reference count.
+		 * If this vdev is an AP, use the receiver addr (RA) to
+		 * determine which peer STA this unicast mgmt frame is for.
+		 * If this vdev is a STA, the unicast mgmt frame is for the
+		 * AP the STA is associated with.
+		 * Probe request / response and Assoc request / response are
+		 * sent before the peer exists - in this case, use the
+		 * vdev's default tx queue.
+		 */
+		if (vdev->opmode == wlan_op_mode_sta) {
+			/*
+			 * TO DO:
+			 * To support TDLS, first check if there is a TDLS
+			 * peer STA, and if so, check if the DA matches
+			 * the TDLS peer STA's MAC address.
+			 */
+			peer = ol_txrx_assoc_peer_find(vdev);
+			/*
+			/*
+			 * Some special cases (e.g. pre-auth) need to send a
+			 * unicast mgmt frame to an unassociated AP. In such
+			 * cases, check whether the dest addr matches the
+			 * associated peer's addr. If not, set peer to NULL so
+			 * this frame is queued to the vdev queue.
+			 */
+			if (peer) {
+				qdf_mem_copy(
+					&local_mac_addr_aligned.raw[0],
+					dest_addr, OL_TXRX_MAC_ADDR_LEN);
+				mac_addr = &local_mac_addr_aligned;
+				if (ol_txrx_peer_find_mac_addr_cmp(
+							mac_addr,
+							&peer->mac_addr) != 0) {
+					qdf_atomic_dec(&peer->ref_cnt);
+					peer = NULL;
+				}
+			}
+		} else {
+			/* find the peer and increment its reference count */
+			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
+									0, 1);
+		}
+		tx_msdu_info->peer = peer;
+		if (!peer) {
+			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
+			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
+		} else {
+			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
+			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
+			/*
+			 * The following line assumes each peer object has a
+			 * single ID. This is currently true, and is expected
+			 * to remain true.
+			 */
+			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
+		}
+		tx_msdu_info->htt.info.is_unicast = 1;
+	}
+	/*
+	 * If relevant, do a deeper inspection to determine additional
+	 * characteristics of the tx frame.
+	 * If the frame is invalid, then the txq will be set to NULL to
+	 * indicate an error.
+	 */
+	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
+				      tx_msdu_info, txq);
+
+	/* Check whether this frame can be downloaded through the HTT2
+	 * data pipe or not.
+	 */
+	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+
+	/* Update Tx Queue info */
+	tx_desc->txq = txq;
+
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+	return txq;
+}
+
+A_STATUS
+ol_tx_classify_extension(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t tx_msdu,
+	struct ol_txrx_msdu_info_t *msdu_info)
+{
+	A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
+	struct ol_txrx_peer_t *peer;
+	int which_key;
+
+	/*
+	 * The following msdu_info fields were already filled in by the
+	 * ol_tx entry function or the regular ol_tx_classify function:
+	 *     htt.info.vdev_id            (ol_tx_hl or ol_tx_non_std_hl)
+	 *     htt.info.ext_tid            (ol_tx_non_std_hl or ol_tx_classify)
+	 *     htt.info.frame_type         (ol_tx_hl or ol_tx_non_std_hl)
+	 *     htt.info.l2_hdr_type        (ol_tx_hl or ol_tx_non_std_hl)
+	 *     htt.info.is_unicast         (ol_tx_classify)
+	 *     htt.info.peer_id            (ol_tx_classify)
+	 *     peer                        (ol_tx_classify)
+	 *     if (is_unicast) {
+	 *         htt.info.ethertype      (ol_tx_classify)
+	 *         htt.info.l3_hdr_offset  (ol_tx_classify)
+	 *     }
+	 * The following fields need to be filled in by this function:
+	 *     if (!is_unicast) {
+	 *         htt.info.ethertype
+	 *         htt.info.l3_hdr_offset
+	 *     }
+	 *     htt.action.band (NOT CURRENTLY USED)
+	 *     htt.action.do_encrypt
+	 *     htt.action.do_tx_complete
+	 * The following fields are not needed for data frames, and can
+	 * be left uninitialized:
+	 *     htt.info.frame_subtype
+	 */
+
+	if (!msdu_info->htt.info.is_unicast) {
+		int l2_hdr_size;
+		A_UINT16 ethertype;
+
+		if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
+			struct ethernet_hdr_t *eh;
+
+			eh = (struct ethernet_hdr_t *)datap;
+			l2_hdr_size = sizeof(*eh);
+			ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];
+
+			if (ethertype == ETHERTYPE_VLAN) {
+				struct ethernet_vlan_hdr_t *evh;
+
+				evh = (struct ethernet_vlan_hdr_t *)datap;
+				l2_hdr_size = sizeof(*evh);
+				ethertype = (evh->ethertype[0] << 8) |
+							evh->ethertype[1];
+			}
+
+			if (!IS_ETHERTYPE(ethertype)) {
+				/* 802.3 header */
+				struct llc_snap_hdr_t *llc =
+					(struct llc_snap_hdr_t *)(datap +
+							l2_hdr_size);
+				ethertype = (llc->ethertype[0] << 8) |
+							llc->ethertype[1];
+				l2_hdr_size += sizeof(*llc);
+			}
+			msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
+			msdu_info->htt.info.ethertype = ethertype;
+		} else { /* 802.11 */
+			struct llc_snap_hdr_t *llc;
+			l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
+			llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
+			ethertype = (llc->ethertype[0] << 8) |
+							llc->ethertype[1];
+			/*
+			 * Don't include the LLC/SNAP header in l2_hdr_size,
+			 * because l3_hdr_offset is actually supposed to refer
+			 * to the header after the 802.3 or 802.11 header,
+			 * which could be a LLC/SNAP header rather
+			 * than the L3 header.
+			 */
+		}
+		msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
+		msdu_info->htt.info.ethertype = ethertype;
+		which_key = txrx_sec_mcast;
+	} else {
+		which_key = txrx_sec_ucast;
+	}
+	peer = msdu_info->peer;
+	/*
+	 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
+	 * Add more check here.
+	 */
+	msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
+		(peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
+		msdu_info->htt.action.do_encrypt;
+	/*
+	 * For systems that have a frame by frame spec for whether to receive
+	 * a tx completion notification, use the tx completion notification
+	 * only for certain management frames, not for data frames.
+	 * (In the future, this may be changed slightly, e.g. to request a
+	 * tx completion notification for the final EAPOL message sent by a
+	 * STA during the key delivery handshake.)
+	 */
+	msdu_info->htt.action.do_tx_complete = 0;
+
+	return A_OK;
+}
+
+A_STATUS
+ol_tx_classify_mgmt_extension(
+		struct ol_txrx_vdev_t *vdev,
+		struct ol_tx_desc_t *tx_desc,
+		qdf_nbuf_t tx_msdu,
+		struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ieee80211_frame *wh;
+
+	/*
+	 * The following msdu_info fields were already filled in by the
+	 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
+	 *     htt.info.vdev_id          (ol_txrx_mgmt_send)
+	 *     htt.info.frame_type       (ol_txrx_mgmt_send)
+	 *     htt.info.l2_hdr_type      (ol_txrx_mgmt_send)
+	 *     htt.action.do_tx_complete (ol_txrx_mgmt_send)
+	 *     htt.info.peer_id          (ol_tx_classify_mgmt)
+	 *     htt.info.ext_tid          (ol_tx_classify_mgmt)
+	 *     htt.info.is_unicast       (ol_tx_classify_mgmt)
+	 *     peer                      (ol_tx_classify_mgmt)
+	 * The following fields need to be filled in by this function:
+	 *     htt.info.frame_subtype
+	 *     htt.info.l3_hdr_offset
+	 *     htt.action.band (NOT CURRENTLY USED)
+	 * The following fields are not needed for mgmt frames, and can
+	 * be left uninitialized:
+	 *     htt.info.ethertype
+	 *     htt.action.do_encrypt
+	 *         (This will be filled in by other SW, which knows whether
+ *         the peer has robust-management-frames enabled.)
+	 */
+	wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
+	msdu_info->htt.info.frame_subtype =
+		(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
+		IEEE80211_FC0_SUBTYPE_SHIFT;
+	msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
+
+	return A_OK;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
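
As a concrete illustration of the subtype extraction in ol_tx_classify_mgmt_extension() above: assuming the conventional net80211 values IEEE80211_FC0_SUBTYPE_MASK = 0xf0 and IEEE80211_FC0_SUBTYPE_SHIFT = 4, a probe request with i_fc[0] = 0x40 yields frame_subtype = (0x40 & 0xf0) >> 4 = 4, i.e. the 802.11 management subtype number for a probe request.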

+ 103 - 0
core/dp/txrx/ol_tx_classify.h

@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_classify.h
+ * @brief API definitions for the tx classify module within the data SW.
+ */
+#ifndef _OL_TX_CLASSIFY__H_
+#define _OL_TX_CLASSIFY__H_
+
+#include <qdf_nbuf.h>      /* qdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+
+static inline u_int8_t *
+ol_tx_dest_addr_find(
+	struct ol_txrx_pdev_t *pdev,
+	qdf_nbuf_t tx_nbuf)
+{
+	u_int8_t *hdr_ptr;
+	void *datap = qdf_nbuf_data(tx_nbuf);
+
+	if (pdev->frame_format == wlan_frm_fmt_raw) {
+		/* adjust hdr_ptr to RA */
+		struct ieee80211_frame *wh =
+			(struct ieee80211_frame *)datap;
+		hdr_ptr = wh->i_addr1;
+	} else if (pdev->frame_format ==
+			wlan_frm_fmt_native_wifi) {
+		/* adjust hdr_ptr to RA */
+		struct ieee80211_frame *wh = (
+			struct ieee80211_frame *)datap;
+		hdr_ptr = wh->i_addr1;
+	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+		hdr_ptr = datap;
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_TXRX,
+			  QDF_TRACE_LEVEL_ERROR,
+			"Invalid standard frame type: %d\n",
+			pdev->frame_format);
+		qdf_assert(0);
+		hdr_ptr = NULL;
+	}
+	return hdr_ptr;
+}
+
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * @brief Classify a tx frame to which tid queue.
+ *
+ * @param vdev - the virtual device sending the data
+ *      (for specifying the transmitter address for multicast / broadcast data)
+ * @param tx_desc - descriptor object with meta-data about the tx frame
+ * @param netbuf - the tx frame
+ * @param tx_msdu_info - characteristics of the tx frame
+ */
+struct ol_tx_frms_queue_t *
+ol_tx_classify(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t netbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+struct ol_tx_frms_queue_t *
+ol_tx_classify_mgmt(
+	struct ol_txrx_vdev_t *vdev,
+	struct ol_tx_desc_t *tx_desc,
+	qdf_nbuf_t netbuf,
+	struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+#else
+
+#define ol_tx_classify(vdev, tx_desc, netbuf, tx_msdu_info) NULL
+#define ol_tx_classify_mgmt(vdev, tx_desc, netbuf, tx_msdu_info) NULL
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
+
+#endif /* _OL_TX_CLASSIFY__H_ */

+ 152 - 1
core/dp/txrx/ol_tx_desc.c

@@ -105,7 +105,56 @@ static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
 }
 #endif
 
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * ol_tx_desc_vdev_update() - assign the vdev to the tx descriptor.
+ * @tx_desc: tx descriptor pointer
+ * @vdev: vdev handle
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
+		       struct ol_txrx_vdev_t *vdev)
+{
+	tx_desc->vdev = vdev;
+}
+#else
+
+static inline void
+ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
+		       struct ol_txrx_vdev_t *vdev)
+{
+	return;
+}
+#endif
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+
+/**
+ * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
+ * @vdev: vdev handle
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
+{
+	qdf_atomic_inc(&vdev->tx_desc_count);
+}
+#else
+
+static inline void
+ol_tx_desc_count_inc(struct ol_txrx_vdev_t *vdev)
+{
+	return;
+}
+
+#endif
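
When CONFIG_PER_VDEV_TX_DESC_POOL is enabled, vdev->tx_desc_count tracks how many tx descriptors each vdev currently owns. As a hedged sketch (not part of this change), a caller could compare the counter against a caller-chosen cap before allocating; the helper name and the max_desc_per_vdev parameter below are hypothetical.

	/* Illustrative only: returns true when a (hypothetical) per-vdev
	 * descriptor cap has been reached.  vdev->tx_desc_count is the atomic
	 * counter maintained by ol_tx_desc_count_inc() / ol_tx_desc_vdev_rm().
	 */
	static inline bool
	ol_tx_vdev_desc_limit_reached_sketch(struct ol_txrx_vdev_t *vdev,
					     int max_desc_per_vdev)
	{
		return qdf_atomic_read(&vdev->tx_desc_count) >=
							max_desc_per_vdev;
	}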
+
 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
+
 /**
  * ol_tx_desc_alloc() - allocate descriptor from freelist
  * @pdev: pdev handle
@@ -127,6 +176,13 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 		ol_tx_desc_compute_delay(tx_desc);
 	}
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	if (!tx_desc)
+		return NULL;
+
+	ol_tx_desc_vdev_update(tx_desc, vdev);
+	ol_tx_desc_count_inc(vdev);
+
 	return tx_desc;
 }
 
@@ -220,6 +276,53 @@ ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
 #endif
 #endif
 
+/**
+ * ol_tx_desc_alloc_hl() - allocate tx descriptor
+ * @pdev: pdev handle
+ * @vdev: vdev handle
+ * @msdu_info: tx msdu info
+ *
+ * Return: tx descriptor pointer, or NULL in case of error
+ */
+static struct ol_tx_desc_t *
+ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
+		    struct ol_txrx_vdev_t *vdev,
+		    struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+
+	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
+	if (!tx_desc)
+		return NULL;
+
+	qdf_atomic_dec(&pdev->tx_queue.rsrc_cnt);
+
+	return tx_desc;
+}
+
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)
+
+/**
+ * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.
+ * @tx_desc: tx desc
+ *
+ * Return: None
+ */
+static inline void
+ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
+{
+	qdf_atomic_dec(&tx_desc->vdev->tx_desc_count);
+	tx_desc->vdev = NULL;
+}
+#else
+
+static inline void
+ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
+{
+	return;
+}
+#endif
+
 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * ol_tx_desc_free() - put descriptor to freelist
@@ -246,6 +349,8 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 	ol_tx_desc_reset_timestamp(tx_desc);
 
 	ol_tx_put_desc_global_pool(pdev, tx_desc);
+	ol_tx_desc_vdev_rm(tx_desc);
+
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
 }
 
@@ -313,7 +418,7 @@ void
 dump_pkt(qdf_nbuf_t nbuf, qdf_dma_addr_t nbuf_paddr, int len)
 {
 	qdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
-		  qdf_nbuf_data(nbuf), nbuf_paddr, len);
+		  qdf_nbuf_data(nbuf), (long long unsigned int)nbuf_paddr, len);
 	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       qdf_nbuf_data(nbuf), len, true);
 }
@@ -491,6 +596,52 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 	return tx_desc;
 }
 
+struct ol_tx_desc_t *
+ol_tx_desc_hl(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_vdev_t *vdev,
+	qdf_nbuf_t netbuf,
+	struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc;
+
+	/* FIX THIS: these inits should probably be done by tx classify */
+	msdu_info->htt.info.vdev_id = vdev->vdev_id;
+	msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
+	msdu_info->htt.action.cksum_offload = qdf_nbuf_get_tx_cksum(netbuf);
+	switch (qdf_nbuf_get_exemption_type(netbuf)) {
+	case QDF_NBUF_EXEMPT_NO_EXEMPTION:
+	case QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
+		/* We want to encrypt this frame */
+		msdu_info->htt.action.do_encrypt = 1;
+		break;
+	case QDF_NBUF_EXEMPT_ALWAYS:
+		/* We don't want to encrypt this frame */
+		msdu_info->htt.action.do_encrypt = 0;
+		break;
+	default:
+		qdf_assert(0);
+		break;
+	}
+
+	/* allocate the descriptor */
+	tx_desc = ol_tx_desc_alloc_hl(pdev, vdev, msdu_info);
+	if (!tx_desc)
+		return NULL;
+
+	/* initialize the SW tx descriptor */
+	tx_desc->netbuf = netbuf;
+	/* fix this - get pkt_type from msdu_info */
+	tx_desc->pkt_type = OL_TX_FRM_STD;
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+	tx_desc->orig_l2_hdr_bytes = 0;
+#endif
+	/* the HW tx descriptor will be initialized later by the caller */
+
+	return tx_desc;
+}
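
To show how the new HL pieces fit together, here is a minimal sketch of a per-MSDU send step, assuming valid pdev/vdev handles: allocate a SW descriptor with ol_tx_desc_hl(), classify it onto a tx queue, and enqueue it for the scheduler to download once credit allows. The function name is hypothetical and this is not the actual ol_tx_hl entry path; the real code additionally handles the peer reference held in msdu_info, flow control, and descriptor resource accounting.

	static int
	ol_tx_hl_send_one_sketch(struct ol_txrx_pdev_t *pdev,
				 struct ol_txrx_vdev_t *vdev,
				 qdf_nbuf_t msdu)
	{
		struct ol_txrx_msdu_info_t msdu_info;
		struct ol_tx_desc_t *tx_desc;
		struct ol_tx_frms_queue_t *txq;

		qdf_mem_zero(&msdu_info, sizeof(msdu_info));
		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, &msdu_info);
		if (!tx_desc)
			return -1;	/* out of tx descriptors */

		txq = ol_tx_classify(vdev, tx_desc, msdu, &msdu_info);
		if (!txq) {
			/* classification failed - recycle the descriptor */
			ol_tx_desc_free(pdev, tx_desc);
			return -1;
		}

		ol_tx_enqueue(pdev, txq, tx_desc, &msdu_info);
		/* the tx scheduler (not shown) later dequeues and downloads
		 * the frame when enough target credit is available */
		return 0;
	}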
+
 void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
 				ol_tx_desc_list *tx_descs, int had_error)
 {

+ 25 - 0
core/dp/txrx/ol_tx_desc.h

@@ -72,6 +72,31 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 				   qdf_nbuf_t netbuf,
 				   struct ol_txrx_msdu_info_t *msdu_info);
 
+
+/**
+ * @brief Allocate and initialize a tx descriptor for a HL system.
+ * @details
+ *  Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
+ *  for private use within the host data SW, and a HTT tx descriptor for
+ *  downloading tx meta-data to the target FW/HW.
+ *  Fill in the fields of this pair of tx descriptors based on the
+ *  information in the netbuf.
+ *
+ * @param pdev - the data physical device sending the data
+ *      (for accessing the tx desc pool)
+ * @param vdev - the virtual device sending the data
+ *      (for specifying the transmitter address for multicast / broadcast data)
+ * @param netbuf - the tx frame
+ * @param msdu_info - tx meta-data
+ */
+struct ol_tx_desc_t *
+ol_tx_desc_hl(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_txrx_vdev_t *vdev,
+		qdf_nbuf_t netbuf,
+		struct ol_txrx_msdu_info_t *msdu_info);
+
+
 /**
  * @brief Use a tx descriptor ID to find the corresponding descriptor object.
  *

The file diff has been suppressed because it is too large
+ 1599 - 0
core/dp/txrx/ol_tx_queue.c


+ 504 - 15
core/dp/txrx/ol_tx_queue.h

@@ -37,46 +37,405 @@
 #include <qdf_types.h>          /* bool */
 
 /*--- function prototypes for optional queue log feature --------------------*/
-#if defined(ENABLE_TX_QUEUE_LOG)
+#if defined(ENABLE_TX_QUEUE_LOG) || \
+	(defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT))
 
+/**
+ * ol_tx_queue_log_enqueue() - enqueue tx queue logs
+ * @pdev: physical device object
+ * @msdu_info: tx msdu meta data
+ * @frms: number of frames for which logs need to be enqueued
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
 void
 ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
 			struct ol_txrx_msdu_info_t *msdu_info,
 			int frms, int bytes);
+
+/**
+ * ol_tx_queue_log_dequeue() - dequeue tx queue logs
+ * @pdev: physical device object
+ * @txq: tx queue
+ * @frms: number of frames for which logs need to be dequeued
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
 void
 ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
 			struct ol_tx_frms_queue_t *txq, int frms, int bytes);
+
+/**
+ * ol_tx_queue_log_free() - free tx queue logs
+ * @pdev: physical device object
+ * @txq: tx queue
+ * @tid: tid value
+ * @frms: number of frames for which logs need to be freed
+ * @bytes: number of bytes
+ *
+ *
+ * Return: None
+ */
 void
 ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
 		     struct ol_tx_frms_queue_t *txq,
 		     int tid, int frms, int bytes);
-#define OL_TX_QUEUE_LOG_ENQUEUE ol_tx_queue_log_enqueue
-#define OL_TX_QUEUE_LOG_DEQUEUE ol_tx_queue_log_dequeue
-#define OL_TX_QUEUE_LOG_FREE    ol_tx_queue_log_free
 
 #else
 
-#define OL_TX_QUEUE_LOG_ENQUEUE(pdev, msdu_info, frms, bytes)   /* no-op */
-#define OL_TX_QUEUE_LOG_DEQUEUE(pdev, txq, frms, bytes) /* no-op */
-#define OL_TX_QUEUE_LOG_FREE(pdev, txq, tid, frms, bytes)       /* no-op */
+static inline void
+ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
+			struct ol_txrx_msdu_info_t *msdu_info,
+			int frms, int bytes)
+{
+	return;
+}
+
+static inline void
+ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
+			struct ol_tx_frms_queue_t *txq, int frms, int bytes)
+{
+	return;
+}
+
+static inline void
+ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
+		     struct ol_tx_frms_queue_t *txq,
+		     int tid, int frms, int bytes)
+{
+	return;
+}
 
-#endif /* TXRX_DEBUG_LEVEL > 5 */
+#endif
 
-#define ol_tx_enqueue(pdev, txq, tx_desc, tx_msdu_info) /* no-op */
-#define ol_tx_dequeue(pdev, ext_tid, txq, head, num_frames, credit, bytes) 0
-#define ol_tx_queue_free(pdev, txq, tid)        /* no-op */
-#define ol_tx_queue_discard(pdev, flush, tx_descs)      /* no-op */
+#if defined(CONFIG_HL_SUPPORT)
 
+/**
+ * @brief Queue a tx frame to the tid queue.
+ *
+ * @param pdev - the data virtual device sending the data
+ *      (for storing the tx desc in the virtual dev's tx_target_list,
+ *      and for accessing the phy dev)
+ * @param txq - which queue the tx frame gets stored in
+ * @param tx_desc - tx meta-data, including prev and next ptrs
+ * @param tx_msdu_info - characteristics of the tx frame
+ */
+void
+ol_tx_enqueue(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		struct ol_tx_desc_t *tx_desc,
+		struct ol_txrx_msdu_info_t *tx_msdu_info);
+
+/**
+ * @brief - remove the specified number of frames from the head of a tx queue
+ * @details
+ *  This function removes frames from the head of a tx queue,
+ *  and returns them as a NULL-terminated linked list.
+ *  The function will remove frames until one of the following happens:
+ *  1.  The tx queue is empty
+ *  2.  The specified number of frames have been removed
+ *  3.  Removal of more frames would exceed the specified credit limit
+ *
+ * @param pdev - the physical device object
+ * @param txq - which tx queue to remove frames from
+ * @param head - which contains return linked-list of tx frames (descriptors)
+ * @param num_frames - maximum number of frames to remove
+ * @param[in/out] credit -
+ *     input:  max credit the dequeued frames can consume
+ *     output: how much credit the dequeued frames consume
+ * @param[out] bytes - the sum of the sizes of the dequeued frames
+ * @return number of frames dequeued
+ */
+u_int16_t
+ol_tx_dequeue(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	ol_tx_desc_list *head,
+	u_int16_t num_frames,
+	u_int32_t *credit,
+	int *bytes);
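
A hedged sketch of how a scheduler-side caller might use ol_tx_dequeue(): pull a batch from a selected queue under a credit budget, then account for the credit the batch actually consumed. The helper name and the frame cap of 16 are illustrative only.

	static u_int16_t
	ol_tx_sched_dequeue_sketch(struct ol_txrx_pdev_t *pdev,
				   struct ol_tx_frms_queue_t *txq,
				   u_int32_t *remaining_credit,
				   ol_tx_desc_list *batch)
	{
		u_int32_t credit = *remaining_credit; /* max the batch may use */
		int bytes = 0;
		u_int16_t frms;

		TAILQ_INIT(batch);
		frms = ol_tx_dequeue(pdev, txq, batch, 16 /* frame cap */,
				     &credit, &bytes);
		/* on return, 'credit' holds the credit actually consumed */
		*remaining_credit -= credit;
		return frms;
	}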
+
+/**
+ * @brief - free all of frames from the tx queue while deletion
+ * @details
+ *  This function frees all of frames from the tx queue.
+ *  This function is called during peer or vdev deletion.
+ *  This function notifies the scheduler, so the scheduler can update
+ *  its state to account for the absence of the queue.
+ *
+ * @param pdev - the physical device object, which stores the txqs
+ * @param txq - which tx queue to free frames from
+ * @param tid - the extended TID that the queue belongs to
+ */
+void
+ol_tx_queue_free(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		int tid);
+
+/**
+ * @brief - discard pending tx frames from the tx queue
+ * @details
+ *  This function is called if there are too many queues in tx scheduler.
+ *  This function is called if we wants to flush all pending tx
+ *  queues in tx scheduler.
+ *
+ * @param pdev - the physical device object, which stores the txqs
+ * @param flush_all - flush all pending tx queues if set to true
+ * @param[out] tx_descs - the list of discarded tx descriptors is returned
+ *      to the caller through this parameter
+ */
+
+void
+ol_tx_queue_discard(
+		struct ol_txrx_pdev_t *pdev,
+		bool flush_all,
+		ol_tx_desc_list *tx_descs);
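
The expected call pattern, sketched under the assumption that the caller owns the returned descriptors and frees them with ol_tx_desc_frame_list_free() from ol_tx_desc.h; the wrapper name is hypothetical.

	static void
	ol_tx_queue_discard_sketch(struct ol_txrx_pdev_t *pdev)
	{
		ol_tx_desc_list tx_descs;

		TAILQ_INIT(&tx_descs);
		/* let the scheduler pick victim queues and unlink frames */
		ol_tx_queue_discard(pdev, false, &tx_descs);
		/* the unlinked descriptors come back for the caller to free */
		ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* had_error */);
	}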
+
+#else
+
+static inline void
+ol_tx_enqueue(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		struct ol_tx_desc_t *tx_desc,
+		struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	return;
+}
+
+static inline u_int16_t
+ol_tx_dequeue(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	ol_tx_desc_list *head,
+	u_int16_t num_frames,
+	u_int32_t *credit,
+	int *bytes)
+{
+	return 0;
+}
+
+static inline void
+ol_tx_queue_free(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		int tid)
+{
+	return;
+}
+
+static inline void
+ol_tx_queue_discard(
+		struct ol_txrx_pdev_t *pdev,
+		bool flush_all,
+		ol_tx_desc_list *tx_descs)
+{
+	return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+void
+ol_txrx_peer_bal_add_limit_peer(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t peer_id,
+		u_int16_t peer_limit);
+
+void
+ol_txrx_peer_bal_remove_limit_peer(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t peer_id);
+
+/**
+ * ol_txrx_peer_pause_but_no_mgmt_q() - suspend/pause all txqs except
+ *					management queue for a given peer
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer);
+
+/**
+ * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all txqs except management
+ *					  queue for a given peer
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer);
+
+/**
+ * ol_tx_bad_peer_dequeue_check() - retrieve the send limit
+ *				    of the tx queue category
+ * @txq: tx queue of the head of the category list
+ * @max_frames: send limit of the txq category
+ * @tx_limit_flag: set to true if the tx limit is reached
+ *
+ * Return: send limit
+ */
+u_int16_t
+ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
+			     u_int16_t max_frames,
+			     u_int16_t *tx_limit_flag);
+
+/**
+ * ol_tx_bad_peer_update_tx_limit() - update the send limit of the
+ *				      tx queue category
+ * @pdev: the physical device object
+ * @txq: tx queue of the head of the category list
+ * @frames: number of frames that have been dequeued
+ * @tx_limit_flag: tx limit reached flag
+ *
+ * Return: None
+ */
+void
+ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
+			       struct ol_tx_frms_queue_t *txq,
+			       u_int16_t frames,
+			       u_int16_t tx_limit_flag);
+
+/**
+ * ol_txrx_set_txq_peer() - set peer to the tx queue's peer
+ * @txq: tx queue for a given tid
+ * @peer: the peer device object
+ *
+ * Return: None
+ */
+void
+ol_txrx_set_txq_peer(
+	struct ol_tx_frms_queue_t *txq,
+	struct ol_txrx_peer_t *peer);
+
+/**
+ * @brief - initialize the peer balance context
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev);
+
+/**
+ * @brief - deinitialize the peer balance context
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev);
+
+#else
+
+static inline void ol_txrx_peer_bal_add_limit_peer(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t peer_id,
+		u_int16_t peer_limit)
+{
+	return;
+}
+
+static inline void ol_txrx_peer_bal_remove_limit_peer(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t peer_id)
+{
+	return;
+}
+
+static inline void ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+	return;
+}
+
+static inline void ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
+{
+	return;
+}
+
+static inline u_int16_t
+ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
+			     u_int16_t max_frames,
+			     u_int16_t *tx_limit_flag)
+{
+	/* just return max_frames */
+	return max_frames;
+}
+
+static inline void
+ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
+			       struct ol_tx_frms_queue_t *txq,
+			       u_int16_t frames,
+			       u_int16_t tx_limit_flag)
+{
+	return;
+}
+
+static inline void
+ol_txrx_set_txq_peer(
+		struct ol_tx_frms_queue_t *txq,
+		struct ol_txrx_peer_t *peer)
+{
+	return;
+}
+
+static inline void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+static inline void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
+
+#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
+
+/**
+ * ol_tx_queue_log_sched() - start logging of tx queues for HL
+ * @pdev: physical device object
+ * @credit: number of credits
+ * @num_active_tids: number of active tids for which logging needs to be done
+ * @active_bitmap: bitmap
+ * @data: buffer
+ *
+ * Return: None
+ */
 void
 ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
 		      int credit,
 		      int *num_active_tids,
 		      uint32_t **active_bitmap, uint8_t **data);
+#else
+
+static inline void
+ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
+		      int credit,
+		      int *num_active_tids,
+		      uint32_t **active_bitmap, uint8_t **data)
+{
+	return;
+}
+#endif /* defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING) */
+
+#if defined(CONFIG_HL_SUPPORT) && TXRX_DEBUG_LEVEL > 5
+/**
+ * @brief - show current state of all tx queues
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void
+ol_tx_queues_display(struct ol_txrx_pdev_t *pdev);
 
-#define OL_TX_QUEUE_LOG_SCHED( \
-		pdev, credit, num_active_tids, active_bitmap, data)
+#else
 
-#define ol_tx_queues_display(pdev)      /* no-op */
+static inline void
+ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+#endif
 
 #define ol_tx_queue_decs_reinit(peer, peer_id)  /* no-op */
 
@@ -89,4 +448,134 @@ void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
 #else
 #define ol_tx_throttle_init(pdev)       /*no op */
 #endif
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+static inline bool
+ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
+				 struct ol_tx_frms_queue_t *txq)
+{
+	return txq == pdev->tx_sched.last_used_txq;
+}
+
+/**
+ * ol_tx_txq_group_credit_limit() - check for credit limit of a given tx queue
+ * @pdev: physical device object
+ * @txq: tx queue for which credit limit needs be to checked
+ * @credit: number of credits of the selected category
+ *
+ * Return: updated credits
+ */
+u_int32_t ol_tx_txq_group_credit_limit(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		u_int32_t credit);
+
+/**
+ * ol_tx_txq_group_credit_update() - update group credits of the
+ *				     selected category
+ * @pdev: physical device object
+ * @txq: tx queue for which credit needs to be updated
+ * @credit: number of credits by which selected category needs to be updated
+ * @absolute: TXQ group absolute value
+ *
+ * Return: None
+ */
+void ol_tx_txq_group_credit_update(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		int32_t credit,
+		u_int8_t absolute);
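
A sketch of how these two hooks are meant to bracket a dispatch step: clamp the usable credit to what the queue's group allows before dequeuing, then charge the group for what was actually sent. The helper name is hypothetical, the dispatch itself is elided, and it is assumed here that absolute = 0 requests a relative (delta) update.

	static u_int32_t
	ol_tx_group_credit_sketch(struct ol_txrx_pdev_t *pdev,
				  struct ol_tx_frms_queue_t *txq,
				  u_int32_t credit)
	{
		u_int32_t usable;
		u_int32_t used = 0;

		usable = ol_tx_txq_group_credit_limit(pdev, txq, credit);

		/* ... dequeue/send up to 'usable' credits worth of frames,
		 * recording the credit consumed in 'used' ... */

		/* charge the group: relative decrement (absolute = 0 assumed
		 * to mean a delta update) */
		ol_tx_txq_group_credit_update(pdev, txq, -(int32_t)used, 0);
		return used;
	}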
+
+/**
+ * ol_tx_set_vdev_group_ptr() - update vdev queues group pointer
+ * @pdev: physical device object
+ * @vdev_id: vdev id for which group pointer needs to update
+ * @grp_ptr: pointer to ol tx queue group which needs to be set for vdev queues
+ *
+ * Return: None
+ */
+void
+ol_tx_set_vdev_group_ptr(
+		ol_txrx_pdev_handle pdev,
+		u_int8_t vdev_id,
+		struct ol_tx_queue_group_t *grp_ptr);
+
+/**
+ * ol_tx_txq_set_group_ptr() - update tx queue group pointer
+ * @txq: tx queue of which group pointer needs to update
+ * @grp_ptr: pointer to ol tx queue group which needs to be
+ *	     set for given tx queue
+ *
+ *
+ * Return: None
+ */
+void
+ol_tx_txq_set_group_ptr(
+		struct ol_tx_frms_queue_t *txq,
+		struct ol_tx_queue_group_t *grp_ptr);
+
+/**
+ * ol_tx_set_peer_group_ptr() - update peer tx queues group pointer
+ *				for a given tid
+ * @pdev: physical device object
+ * @peer: peer device object
+ * @vdev_id: vdev id
+ * @tid: tid for which group pointer needs to update
+ *
+ *
+ * Return: None
+ */
+void
+ol_tx_set_peer_group_ptr(
+		ol_txrx_pdev_handle pdev,
+		struct ol_txrx_peer_t *peer,
+		u_int8_t vdev_id,
+		u_int8_t tid);
+#else
+
+static inline bool
+ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
+				 struct ol_tx_frms_queue_t *txq)
+{
+	return 0;
+}
+
+static inline
+u_int32_t ol_tx_txq_group_credit_limit(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		u_int32_t credit)
+{
+	return credit;
+}
+
+static inline void ol_tx_txq_group_credit_update(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_frms_queue_t *txq,
+		int32_t credit,
+		u_int8_t absolute)
+{
+	return;
+}
+
+static inline void
+ol_tx_txq_set_group_ptr(
+		struct ol_tx_frms_queue_t *txq,
+		struct ol_tx_queue_group_t *grp_ptr)
+{
+	return;
+}
+
+static inline void
+ol_tx_set_peer_group_ptr(
+		ol_txrx_pdev_handle pdev,
+		struct ol_txrx_peer_t *peer,
+		u_int8_t vdev_id,
+		u_int8_t tid)
+{
+	return;
+}
+#endif
+
 #endif /* _OL_TX_QUEUE__H_ */

+ 1482 - 0
core/dp/txrx/ol_tx_sched.c

@@ -0,0 +1,1482 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <qdf_nbuf.h>         /* qdf_nbuf_t, etc. */
+#include <htt.h>              /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h>    /* htt_tx_desc_tid */
+#include <ol_txrx_api.h>      /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h>    /* pdev stats, etc. */
+#include <ol_tx_desc.h>       /* ol_tx_desc */
+#include <ol_tx_send.h>       /* ol_tx_send */
+#include <ol_tx_sched.h>      /* OL_TX_SCHED, etc. */
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+#include <qdf_types.h>
+#include <qdf_mem.h>         /* qdf_os_mem_alloc_consistent et al */
+
+#if defined(CONFIG_HL_SUPPORT)
+
+#if defined(DEBUG_HL_LOGGING)
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);
+
+#else
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+#endif /* defined(DEBUG_HL_LOGGING) */
+
+#if DEBUG_HTT_CREDIT
+#define OL_TX_DISPATCH_LOG_CREDIT()                                           \
+	do {								      \
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,	\
+			"TX %d bytes\n", qdf_nbuf_len(msdu));	\
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,	\
+			" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",  \
+			qdf_atomic_read(&pdev->target_tx_credit),	\
+			qdf_atomic_read(&pdev->target_tx_credit) - 1,	\
+			qdf_nbuf_len(msdu));				\
+	} while (0)
+#else
+#define OL_TX_DISPATCH_LOG_CREDIT()
+#endif
+
+/*--- generic definitions used by the scheduler framework for all algs ---*/
+
+struct ol_tx_sched_ctx {
+	ol_tx_desc_list head;
+	int frms;
+};
+
+typedef TAILQ_HEAD(ol_tx_frms_queue_list_s, ol_tx_frms_queue_t)
+	ol_tx_frms_queue_list;
+
+#define OL_A_MAX(_x, _y) ((_x) > (_y) ? (_x) : (_y))
+
+#define OL_A_MIN(_x, _y) ((_x) < (_y) ? (_x) : (_y))
+
+	/*--- scheduler algorithm selection ---*/
+
+	/*--- scheduler options -----------------------------------------------
+	 * 1. Round-robin scheduler:
+	 *    Select the TID that is at the head of the list of active TIDs.
+	 *    Select the head tx queue for this TID.
+	 *    Move the tx queue to the back of the list of tx queues for
+	 *    this TID.
+	 *    Move the TID to the back of the list of active TIDs.
+	 *    Send as many frames from the tx queue as credit allows.
+	 * 2. Weighted-round-robin advanced scheduler:
+	 *    Keep an ordered list of which TID gets selected next.
+	 *    Use a weighted-round-robin scheme to determine when to promote
+	 *    a TID within this list.
+	 *    If a TID at the head of the list is inactive, leave it at the
+	 *    head, but check the next TIDs.
+	 *    If the credit available is less than the credit threshold for the
+	 *    next active TID, don't send anything, and leave the TID at the
+	 *    head of the list.
+	 *    After a TID is selected, move it to the back of the list.
+	 *    Select the head tx queue for this TID.
+	 *    Move the tx queue to the back of the list of tx queues for this
+	 *    TID.
+	 *    Send no more frames than the limit specified for the TID.
+	 */
+#define OL_TX_SCHED_RR  1
+#define OL_TX_SCHED_WRR_ADV 2
+
+#ifndef OL_TX_SCHED
+	/*#define OL_TX_SCHED OL_TX_SCHED_RR*/
+#define OL_TX_SCHED OL_TX_SCHED_WRR_ADV /* default */
+#endif
+
+
+#if OL_TX_SCHED == OL_TX_SCHED_RR
+
+#define ol_tx_sched_rr_t ol_tx_sched_t
+
+#define OL_TX_SCHED_NUM_CATEGORIES (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
+
+#define ol_tx_sched_init                ol_tx_sched_init_rr
+#define ol_tx_sched_select_init(pdev)   /* no-op */
+#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_rr
+#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_rr
+#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_rr
+#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_rr
+#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_rr
+#define ol_tx_sched_category_info       ol_tx_sched_category_info_rr
+#define ol_tx_sched_discard_select_category \
+		ol_tx_sched_discard_select_category_rr
+
+#elif OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
+
+#define ol_tx_sched_wrr_adv_t ol_tx_sched_t
+
+#define OL_TX_SCHED_NUM_CATEGORIES OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES
+
+#define ol_tx_sched_init                ol_tx_sched_init_wrr_adv
+#define ol_tx_sched_select_init(pdev) \
+		do { \
+			qdf_spin_lock_bh(&pdev->tx_queue_spinlock); \
+			ol_tx_sched_select_init_wrr_adv(pdev); \
+			qdf_spin_unlock_bh(&pdev->tx_queue_spinlock); \
+		} while (0)
+#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_wrr_adv
+#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_wrr_adv
+#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_wrr_adv
+#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_wrr_adv
+#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_wrr_adv
+#define ol_tx_sched_category_info       ol_tx_sched_category_info_wrr_adv
+#define ol_tx_sched_discard_select_category \
+		ol_tx_sched_discard_select_category_wrr_adv
+
+#else
+
+#error Unknown OL TX SCHED specification
+
+#endif /* OL_TX_SCHED */
+
+	/*--- round-robin scheduler ----------------------------------------*/
+#if OL_TX_SCHED == OL_TX_SCHED_RR
+
+	/*--- definitions ---*/
+
+	struct ol_tx_active_queues_in_tid_t {
+		/* list_elem is used to queue up into up level queues*/
+		TAILQ_ENTRY(ol_tx_active_queues_in_tid_t) list_elem;
+		u_int32_t frms;
+		u_int32_t bytes;
+		ol_tx_frms_queue_list head;
+		bool    active;
+		int tid;
+	};
+
+	struct ol_tx_sched_rr_t {
+		struct ol_tx_active_queues_in_tid_t
+			tx_active_queues_in_tid_array[OL_TX_NUM_TIDS
+						+ OL_TX_VDEV_NUM_QUEUES];
+	TAILQ_HEAD(ol_tx_active_tids_s, ol_tx_active_queues_in_tid_t)
+							tx_active_tids_list;
+		u_int8_t discard_weights[OL_TX_NUM_TIDS
+					+ OL_TX_VDEV_NUM_QUEUES];
+	};
+
+#define TX_SCH_MAX_CREDIT_FOR_THIS_TID(tidq) 16
+
+/*--- functions ---*/
+
+/*
+ * The scheduler sync spinlock has been acquired outside this function,
+ * so there is no need to worry about mutex within this function.
+ */
+static int
+ol_tx_sched_select_batch_rr(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_sched_ctx *sctx,
+	u_int32_t credit)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+	struct ol_tx_frms_queue_t *next_tq;
+	u_int16_t frames, used_credits, tx_limit, tx_limit_flag = 0;
+	int bytes;
+
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+	if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
+		return 0;
+
+	txq_queue = TAILQ_FIRST(&scheduler->tx_active_tids_list);
+
+	TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue, list_elem);
+	txq_queue->active = false;
+
+	next_tq = TAILQ_FIRST(&txq_queue->head);
+	TAILQ_REMOVE(&txq_queue->head, next_tq, list_elem);
+
+	credit = OL_A_MIN(credit, TX_SCH_MAX_CREDIT_FOR_THIS_TID(next_tq));
+	frames = next_tq->frms; /* download as many frames as credit allows */
+	tx_limit = ol_tx_bad_peer_dequeue_check(next_tq,
+					frames,
+					&tx_limit_flag);
+	frames = ol_tx_dequeue(
+			pdev, next_tq, &sctx->head, tx_limit, &credit,
+			&bytes);
+	ol_tx_bad_peer_update_tx_limit(pdev, next_tq, frames, tx_limit_flag);
+
+	used_credits = credit;
+	txq_queue->frms -= frames;
+	txq_queue->bytes -= bytes;
+
+	if (next_tq->frms > 0) {
+		TAILQ_INSERT_TAIL(&txq_queue->head, next_tq, list_elem);
+		TAILQ_INSERT_TAIL(
+				&scheduler->tx_active_tids_list,
+						txq_queue, list_elem);
+		txq_queue->active = true;
+	} else if (!TAILQ_EMPTY(&txq_queue->head)) {
+		/*
+		 * This tx queue is empty, but there's another tx queue for the
+		 * same TID that is not empty.
+		 * Thus, the TID as a whole is active.
+		 */
+		TAILQ_INSERT_TAIL(
+				&scheduler->tx_active_tids_list,
+						txq_queue, list_elem);
+		txq_queue->active = true;
+	}
+	sctx->frms += frames;
+
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+	return used_credits;
+}
+
+static inline void
+ol_tx_sched_txq_enqueue_rr(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int tid,
+	int frms,
+	int bytes)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+	if (txq->flag != ol_tx_queue_active)
+		TAILQ_INSERT_TAIL(&txq_queue->head, txq, list_elem);
+
+	txq_queue->frms += frms;
+	txq_queue->bytes += bytes;
+
+	if (!txq_queue->active) {
+		TAILQ_INSERT_TAIL(
+				&scheduler->tx_active_tids_list,
+				txq_queue, list_elem);
+		txq_queue->active = true;
+	}
+}
+
+static inline void
+ol_tx_sched_txq_deactivate_rr(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int tid)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+	txq_queue->frms -= txq->frms;
+	txq_queue->bytes -= txq->bytes;
+
+	TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
+	/*if (txq_queue->frms == 0 && txq_queue->active) {*/
+	if (TAILQ_EMPTY(&txq_queue->head) && txq_queue->active) {
+		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
+			     list_elem);
+		txq_queue->active = false;
+	}
+}
+
+ol_tx_frms_queue_list *
+ol_tx_sched_category_tx_queues_rr(struct ol_txrx_pdev_t *pdev, int tid)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+	return &txq_queue->head;
+}
+
+int
+ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_rr_t *scheduler;
+	u_int8_t i, tid = 0;
+	int max_score = 0;
+
+	scheduler = pdev->tx_sched.scheduler;
+	/*
+	 * Choose which TID's tx frames to drop next based on two factors:
+	 * 1.  Which TID has the most tx frames present
+	 * 2.  The TID's priority (high-priority TIDs have a low discard_weight)
+	 */
+	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
+		int score;
+		score =
+			scheduler->tx_active_queues_in_tid_array[i].frms *
+			scheduler->discard_weights[i];
+		if (max_score == 0 || score > max_score) {
+			max_score = score;
+			tid = i;
+		}
+	}
+	return tid;
+}
+
+void
+ol_tx_sched_txq_discard_rr(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int tid, int frames, int bytes)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
+
+	if (0 == txq->frms)
+		TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
+
+	txq_queue->frms -= frames;
+	txq_queue->bytes -= bytes;
+	if (txq_queue->active == true && txq_queue->frms == 0) {
+		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
+			     list_elem);
+		txq_queue->active = false;
+	}
+}
+
+void
+ol_tx_sched_category_info_rr(
+	struct ol_txrx_pdev_t *pdev,
+	int cat, int *active,
+	int *frms, int *bytes)
+{
+	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_active_queues_in_tid_t *txq_queue;
+
+	txq_queue = &scheduler->tx_active_queues_in_tid_array[cat];
+
+	*active = txq_queue->active;
+	*frms = txq_queue->frms;
+	*bytes = txq_queue->bytes;
+}
+
+enum {
+	ol_tx_sched_discard_weight_voice = 1,
+	ol_tx_sched_discard_weight_video = 4,
+	ol_tx_sched_discard_weight_ucast_default = 8,
+	ol_tx_sched_discard_weight_mgmt_non_qos = 1, /* 0? */
+	ol_tx_sched_discard_weight_mcast = 1, /* 0? also for probe & assoc */
+};
+
+void *
+ol_tx_sched_init_rr(
+	struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_rr_t *scheduler;
+	int i;
+
+	scheduler = qdf_mem_malloc(sizeof(struct ol_tx_sched_rr_t));
+	if (scheduler == NULL)
+		return scheduler;
+
+	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
+		scheduler->tx_active_queues_in_tid_array[i].tid = i;
+		TAILQ_INIT(&scheduler->tx_active_queues_in_tid_array[i].head);
+		scheduler->tx_active_queues_in_tid_array[i].active = 0;
+		scheduler->tx_active_queues_in_tid_array[i].frms = 0;
+		scheduler->tx_active_queues_in_tid_array[i].bytes = 0;
+	}
+	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+		scheduler->tx_active_queues_in_tid_array[i].tid = i;
+		if (i < OL_TX_NON_QOS_TID) {
+			int ac = TXRX_TID_TO_WMM_AC(i);
+			switch (ac) {
+			case TXRX_WMM_AC_VO:
+				scheduler->discard_weights[i] =
+					ol_tx_sched_discard_weight_voice;
+				break;
+			case TXRX_WMM_AC_VI:
+				scheduler->discard_weights[i] =
+					ol_tx_sched_discard_weight_video;
+				break;
+			default:
+				scheduler->discard_weights[i] =
+				ol_tx_sched_discard_weight_ucast_default;
+				break;
+			}
+		} else {
+			scheduler->discard_weights[i] =
+				ol_tx_sched_discard_weight_mgmt_non_qos;
+		}
+	}
+	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+		int j = i + OL_TX_NUM_TIDS;
+		scheduler->tx_active_queues_in_tid_array[j].tid =
+							OL_TX_NUM_TIDS - 1;
+		scheduler->discard_weights[j] =
+					ol_tx_sched_discard_weight_mcast;
+	}
+	TAILQ_INIT(&scheduler->tx_active_tids_list);
+
+	return scheduler;
+}
+
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+		      struct ol_tx_wmm_param_t wmm_param)
+{
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+		  "Dummy function when OL_TX_SCHED_RR is enabled\n");
+}
+
+#endif /* OL_TX_SCHED == OL_TX_SCHED_RR */
+
+/*--- advanced scheduler ----------------------------------------------------*/
+#if OL_TX_SCHED == OL_TX_SCHED_WRR_ADV
+
+/*--- definitions ---*/
+
+struct ol_tx_sched_wrr_adv_category_info_t {
+	struct {
+		int wrr_skip_weight;
+		u_int32_t credit_threshold;
+		u_int16_t send_limit;
+		int credit_reserve;
+		int discard_weight;
+	} specs;
+	struct {
+		int wrr_count;
+		int frms;
+		int bytes;
+		ol_tx_frms_queue_list head;
+		bool active;
+	} state;
+#ifdef DEBUG_HL_LOGGING
+	struct {
+		char *cat_name;
+		unsigned int queued;
+		unsigned int dispatched;
+		unsigned int discard;
+	} stat;
+#endif
+};
+
+#define OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(cat, \
+		wrr_skip_weight, \
+		credit_threshold, \
+		send_limit, \
+		credit_reserve, \
+		discard_weights) \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
+	(wrr_skip_weight) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
+	(credit_threshold) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
+	(send_limit) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
+	(credit_reserve) }; \
+enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
+	(discard_weights) }
+
+/* Rome:
+ * For high-volume traffic flows (VI, BE, BK), use a credit threshold
+ * roughly equal to a large A-MPDU (occupying half the target memory
+ * available for holding tx frames) to download AMPDU-sized batches
+ * of traffic.
+ * For high-priority, low-volume traffic flows (VO and mgmt), use no
+ * credit threshold, to minimize download latency.
+ */
+/*                                            WRR           send
+ *                                           skip  credit  limit credit disc
+ *                                            wts  thresh (frms) reserv  wts
+ */
+#ifdef HIF_SDIO
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     17,    24,     0,  1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     17,    16,     1,  4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     17,    16,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12,      6,     4,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     17,     4,     1,  4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
+#else
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     16,    24,     0,  1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     16,    16,     1,  4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     12,    12,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12,      6,     4,     1,  8);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     16,     4,     1,  4);
+OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
+#endif
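
For reference, each OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC() line above only token-pastes a set of enum constants; for example, the non-SDIO VO entry expands roughly to the following, which OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE() later copies into the scheduler's per-category specs:

	/* Approximate expansion of
	 * OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 16, 24, 0, 1):
	 */
	enum { OL_TX_SCHED_WRR_ADV_VO_WRR_SKIP_WEIGHT  = 1 };
	enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_THRESHOLD = 16 };
	enum { OL_TX_SCHED_WRR_ADV_VO_SEND_LIMIT       = 24 };
	enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_RESERVE   = 0 };
	enum { OL_TX_SCHED_WRR_ADV_VO_DISCARD_WEIGHT   = 1 };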
+
+#ifdef DEBUG_HL_LOGGING
+
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)               \
+	do {                                                                 \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
+		.stat.queued = 0;					\
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
+		.stat.discard = 0;					\
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
+		.stat.dispatched = 0;					\
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
+		.stat.cat_name = #category;				\
+	} while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)             \
+	do {	\
+		category->stat.queued += frms;		\
+	} while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)           \
+	do {	\
+		category->stat.discard += frms;		\
+	} while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)         \
+	do {	\
+		category->stat.dispatched += frms;		\
+	} while (0)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)                        \
+	ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)                   \
+	ol_tx_sched_wrr_adv_cat_cur_state_dump(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)                       \
+	ol_tx_sched_wrr_adv_cat_stat_clear(scheduler)
+
+#else   /* DEBUG_HL_LOGGING */
+
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)
+#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)
+
+#endif  /* DEBUG_HL_LOGGING */
+
+#define OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(category, scheduler) \
+	do { \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+		.specs.wrr_skip_weight = \
+		OL_TX_SCHED_WRR_ADV_ ## category ## _WRR_SKIP_WEIGHT; \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+		.specs.credit_threshold = \
+		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_THRESHOLD; \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+		.specs.send_limit = \
+		OL_TX_SCHED_WRR_ADV_ ## category ## _SEND_LIMIT; \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+		.specs.credit_reserve = \
+		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_RESERVE; \
+		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
+		.specs.discard_weight = \
+		OL_TX_SCHED_WRR_ADV_ ## category ## _DISCARD_WEIGHT; \
+		OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler); \
+	} while (0)
+
+struct ol_tx_sched_wrr_adv_t {
+	int order[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
+	int index;
+	struct ol_tx_sched_wrr_adv_category_info_t
+		categories[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
+};
+
+#define OL_TX_AIFS_DEFAULT_VO   2
+#define OL_TX_AIFS_DEFAULT_VI   2
+#define OL_TX_AIFS_DEFAULT_BE   3
+#define OL_TX_AIFS_DEFAULT_BK   7
+#define OL_TX_CW_MIN_DEFAULT_VO   3
+#define OL_TX_CW_MIN_DEFAULT_VI   7
+#define OL_TX_CW_MIN_DEFAULT_BE   15
+#define OL_TX_CW_MIN_DEFAULT_BK   15
+
+/*--- functions ---*/
+
+#ifdef DEBUG_HL_LOGGING
+static void ol_tx_sched_wrr_adv_cat_stat_dump(
+	struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+	int i;
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "Scheduler Stats:");
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "====category(CRR,CRT,WSW): Queued  Discard  Dequeued  frms  wrr===");
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%12s(%2d, %2d, %2d):  %6d  %7d  %8d  %4d  %3d",
+			  scheduler->categories[i].stat.cat_name,
+			  scheduler->categories[i].specs.credit_reserve,
+			  scheduler->categories[i].specs.credit_threshold,
+			  scheduler->categories[i].specs.wrr_skip_weight,
+			  scheduler->categories[i].stat.queued,
+			  scheduler->categories[i].stat.discard,
+			  scheduler->categories[i].stat.dispatched,
+			  scheduler->categories[i].state.frms,
+			  scheduler->categories[i].state.wrr_count);
+	}
+}
+
+static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
+	struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+	int i;
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "Scheduler State Snapshot:");
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "====category(CRR,CRT,WSW): IS_Active  Pend_Frames  Pend_bytes  wrr===");
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%12s(%2d, %2d, %2d):  %9d  %11d  %10d  %3d",
+			  scheduler->categories[i].stat.cat_name,
+			  scheduler->categories[i].specs.credit_reserve,
+			  scheduler->categories[i].specs.credit_threshold,
+			  scheduler->categories[i].specs.wrr_skip_weight,
+			  scheduler->categories[i].state.active,
+			  scheduler->categories[i].state.frms,
+			  scheduler->categories[i].state.bytes,
+			  scheduler->categories[i].state.wrr_count);
+	}
+}
+
+static void ol_tx_sched_wrr_adv_cat_stat_clear(
+	struct ol_tx_sched_wrr_adv_t *scheduler)
+{
+	int i;
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
+		scheduler->categories[i].stat.queued = 0;
+		scheduler->categories[i].stat.discard = 0;
+		scheduler->categories[i].stat.dispatched = 0;
+	}
+}
+
+#endif
+
+static void
+ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	/* start selection from the front of the ordered list */
+	scheduler->index = 0;
+	pdev->tx_sched.last_used_txq = NULL;
+}
+
+static void
+ol_tx_sched_wrr_adv_rotate_order_list_tail(
+		struct ol_tx_sched_wrr_adv_t *scheduler, int idx)
+{
+	int value;
+	/* remember the value of the specified element */
+	value = scheduler->order[idx];
+	/* shift all further elements up one space */
+	for (; idx < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES-1; idx++)
+		scheduler->order[idx] = scheduler->order[idx + 1];
+
+	/* put the specified element at the end */
+	scheduler->order[idx] = value;
+}
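A minimal illustration of the rotation above, using a hypothetical four-entry order array (the real array has OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES entries); this is a sketch, not driver code:

static void rotate_tail_example(void)
{
	int order[4] = {0, 1, 2, 3};
	int idx = 1, value, n = 4;

	value = order[idx];              /* remember element at index 1   */
	for (; idx < n - 1; idx++)       /* shift the later elements up    */
		order[idx] = order[idx + 1];
	order[idx] = value;              /* order is now {0, 2, 3, 1}      */
}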
+
+static void
+ol_tx_sched_wrr_adv_credit_sanity_check(struct ol_txrx_pdev_t *pdev,
+					u_int32_t credit)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	int i;
+	int okay = 1;
+
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+		if (scheduler->categories[i].specs.credit_threshold > credit) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  "*** Config error: credit (%d) not enough to support category %d threshold (%d)\n",
+				  credit, i,
+				  scheduler->categories[i].specs.
+						credit_threshold);
+			okay = 0;
+		}
+	}
+	qdf_assert(okay);
+}
+
+/*
+ * The scheduler sync spinlock has been acquired outside this function,
+ * so there is no need to worry about mutex within this function.
+ */
+static int
+ol_tx_sched_select_batch_wrr_adv(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_sched_ctx *sctx,
+	u_int32_t credit)
+{
+	static int first = 1;
+	int category_index = 0;
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_frms_queue_t *txq;
+	int index;
+	struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
+	int frames, bytes, used_credits = 0, tx_limit;
+	u_int16_t tx_limit_flag;
+
+	/*
+	 * Just for good measure, do a sanity check that the initial credit
+	 * is enough to cover every category's credit threshold.
+	 */
+	if (first) {
+		first = 0;
+		ol_tx_sched_wrr_adv_credit_sanity_check(pdev, credit);
+	}
+
+	/* choose the traffic category from the ordered list */
+	index = scheduler->index;
+	while (index < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
+		category_index = scheduler->order[index];
+		category = &scheduler->categories[category_index];
+		if (!category->state.active) {
+			/* move on to the next category */
+			index++;
+			continue;
+		}
+		if (++category->state.wrr_count <
+					category->specs.wrr_skip_weight) {
+		/* skip this category (move it to the back) */
+			ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
+								   index);
+		/* try again (iterate) on the new element
+		 * that was moved up */
+			continue;
+		}
+		/* found the first active category whose WRR turn is present */
+		break;
+	}
+	if (index >= OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
+		/* no categories are active */
+		return 0;
+	}
+
+	/* is there enough credit for the selected category? */
+	if (credit < category->specs.credit_threshold) {
+		/*
+		 * Can't send yet - wait until more credit becomes available.
+		 * In the meantime, restore the WRR counter (since we didn't
+		 * service this category after all).
+		 */
+		category->state.wrr_count = category->state.wrr_count - 1;
+		return 0;
+	}
+	/* enough credit is available - go ahead and send some frames */
+	/*
+	 * This category was serviced - reset the WRR counter, and move this
+	 * category to the back of the order list.
+	 */
+	category->state.wrr_count = 0;
+	ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler, index);
+	/*
+	 * With this category moved to the back, if there's still any credit
+	 * left, set up the next invocation of this function to start from
+	 * where this one left off, by looking at the category that just got
+	 * shifted forward into the position the service category was
+	 * occupying.
+	 */
+	scheduler->index = index;
+
+	/*
+	 * Take the tx queue from the head of the category list.
+	 */
+	txq = TAILQ_FIRST(&category->state.head);
+
+	if (txq) {
+		TAILQ_REMOVE(&category->state.head, txq, list_elem);
+		credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
+		if (credit > category->specs.credit_reserve) {
+			credit -= category->specs.credit_reserve;
+			/*
+			 * this tx queue will download some frames,
+			 * so update last_used_txq
+			 */
+			pdev->tx_sched.last_used_txq = txq;
+
+			tx_limit = ol_tx_bad_peer_dequeue_check(txq,
+					category->specs.send_limit,
+					&tx_limit_flag);
+			frames = ol_tx_dequeue(
+					pdev, txq, &sctx->head,
+					tx_limit, &credit, &bytes);
+			ol_tx_bad_peer_update_tx_limit(pdev, txq,
+						       frames,
+						       tx_limit_flag);
+
+			OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category,
+								    frames);
+			used_credits = credit;
+			category->state.frms -= frames;
+			category->state.bytes -= bytes;
+			if (txq->frms > 0) {
+				TAILQ_INSERT_TAIL(&category->state.head,
+						  txq, list_elem);
+			} else {
+				if (category->state.frms == 0)
+					category->state.active = 0;
+			}
+			sctx->frms += frames;
+			ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
+		} else {
+			if (ol_tx_is_txq_last_serviced_queue(pdev, txq)) {
+				/*
+				 * The scheduler has looked at all the active
+				 * tx queues but none were able to download any
+				 * of their tx frames.
+				 * Nothing is changed, so if none were able
+				 * to download before, they won't be able to
+				 * download now.
+				 * Return that no credit has been used, which
+				 * will cause the scheduler to stop.
+				 */
+				TAILQ_INSERT_HEAD(&category->state.head, txq,
+						  list_elem);
+				return 0;
+			} else {
+				TAILQ_INSERT_TAIL(&category->state.head, txq,
+						  list_elem);
+				if (!pdev->tx_sched.last_used_txq)
+					pdev->tx_sched.last_used_txq = txq;
+			}
+		}
+		TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+	} else {
+		used_credits = 0;
+		/* TODO: find its reason */
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "ol_tx_sched_select_batch_wrr_adv: error, no TXQ can be popped.");
+	}
+	return used_credits;
+}
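To make the skip-weight handling above concrete, here is an illustrative trace assuming a hypothetical wrr_skip_weight of 3 for one category (not a driver default):

/*
 *   visit 1: wrr_count 0 -> 1, 1 < 3 -> skipped, rotated to the tail
 *   visit 2: wrr_count 1 -> 2, 2 < 3 -> skipped, rotated to the tail
 *   visit 3: wrr_count 2 -> 3, 3 < 3 is false -> serviced, wrr_count = 0
 * If the credit threshold check fails on visit 3, wrr_count is decremented
 * back to 2 and the category is retried once more credit becomes available.
 * A wrr_skip_weight of 1 therefore means the category is never skipped, and
 * the init code preloads wrr_count to wrr_skip_weight - 1 so a newly
 * activated category is serviced on its first visit.
 */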
+
+static inline void
+ol_tx_sched_txq_enqueue_wrr_adv(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int tid,
+	int frms,
+	int bytes)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+	category = &scheduler->categories[pdev->tid_to_ac[tid]];
+	category->state.frms += frms;
+	category->state.bytes += bytes;
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms);
+	if (txq->flag != ol_tx_queue_active) {
+		TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
+		category->state.active = 1; /* may have already been active */
+	}
+}
+
+static inline void
+ol_tx_sched_txq_deactivate_wrr_adv(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int tid)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+	category = &scheduler->categories[pdev->tid_to_ac[tid]];
+	category->state.frms -= txq->frms;
+	category->state.bytes -= txq->bytes;
+
+	TAILQ_REMOVE(&category->state.head, txq, list_elem);
+
+	if (category->state.frms == 0 && category->state.active)
+		category->state.active = 0;
+}
+
+ol_tx_frms_queue_list *
+ol_tx_sched_category_tx_queues_wrr_adv(struct ol_txrx_pdev_t *pdev, int cat)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+	category = &scheduler->categories[cat];
+	return &category->state.head;
+}
+
+int
+ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler;
+	u_int8_t i, cat = 0;
+	int max_score = 0;
+
+	scheduler = pdev->tx_sched.scheduler;
+	/*
+	 * Choose which category's tx frames to drop next based on two factors:
+	 * 1.  Which category has the most tx frames present
+	 * 2.  The category's priority (high-priority categories have a low
+	 *     discard_weight)
+	 */
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+		int score;
+		score =
+			scheduler->categories[i].state.frms *
+			scheduler->categories[i].specs.discard_weight;
+		if (max_score == 0 || score > max_score) {
+			max_score = score;
+			cat = i;
+		}
+	}
+	return cat;
+}
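A worked example of the selection above, with hypothetical frame counts and discard weights (not the driver defaults):

/*
 *   BE: 40 queued frames * discard_weight 4 -> score 160   <-- selected
 *   VO: 10 queued frames * discard_weight 1 -> score  10
 * A deeper backlog and a larger discard_weight (i.e. lower priority) both
 * push a category toward being discarded from first.
 */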
+
+void
+ol_tx_sched_txq_discard_wrr_adv(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_frms_queue_t *txq,
+	int cat, int frames, int bytes)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+	category = &scheduler->categories[cat];
+
+	if (0 == txq->frms)
+		TAILQ_REMOVE(&category->state.head, txq, list_elem);
+
+
+	category->state.frms -= frames;
+	category->state.bytes -= bytes;
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frames);
+	if (category->state.frms == 0)
+		category->state.active = 0;
+}
+
+void
+ol_tx_sched_category_info_wrr_adv(
+	struct ol_txrx_pdev_t *pdev,
+	int cat, int *active,
+	int *frms, int *bytes)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
+	struct ol_tx_sched_wrr_adv_category_info_t *category;
+
+	category = &scheduler->categories[cat];
+	*active = category->state.active;
+	*frms = category->state.frms;
+	*bytes = category->state.bytes;
+}
+
+void *
+ol_tx_sched_init_wrr_adv(
+		struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_wrr_adv_t *scheduler;
+	int i;
+
+	scheduler = qdf_mem_malloc(
+			sizeof(struct ol_tx_sched_wrr_adv_t));
+	if (scheduler == NULL)
+		return scheduler;
+
+	qdf_mem_zero(scheduler, sizeof(*scheduler));
+
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(NON_QOS_DATA, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(UCAST_MGMT, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_DATA, scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_MGMT, scheduler);
+
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
+		scheduler->categories[i].state.active = 0;
+		scheduler->categories[i].state.frms = 0;
+		/*scheduler->categories[i].state.bytes = 0;*/
+		TAILQ_INIT(&scheduler->categories[i].state.head);
+		/* init categories to not be skipped before
+		 * their initial selection */
+		scheduler->categories[i].state.wrr_count =
+			scheduler->categories[i].specs.wrr_skip_weight - 1;
+	}
+
+	/*
+	 * Init the order array - the initial ordering doesn't matter, as the
+	 * order array will get reshuffled as data arrives.
+	 */
+	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++)
+		scheduler->order[i] = i;
+
+	return scheduler;
+}
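For reference, this is roughly what one of the OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler) invocations above expands to after token pasting (the per-category _WRR_SKIP_WEIGHT etc. config macros are defined earlier in this file):

scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_VO].specs.wrr_skip_weight =
	OL_TX_SCHED_WRR_ADV_VO_WRR_SKIP_WEIGHT;
scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_VO].specs.credit_threshold =
	OL_TX_SCHED_WRR_ADV_VO_CREDIT_THRESHOLD;
/* ... and likewise for send_limit, credit_reserve and discard_weight,
 * followed by OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(VO, scheduler)
 */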
+
+
+/* WMM parameters are supposed to be passed when associating with the AP.
+ * Based on AIFS + CWMin, the function maps each AC to one of the four default
+ * scheduler settings, i.e. VO, VI, BE, or BK.
+ */
+void
+ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+		      struct ol_tx_wmm_param_t wmm_param)
+{
+	struct ol_tx_sched_wrr_adv_t def_cfg;
+	struct ol_tx_sched_wrr_adv_t *scheduler =
+					data_pdev->tx_sched.scheduler;
+	u_int32_t i, ac_selected;
+	u_int32_t  weight[OL_TX_NUM_WMM_AC], default_edca[OL_TX_NUM_WMM_AC];
+
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, (&def_cfg));
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, (&def_cfg));
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, (&def_cfg));
+	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, (&def_cfg));
+
+	/* default_edca = AIFS + CWMin */
+	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] =
+		OL_TX_AIFS_DEFAULT_VO + OL_TX_CW_MIN_DEFAULT_VO;
+	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] =
+		OL_TX_AIFS_DEFAULT_VI + OL_TX_CW_MIN_DEFAULT_VI;
+	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] =
+		OL_TX_AIFS_DEFAULT_BE + OL_TX_CW_MIN_DEFAULT_BE;
+	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BK] =
+		OL_TX_AIFS_DEFAULT_BK + OL_TX_CW_MIN_DEFAULT_BK;
+
+	weight[OL_TX_SCHED_WRR_ADV_CAT_VO] =
+		wmm_param.ac[OL_TX_WMM_AC_VO].aifs +
+				wmm_param.ac[OL_TX_WMM_AC_VO].cwmin;
+	weight[OL_TX_SCHED_WRR_ADV_CAT_VI] =
+		wmm_param.ac[OL_TX_WMM_AC_VI].aifs +
+				wmm_param.ac[OL_TX_WMM_AC_VI].cwmin;
+	weight[OL_TX_SCHED_WRR_ADV_CAT_BK] =
+		wmm_param.ac[OL_TX_WMM_AC_BK].aifs +
+				wmm_param.ac[OL_TX_WMM_AC_BK].cwmin;
+	weight[OL_TX_SCHED_WRR_ADV_CAT_BE] =
+		wmm_param.ac[OL_TX_WMM_AC_BE].aifs +
+				wmm_param.ac[OL_TX_WMM_AC_BE].cwmin;
+
+	for (i = 0; i < OL_TX_NUM_WMM_AC; i++) {
+		if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] >= weight[i])
+			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VO;
+		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] >= weight[i])
+			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VI;
+		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] >= weight[i])
+			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BE;
+		else
+			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BK;
+
+
+		scheduler->categories[i].specs.wrr_skip_weight =
+			def_cfg.categories[ac_selected].specs.wrr_skip_weight;
+		scheduler->categories[i].specs.credit_threshold =
+			def_cfg.categories[ac_selected].specs.credit_threshold;
+		scheduler->categories[i].specs.send_limit =
+			def_cfg.categories[ac_selected].specs.send_limit;
+		scheduler->categories[i].specs.credit_reserve =
+			def_cfg.categories[ac_selected].specs.credit_reserve;
+		scheduler->categories[i].specs.discard_weight =
+			def_cfg.categories[ac_selected].specs.discard_weight;
+	}
+}
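A worked example of the mapping, using the default AIFS/CWMin sums defined above (VO: 2+3 = 5, VI: 2+7 = 9, BE: 3+15 = 18, BK: 7+15 = 22) and a hypothetical set of AP-advertised parameters:

/*
 * Suppose the AP advertises aifs = 2, cwmin = 7 for some AC, so its
 * weight is 9:
 *   default_edca[VO] = 5  >= 9 ?  no
 *   default_edca[VI] = 9  >= 9 ?  yes -> that AC inherits the VI defaults
 * An AC with aifs = 7, cwmin = 15 (weight 22) fails all three comparisons
 * and falls through to the BK defaults.
 */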
+
+#endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */
+
+/*--- congestion control discard --------------------------------------------*/
+
+struct ol_tx_frms_queue_t *
+ol_tx_sched_discard_select_txq(
+		struct ol_txrx_pdev_t *pdev,
+		ol_tx_frms_queue_list *tx_queues)
+{
+	struct ol_tx_frms_queue_t *txq;
+	struct ol_tx_frms_queue_t *selected_txq = NULL;
+	int max_frms = 0;
+
+	/* return the tx queue with the most frames */
+	TAILQ_FOREACH(txq, tx_queues, list_elem) {
+		if (txq->frms > max_frms) {
+			max_frms = txq->frms;
+			selected_txq = txq;
+		}
+	}
+	return selected_txq;
+}
+
+u_int16_t
+ol_tx_sched_discard_select(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t frms,
+		ol_tx_desc_list *tx_descs,
+		bool force)
+{
+	int cat;
+	struct ol_tx_frms_queue_t *txq;
+	int bytes;
+	u_int32_t credit;
+	struct ol_tx_sched_notify_ctx_t notify_ctx;
+
+	/* first decide what category of traffic (e.g. TID or AC)
+	 * to discard next */
+	cat = ol_tx_sched_discard_select_category(pdev);
+
+	/* then decide which peer within this category to discard from next */
+	txq = ol_tx_sched_discard_select_txq(
+			pdev, ol_tx_sched_category_tx_queues(pdev, cat));
+	if (NULL == txq)
+		/* No More pending Tx Packets in Tx Queue. Exit Discard loop */
+		return 0;
+
+
+	if (force == false) {
+		/*
+		 * Now decide how many frames to discard from this peer-TID.
+		 * Don't discard more frames than the caller has specified.
+		 * Don't discard more than a fixed quantum of frames at a time.
+		 * Don't discard more than 50% of the queue's frames at a time,
+		 * but if there's only 1 frame left, go ahead and discard it.
+		 */
+#define OL_TX_DISCARD_QUANTUM 10
+		if (OL_TX_DISCARD_QUANTUM < frms)
+			frms = OL_TX_DISCARD_QUANTUM;
+
+
+		if (txq->frms > 1 && frms >= (txq->frms >> 1))
+			frms = txq->frms >> 1;
+	}
+
+	/*
+	 * Discard from the head of the queue, because:
+	 * 1.  Front-dropping gives applications like TCP that include ARQ
+	 *     an early notification of congestion.
+	 * 2.  For time-sensitive applications like RTP, the newest frames are
+	 *     most relevant.
+	 */
+	credit = 10000; /* no credit limit */
+	frms = ol_tx_dequeue(pdev, txq, tx_descs, frms, &credit, &bytes);
+
+	notify_ctx.event = OL_TX_DISCARD_FRAMES;
+	notify_ctx.frames = frms;
+	notify_ctx.bytes = bytes;
+	notify_ctx.txq = txq;
+	notify_ctx.info.ext_tid = cat;
+	ol_tx_sched_notify(pdev, &notify_ctx);
+
+	TX_SCHED_DEBUG_PRINT("%s Tx Drop : %d\n", __func__, frms);
+	return frms;
+}
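A short worked trace of the clamping above, with hypothetical queue depths (OL_TX_DISCARD_QUANTUM is 10, as defined in this function):

/*
 * caller asks for 25, txq holds 30 frames:
 *   quantum clamp: 25 -> 10;  half-queue clamp: 10 < 15, unchanged -> 10
 * caller asks for 10, txq holds  6 frames:
 *   quantum clamp: stays 10;  half-queue clamp: 10 >= 3 -> 3
 * caller asks for 10, txq holds  1 frame:
 *   the (txq->frms > 1) guard is false, so the single frame is discarded
 */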
+
+/*--- scheduler framework ---------------------------------------------------*/
+
+/*
+ * The scheduler mutex spinlock has been acquired outside this function,
+ * so there is no need to take locks inside this function.
+ */
+void
+ol_tx_sched_notify(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_sched_notify_ctx_t *ctx)
+{
+	struct ol_tx_frms_queue_t *txq = ctx->txq;
+	int tid;
+
+	if (!pdev->tx_sched.scheduler)
+		return;
+
+	switch (ctx->event) {
+	case OL_TX_ENQUEUE_FRAME:
+		tid = ctx->info.tx_msdu_info->htt.info.ext_tid;
+		ol_tx_sched_txq_enqueue(pdev, txq, tid, 1, ctx->bytes);
+		break;
+	case OL_TX_DELETE_QUEUE:
+		tid = ctx->info.ext_tid;
+		if (txq->flag == ol_tx_queue_active)
+			ol_tx_sched_txq_deactivate(pdev, txq, tid);
+
+		break;
+	case OL_TX_PAUSE_QUEUE:
+		tid = ctx->info.ext_tid;
+		if (txq->flag == ol_tx_queue_active)
+			ol_tx_sched_txq_deactivate(pdev, txq, tid);
+
+		break;
+	case OL_TX_UNPAUSE_QUEUE:
+		tid = ctx->info.ext_tid;
+		if (txq->frms != 0)
+			ol_tx_sched_txq_enqueue(pdev, txq, tid,
+						txq->frms, txq->bytes);
+
+		break;
+	case OL_TX_DISCARD_FRAMES:
+		/* not necessarily TID, could be category */
+		tid = ctx->info.ext_tid;
+		ol_tx_sched_txq_discard(pdev, txq, tid,
+					ctx->frames, ctx->bytes);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "Error: unknown sched notification (%d)\n",
+			  ctx->event);
+		qdf_assert(0);
+		break;
+	}
+}
+
+#define OL_TX_MSDU_ID_STORAGE_ERR(ptr) (NULL == ptr)
+
+void
+ol_tx_sched_dispatch(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_tx_sched_ctx *sctx)
+{
+	qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
+	struct ol_tx_desc_t *tx_desc;
+
+	u_int16_t *msdu_id_storage;
+	u_int16_t msdu_id;
+	int num_msdus = 0;
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+	while (sctx->frms) {
+		tx_desc = TAILQ_FIRST(&sctx->head);
+		if (tx_desc == NULL) {
+			/* TODO: find its reason */
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  "%s: error, not enough tx_desc from sctx->head.\n",
+				  __func__);
+			break;
+		}
+		msdu = tx_desc->netbuf;
+		TAILQ_REMOVE(&sctx->head, tx_desc, tx_desc_list_elem);
+		if (NULL == head_msdu)
+			head_msdu = msdu;
+
+		if (prev)
+			qdf_nbuf_set_next(prev, msdu);
+
+		prev = msdu;
+
+#ifndef ATH_11AC_TXCOMPACT
+		/*
+		 * When the tx frame is downloaded to the target, there are two
+		 * outstanding references:
+		 * 1.  The host download SW (HTT, HTC, HIF)
+		 *     This reference is cleared by the ol_tx_send_done callback
+		 *     functions.
+		 * 2.  The target FW
+		 *     This reference is cleared by the ol_tx_completion_handler
+		 *     function.
+		 * It is extremely probable that the download completion is
+		 * processed before the tx completion message.  However, under
+		 * exceptional conditions the tx completion may be processed
+		 * first. Thus, rather than assuming that reference (1) is
+		 * done before reference (2),
+		 * explicit reference tracking is needed.
+		 * Double-increment the ref count to account for both references
+		 * described above.
+		 */
+		qdf_atomic_init(&tx_desc->ref_cnt);
+		qdf_atomic_inc(&tx_desc->ref_cnt);
+		qdf_atomic_inc(&tx_desc->ref_cnt);
+#endif
+
+		/* store the MSDU ID for each MSDU */
+		msdu_id = ol_tx_desc_id(pdev, tx_desc);
+		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
+		if (OL_TX_MSDU_ID_STORAGE_ERR(msdu_id_storage)) {
+			/*
+			 * Send the prior frames as a batch,
+			 * then send this frame as a single frame,
+			 * then resume handling the remaining frames.
+			 */
+			if (head_msdu)
+				ol_tx_send_batch(pdev, head_msdu, num_msdus);
+
+			prev = NULL;
+			head_msdu = prev;
+			num_msdus = 0;
+
+			if (htt_tx_send_std(pdev->htt_pdev, msdu, msdu_id)) {
+				ol_tx_target_credit_incr(pdev, msdu);
+				ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
+							     1 /* error */);
+			}
+		} else {
+			*msdu_id_storage = msdu_id;
+			num_msdus++;
+		}
+		sctx->frms--;
+	}
+
+	/*Send Batch Of Frames*/
+	if (head_msdu)
+		ol_tx_send_batch(pdev, head_msdu, num_msdus);
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_tx_sched_ctx sctx;
+	u_int32_t credit;
+
+	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+	if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
+		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+		return;
+	}
+	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
+
+	ol_tx_sched_log(pdev);
+	/*adf_os_print("BEFORE tx sched:\n");*/
+	/*ol_tx_queues_display(pdev);*/
+	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+	TAILQ_INIT(&sctx.head);
+	sctx.frms = 0;
+
+	ol_tx_sched_select_init(pdev);
+	while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
+		int num_credits;
+		qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+		credit = qdf_atomic_read(&pdev->target_tx_credit);
+		num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
+		if (num_credits > 0) {
+#if DEBUG_HTT_CREDIT
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+				  " <HTT> Decrease credit %d - %d = %d.\n",
+				  qdf_atomic_read(&pdev->target_tx_credit),
+				  num_credits,
+				  qdf_atomic_read(&pdev->target_tx_credit) -
+				  num_credits);
+#endif
+			qdf_atomic_add(-num_credits, &pdev->target_tx_credit);
+		}
+		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
+		if (num_credits == 0)
+			break;
+	}
+	ol_tx_sched_dispatch(pdev, &sctx);
+
+	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
+	/*adf_os_print("AFTER tx sched:\n");*/
+	/*ol_tx_queues_display(pdev);*/
+
+	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
+	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
+	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+void *
+ol_tx_sched_attach(
+	struct ol_txrx_pdev_t *pdev)
+{
+	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
+	return ol_tx_sched_init(pdev);
+}
+
+void
+ol_tx_sched_detach(
+	struct ol_txrx_pdev_t *pdev)
+{
+	if (pdev->tx_sched.scheduler) {
+		qdf_mem_free(pdev->tx_sched.scheduler);
+		pdev->tx_sched.scheduler = NULL;
+	}
+}
+
+/*--- debug functions -------------------------------------------------------*/
+
+#if defined(DEBUG_HL_LOGGING)
+
+static void
+ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
+{
+	u_int8_t  *buf;
+	u_int32_t *active_bitmap;
+	int i, j, num_cats_active;
+	int active, frms, bytes;
+	int credit;
+
+	/* don't bother recording state if credit is zero */
+	credit = qdf_atomic_read(&pdev->target_tx_credit);
+	if (credit == 0)
+		return;
+
+
+	/*
+	 * See how many TIDs are active, so queue state can be stored only
+	 * for those TIDs.
+	 * Do an initial iteration through all categories to see if any
+	 * are active.  Doing an extra iteration is inefficient, but
+	 * efficiency is not a dominant concern when logging is enabled.
+	 */
+	num_cats_active = 0;
+	for (i = 0; i < OL_TX_SCHED_NUM_CATEGORIES; i++) {
+		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
+		if (active)
+			num_cats_active++;
+	}
+	/* don't bother recording state if there are no active queues */
+	if (num_cats_active == 0)
+		return;
+
+
+	ol_tx_queue_log_sched(pdev, credit, &num_cats_active,
+			      &active_bitmap, &buf);
+
+	if (num_cats_active == 0)
+		return;
+
+	*active_bitmap = 0;
+	for (i = 0, j = 0;
+			i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
+			i++) {
+		u_int8_t *p;
+		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
+		if (!active)
+			continue;
+
+		p = &buf[j*6];
+		p[0]   = (frms >> 0) & 0xff;
+		p[1] = (frms >> 8) & 0xff;
+
+		p[2] = (bytes >> 0) & 0xff;
+		p[3] = (bytes >> 8) & 0xff;
+		p[4] = (bytes >> 16) & 0xff;
+		p[5] = (bytes >> 24) & 0xff;
+		j++;
+		*active_bitmap |= 1 << i;
+	}
+}
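A minimal sketch of how one of the 6-byte per-category records packed above could be decoded; it simply mirrors the little-endian layout written in the loop (2 bytes of frame count, then 4 bytes of byte count) and is not part of the driver:

static void decode_sched_log_record(const u_int8_t *p, int *frms, int *bytes)
{
	*frms  = p[0] | (p[1] << 8);
	*bytes = p[2] | (p[3] << 8) | (p[4] << 16) | (p[5] << 24);
}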
+
+#endif /* defined(DEBUG_HL_LOGGING) */
+
+void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
+{
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
+}
+
+void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
+{
+	OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
+}
+
+void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
+{
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */

+ 198 - 0
core/dp/txrx/ol_tx_sched.h

@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_sched.h
+ * @brief API definitions for the tx scheduler module within the data SW.
+ */
+#ifndef _OL_TX_SCHED__H_
+#define _OL_TX_SCHED__H_
+
+#include <qdf_types.h>
+
+enum ol_tx_queue_action {
+	OL_TX_ENQUEUE_FRAME,
+	OL_TX_DELETE_QUEUE,
+	OL_TX_PAUSE_QUEUE,
+	OL_TX_UNPAUSE_QUEUE,
+	OL_TX_DISCARD_FRAMES,
+};
+
+struct ol_tx_sched_notify_ctx_t {
+	int event;
+	struct ol_tx_frms_queue_t *txq;
+	union {
+		int ext_tid;
+		struct ol_txrx_msdu_info_t *tx_msdu_info;
+	} info;
+	int frames;
+	int bytes;
+};
+
+#if defined(CONFIG_HL_SUPPORT)
+
+void
+ol_tx_sched_notify(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_sched_notify_ctx_t *ctx);
+
+void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev);
+
+u_int16_t
+ol_tx_sched_discard_select(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t frms,
+		ol_tx_desc_list *tx_descs,
+		bool force);
+
+void *
+ol_tx_sched_attach(struct ol_txrx_pdev_t *pdev);
+
+void
+ol_tx_sched_detach(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev);
+
+void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev);
+
+#else
+
+static inline void
+ol_tx_sched_notify(
+		struct ol_txrx_pdev_t *pdev,
+		struct ol_tx_sched_notify_ctx_t *ctx)
+{
+	return;
+}
+
+static inline void
+ol_tx_sched(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+static inline u_int16_t
+ol_tx_sched_discard_select(
+		struct ol_txrx_pdev_t *pdev,
+		u_int16_t frms,
+		ol_tx_desc_list *tx_descs,
+		bool force)
+{
+	return 0;
+}
+
+static inline void *
+ol_tx_sched_attach(struct ol_txrx_pdev_t *pdev)
+{
+	return NULL;
+}
+
+static inline void
+ol_tx_sched_detach(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+static inline void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+static inline void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+static inline void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+
+#endif /* defined(CONFIG_HL_SUPPORT) */
+
+#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
+/*
+ * HL needs to keep track of the amount of credit available to download
+ * tx frames to the target - the download scheduler decides when to
+ * download frames, and which frames to download, based on the credit
+ * availability.
+ * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
+ * of the target_tx_credit, to determine when to poll for tx completion
+ * messages.
+ */
+
+static inline void
+ol_tx_target_credit_adjust(int factor,
+			   struct ol_txrx_pdev_t *pdev,
+			   qdf_nbuf_t msdu)
+{
+	qdf_atomic_add(factor * htt_tx_msdu_credit(msdu),
+		       &pdev->target_tx_credit);
+}
+
+static inline void ol_tx_target_credit_decr(struct ol_txrx_pdev_t *pdev,
+					    qdf_nbuf_t msdu)
+{
+	ol_tx_target_credit_adjust(-1, pdev, msdu);
+}
+
+static inline void ol_tx_target_credit_incr(struct ol_txrx_pdev_t *pdev,
+					    qdf_nbuf_t msdu)
+{
+	ol_tx_target_credit_adjust(1, pdev, msdu);
+}
+#else
+/*
+ * LL does not need to keep track of target credit.
+ * Since the host tx descriptor pool size matches the target's,
+ * we know the target has space for the new tx frame if the host's
+ * tx descriptor allocation succeeded.
+ */
+static inline void
+ol_tx_target_credit_adjust(int factor,
+			   struct ol_txrx_pdev_t *pdev,
+			   qdf_nbuf_t msdu)
+{
+	return;
+}
+
+static inline void ol_tx_target_credit_decr(struct ol_txrx_pdev_t *pdev,
+					    qdf_nbuf_t msdu)
+{
+	return;
+}
+
+static inline void ol_tx_target_credit_incr(struct ol_txrx_pdev_t *pdev,
+					    qdf_nbuf_t msdu)
+{
+	return;
+}
+#endif
+#endif /* _OL_TX_SCHED__H_ */

+ 267 - 83
core/dp/txrx/ol_tx_send.c

@@ -47,15 +47,20 @@
 #include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
 #include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
 #ifdef QCA_COMPUTE_TX_DELAY
+#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
 #endif
 #include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
 #include <ol_osif_txrx_api.h>
 #include <ol_tx.h>              /* ol_tx_reinject */
 
 #include <ol_cfg.h>             /* ol_cfg_is_high_latency */
+#include <ol_tx_sched.h>
 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
 #endif
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+
 
 #ifdef TX_CREDIT_RECLAIM_SUPPORT
 
@@ -73,7 +78,8 @@
 
 #endif /* TX_CREDIT_RECLAIM_SUPPORT */
 
-#if defined(TX_CREDIT_RECLAIM_SUPPORT)
+#if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
+
 /*
  * HL needs to keep track of the amount of credit available to download
  * tx frames to the target - the download scheduler decides when to
@@ -83,53 +89,87 @@
  * of the target_tx_credit, to determine when to poll for tx completion
  * messages.
  */
-#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) \
-	qdf_atomic_add(	\
-		factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
-#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
-	OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
-#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
-	OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
-#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
-	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
-#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
-	qdf_atomic_add(delta, &pdev->target_tx_credit)
+static inline void
+ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
+}
+
+static inline void
+ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+	qdf_atomic_add(delta, &pdev->target_tx_credit);
+}
 #else
-/*
- * LL does not need to keep track of target credit.
- * Since the host tx descriptor pool size matches the target's,
- * we know the target has space for the new tx frame if the host's
- * tx descriptor allocation succeeded.
- */
-#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu)  /* no-op */
-#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu)    /* no-op */
-#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu)    /* no-op */
-#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta)       /* no-op */
-#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta)       /* no-op */
+
+static inline void
+ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+	return;
+}
+
+static inline void
+ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
+{
+	return;
+}
 #endif
 
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)				\
-	do {								\
-		struct ol_txrx_vdev_t *vdev;				\
-		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {	\
-			if (qdf_atomic_read(&vdev->os_q_paused) &&	\
-			    (vdev->tx_fl_hwm != 0)) {			\
-				qdf_spin_lock(&pdev->tx_mutex);		\
-				if (pdev->tx_desc.num_free >		\
-				    vdev->tx_fl_hwm) {			\
-					qdf_atomic_set(&vdev->os_q_paused, 0); \
-					qdf_spin_unlock(&pdev->tx_mutex); \
-					ol_txrx_flow_control_cb(vdev, true);\
-				}					\
-				else {					\
-					qdf_spin_unlock(&pdev->tx_mutex); \
-				}					\
-			}						\
-		}							\
-	} while (0)
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+
+/**
+ * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
+ * @pdev: physical device object
+ *
+ *
+ */
+static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+	struct ol_txrx_vdev_t *vdev;
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		if (qdf_atomic_read(&vdev->os_q_paused) &&
+		    (vdev->tx_fl_hwm != 0)) {
+			qdf_spin_lock(&pdev->tx_mutex);
+			if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
+				qdf_atomic_set(&vdev->os_q_paused, 0);
+				qdf_spin_unlock(&pdev->tx_mutex);
+				ol_txrx_flow_control_cb(vdev, true);
+			} else {
+				qdf_spin_unlock(&pdev->tx_mutex);
+			}
+		}
+	}
+}
+#elif defined(CONFIG_HL_SUPPORT) && defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+
+static void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+	struct ol_txrx_vdev_t *vdev;
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		if (qdf_atomic_read(&vdev->os_q_paused) &&
+		    (vdev->tx_fl_hwm != 0)) {
+			qdf_spin_lock(&pdev->tx_mutex);
+			if (((ol_tx_desc_pool_size_hl(
+					vdev->pdev->ctrl_pdev) >> 1)
+					- TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)
+					- qdf_atomic_read(&vdev->tx_desc_count)
+					> vdev->tx_fl_hwm) {
+				qdf_atomic_set(&vdev->os_q_paused, 0);
+				qdf_spin_unlock(&pdev->tx_mutex);
+				vdev->osif_flow_control_cb(vdev, true);
+			} else {
+				qdf_spin_unlock(&pdev->tx_mutex);
+			}
+		}
+	}
+}
 #else
-#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
+
+static inline void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
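As a worked example of the CONFIG_PER_VDEV_TX_DESC_POOL test in the HL branch above, assume a hypothetical descriptor pool of 1000 entries, a vdev currently holding 250 descriptors, and a high-water mark of 100 (TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED is 100, per ol_txrx.h in this change):

/*
 * available = (pool_size >> 1) - mgmt_reserved - vdev_tx_desc_count
 *           = (1000 >> 1)      - 100           - 250
 *           = 150
 * 150 > tx_fl_hwm (100), so the OS queue for this vdev is unpaused and
 * osif_flow_control_cb(vdev, true) is invoked.
 */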
 
 static inline uint16_t
@@ -145,7 +185,7 @@ ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
 			      qdf_nbuf_len(msdu));
 
 	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
-	OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
+	ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
 	OL_TX_CREDIT_RECLAIM(pdev);
 
 	/*
@@ -190,7 +230,7 @@ ol_tx_send(struct ol_txrx_pdev_t *pdev,
 				vdev_id));
 	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
 	if (qdf_unlikely(failed)) {
-		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
+		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
 	}
 }
@@ -212,7 +252,7 @@ ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
 		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
 		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);
 
-		OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
+		ol_tx_target_credit_incr(pdev, rejected);
 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
 
 		rejected = next;
@@ -235,7 +275,7 @@ ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
 	if (failed) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 			   "Error: freeing tx frame after htt_tx failed");
-		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
+		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
 	}
 }
@@ -266,7 +306,7 @@ ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
 	}
 
 	if (status != A_OK) {
-		OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
+		ol_tx_target_credit_incr(pdev, msdu);
 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
 					     1 /* download err */);
 	} else {
@@ -340,9 +380,15 @@ static void
 ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
 		    enum htt_tx_status status,
 		    uint16_t *desc_ids, int num_msdus);
-#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
+
 #else
-#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus)  /* no-op */
+static inline void
+ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
+		    enum htt_tx_status status,
+		    uint16_t *desc_ids, int num_msdus)
+{
+	return;
+}
 #endif /* QCA_COMPUTE_TX_DELAY */
 
 #ifndef OL_TX_RESTORE_HDR
@@ -469,8 +515,11 @@ void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
 {
 	ol_tx_target_credit_update(pdev, credits);
 
+	if (pdev->cfg.is_high_latency)
+		ol_tx_sched(pdev);
+
 	/* UNPAUSE OS Q */
-	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
+	ol_tx_flow_ct_unpause_os_q(pdev);
 }
 
 /* WARNING: ol_tx_inspect_handler()'s bahavior is similar to that of
@@ -495,7 +544,7 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 	ol_tx_desc_list tx_descs;
 	TAILQ_INIT(&tx_descs);
 
-	OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);
+	ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
 
 	for (i = 0; i < num_msdus; i++) {
 		tx_desc_id = desc_ids[i];
@@ -507,6 +556,7 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 			qdf_nbuf_data_addr(netbuf),
 			sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status));
 		qdf_runtime_pm_put();
+		ol_tx_desc_update_group_credit(pdev, tx_desc_id, 1, 0, status);
 		/* Per SDU update of byte count */
 		byte_cnt += qdf_nbuf_len(netbuf);
 		if (OL_TX_DESC_NO_REFS(tx_desc)) {
@@ -540,14 +590,158 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 					   status != htt_tx_status_ok);
 	}
 
-	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
+	if (pdev->cfg.is_high_latency) {
+		/*
+		 * Credit was already explicitly updated by HTT,
+		 * but update the number of available tx descriptors,
+		 * then invoke the scheduler, since new credit is probably
+		 * available now.
+		 */
+		qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
+		ol_tx_sched(pdev);
+	} else {
+		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
+	}
 
 	/* UNPAUSE OS Q */
-	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
+	ol_tx_flow_ct_unpause_os_q(pdev);
 	/* Do one shot statistics */
 	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
 }
 
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
+		u_int16_t tx_desc_id, int credit, u_int8_t absolute,
+		enum htt_tx_status status)
+{
+	uint8_t i, is_member;
+	uint16_t vdev_id_mask;
+	struct ol_tx_desc_t *tx_desc;
+
+	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
+	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+		vdev_id_mask =
+			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
+					pdev->txq_grps[i].membership);
+		is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
+				tx_desc->vdev->vdev_id);
+		if (is_member) {
+			ol_txrx_update_group_credit(&pdev->txq_grps[i],
+						    credit, absolute);
+			break;
+		}
+	}
+	ol_tx_update_group_credit_stats(pdev);
+}
+
+#ifdef DEBUG_HL_LOGGING
+
+void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	uint16_t curr_index;
+	uint8_t i;
+
+	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+	pdev->grp_stats.last_valid_index++;
+	if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
+				- 1)) {
+		pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
+		pdev->grp_stats.wrap_around = 1;
+	}
+	curr_index = pdev->grp_stats.last_valid_index;
+
+	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+		pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
+			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
+					pdev->txq_grps[i].membership);
+		pdev->grp_stats.stats[curr_index].grp[i].credit =
+			qdf_atomic_read(&pdev->txq_grps[i].credit);
+	}
+
+	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+}
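A short trace of the index handling above, with OL_TX_GROUP_STATS_LOG_SIZE of 128 as defined in ol_txrx_types.h:

/*
 * last_valid_index is reset to -1 by ol_tx_clear_group_credit_stats, so the
 * first update writes slot 0.  Subsequent updates advance the index:
 *   ... 126 -> 127 -> (128 exceeds 127: 128 - 128 = 0, wrap_around = 1) -> 1
 * Once wrap_around is set, ol_tx_dump_group_credit_stats walks backwards
 * through all 128 slots instead of stopping when it reaches index 0.
 */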
+
+void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	uint16_t i, j, is_break = 0;
+	int16_t curr_index, old_index, wrap_around;
+	uint16_t curr_credit, old_credit, mem_vdevs;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "Group credit stats:");
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "  No: GrpID: Credit: Change: vdev_map");
+
+	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+	curr_index = pdev->grp_stats.last_valid_index;
+	wrap_around = pdev->grp_stats.wrap_around;
+	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+
+	if (curr_index < 0) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "Not initialized");
+		return;
+	}
+
+	for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
+		old_index = curr_index - 1;
+		if (old_index < 0) {
+			if (wrap_around == 0)
+				is_break = 1;
+			else
+				old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
+		}
+
+		for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
+			qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+			curr_credit =
+				pdev->grp_stats.stats[curr_index].
+								grp[j].credit;
+			if (!is_break)
+				old_credit =
+					pdev->grp_stats.stats[old_index].
+								grp[j].credit;
+
+			mem_vdevs =
+				pdev->grp_stats.stats[curr_index].grp[j].
+								member_vdevs;
+			qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+
+			if (!is_break)
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					  QDF_TRACE_LEVEL_ERROR,
+					  "%4d: %5d: %6d %6d %8x",
+					  curr_index, j,
+					  curr_credit,
+					  (curr_credit - old_credit),
+					  mem_vdevs);
+			else
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					  QDF_TRACE_LEVEL_ERROR,
+					  "%4d: %5d: %6d %6s %8x",
+					  curr_index, j,
+					  curr_credit, "NA", mem_vdevs);
+		}
+
+		if (is_break)
+			break;
+
+		curr_index = old_index;
+	}
+}
+
+void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
+	qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
+	pdev->grp_stats.last_valid_index = -1;
+	pdev->grp_stats.wrap_around = 0;
+	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
+}
+#endif
+#endif
+
 /*
  * ol_tx_single_completion_handler performs the same tx completion
  * processing as ol_tx_completion_handler, but for a single frame.
@@ -581,8 +775,18 @@ ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
 			      qdf_atomic_read(&pdev->target_tx_credit),
 			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);
 
-
-	qdf_atomic_add(1, &pdev->target_tx_credit);
+	if (pdev->cfg.is_high_latency) {
+		/*
+		 * Credit was already explicitly updated by HTT,
+		 * but update the number of available tx descriptors,
+		 * then invoke the scheduler, since new credit is probably
+		 * available now.
+		 */
+		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
+		ol_tx_sched(pdev);
+	} else {
+		qdf_atomic_add(1, &pdev->target_tx_credit);
+	}
 }
 
 /* WARNING: ol_tx_inspect_handler()'s bahavior is similar to that of
@@ -649,7 +853,12 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
 			      qdf_atomic_read(&pdev->target_tx_credit) +
 			      num_msdus);
 
-	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
+	if (pdev->cfg.is_high_latency) {
+		/* credit was already explicitly updated by HTT */
+		ol_tx_sched(pdev);
+	} else {
+		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
+	}
 }
 
 #ifdef QCA_COMPUTE_TX_DELAY
@@ -777,31 +986,6 @@ ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
 }
 
 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
-static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
-					    qdf_nbuf_t tx_nbuf)
-{
-	uint8_t *hdr_ptr;
-	void *datap = qdf_nbuf_data(tx_nbuf);
-
-	if (pdev->frame_format == wlan_frm_fmt_raw) {
-		/* adjust hdr_ptr to RA */
-		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
-		hdr_ptr = wh->i_addr1;
-	} else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
-		/* adjust hdr_ptr to RA */
-		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
-		hdr_ptr = wh->i_addr1;
-	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
-		hdr_ptr = datap;
-	} else {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "Invalid standard frame type: %d",
-			  pdev->frame_format);
-		qdf_assert(0);
-		hdr_ptr = NULL;
-	}
-	return hdr_ptr;
-}
 
 static uint8_t
 ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,

+ 9 - 0
core/dp/txrx/ol_tx_send.h

@@ -35,11 +35,20 @@
 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
 #include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
 
+#if defined(CONFIG_HL_SUPPORT)
+
+static inline void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+#else
+
 /**
  * @flush the ol tx when surprise remove.
  *
  */
 void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev);
+#endif
 
 /**
  * @brief Send a tx frame to the target.

The file diff has been suppressed because it is too large
+ 658 - 54
core/dp/txrx/ol_txrx.c


+ 90 - 0
core/dp/txrx/ol_txrx.h

@@ -34,10 +34,100 @@
 
 void ol_txrx_peer_unref_delete(struct ol_txrx_peer_t *peer);
 
+/**
+ * ol_tx_desc_pool_size_hl() - allocate tx descriptor pool size for HL systems
+ * @ctrl_pdev: the control pdev handle
+ *
+ * Return: allocated pool size
+ */
+u_int16_t
+ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
+
 #ifndef OL_TX_AVG_FRM_BYTES
 #define OL_TX_AVG_FRM_BYTES 1000
 #endif
 
+#ifndef OL_TX_DESC_POOL_SIZE_MIN_HL
+#define OL_TX_DESC_POOL_SIZE_MIN_HL 500
+#endif
+
+#ifndef OL_TX_DESC_POOL_SIZE_MAX_HL
+#define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
+#endif
+
+
+#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+#define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
+#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
+#endif
+
+#ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE
+#define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20
+#endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+
+void
+ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag);
+#else
+
+static inline void
+ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+{
+	return;
+}
+#endif
+
+#ifdef CONFIG_HL_SUPPORT
+
+void
+ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr);
+
+void
+ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
+			   ol_txrx_vdev_handle vdev,
+			   uint8_t *peer_id);
+
+bool
+is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer);
+
+void
+ol_txrx_update_last_real_peer(
+	ol_txrx_pdev_handle pdev,
+	struct ol_txrx_peer_t *peer,
+	uint8_t *peer_id, bool restore_last_peer);
+#else
+
+static inline void
+ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+{
+	return;
+}
+
+static inline void
+ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
+			   ol_txrx_vdev_handle vdev, uint8_t *peer_id)
+{
+	return;
+}
+
+static inline bool
+is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+{
+	return  false;
+}
+
+static inline void
+ol_txrx_update_last_real_peer(
+	ol_txrx_pdev_handle pdev,
+	struct ol_txrx_peer_t *peer,
+	uint8_t *peer_id, bool restore_last_peer)
+
+{
+	return;
+}
+#endif
+
 ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
 
 void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn);

+ 40 - 5
core/dp/txrx/ol_txrx_internal.h

@@ -559,6 +559,36 @@ NOT_IP_TCP:
 				     is_mcast);				\
 	} while (false)
 
+#ifdef CONFIG_HL_SUPPORT
+
+	/**
+	 * ol_rx_err_inv_get_wifi_header() - retrieve wifi header
+	 * @pdev: handle to the physical device
+	 * @rx_msdu: msdu of which header needs to be retrieved
+	 *
+	 * Return: wifi header
+	 */
+	static inline
+	struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
+		struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
+	{
+		return NULL;
+	}
+#else
+
+	static inline
+	struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
+		struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
+	{
+		struct ieee80211_frame *wh = NULL;
+		if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi)
+			/* For Windows, it is always the native wifi header. */
+			wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu);
+
+		return wh;
+	}
+#endif
+
 #define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu)			\
 	do {								\
 		struct ieee80211_frame *wh = NULL;			\
@@ -568,11 +598,7 @@ NOT_IP_TCP:
 		/*wh = (struct ieee80211_frame *) */			\
 		/*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
 		/* this only apply to LL device.*/			\
-		if (ol_cfg_frame_type(pdev->ctrl_pdev) ==		\
-		    wlan_frm_fmt_native_wifi) {				\
-			/* For windows, it is always native wifi header .*/ \
-			wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu); \
-		}							\
+		wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \
 		ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev,		\
 					      wh, OL_RX_ERR_UNKNOWN_PEER); \
 	} while (false)
@@ -734,4 +760,13 @@ NOT_IP_TCP:
 
 #endif /* FEATURE_TSO_DEBUG */
 
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+void
+ol_txrx_update_group_credit(
+	struct ol_tx_queue_group_t *group,
+	int32_t credit,
+	u_int8_t absolute);
+#endif
+
 #endif /* _OL_TXRX_INTERNAL__H_ */

+ 68 - 0
core/dp/txrx/ol_txrx_peer_find.c

@@ -398,18 +398,86 @@ void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
 
 /*=== function definitions for message handling =============================*/
 
+#if defined(CONFIG_HL_SUPPORT)
+
 void
 ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
 		       uint16_t peer_id,
 		       uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
 {
 	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
+	if (!tx_ready) {
+		struct ol_txrx_peer_t *peer;
+		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+		if (!peer) {
+			/* ol_txrx_peer_detach called before peer map arrived */
+			return;
+		} else {
+			if (tx_ready) {
+				int i;
+				/* unpause all tx queues now, since the
+				 * target is ready
+				 */
+				for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs);
+									i++)
+					ol_txrx_peer_tid_unpause(peer, i);
+
+			} else {
+				/* walk through paused mgmt queue,
+				 * update tx descriptors
+				 */
+				ol_tx_queue_decs_reinit(peer, peer_id);
+
+				/* keep non-mgmt tx queues paused until assoc
+				 * is finished; tx queues were paused in
+				 * ol_txrx_peer_attach */
+				/* unpause tx mgmt queue */
+				ol_txrx_peer_tid_unpause(peer,
+							 HTT_TX_EXT_TID_MGMT);
+			}
+		}
+	}
+}
+
+void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
+{
+	struct ol_txrx_peer_t *peer;
+	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+	if (peer) {
+		int i;
+		/*
+		 * Unpause all data tx queues now that the target is ready.
+		 * The mgmt tx queue was not paused, so skip it.
+		 */
+		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
+			if (i == HTT_TX_EXT_TID_MGMT)
+				continue; /* mgmt tx queue was not paused */
+
+			ol_txrx_peer_tid_unpause(peer, i);
+		}
+	}
+}
+#else
+
+void
+ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
+		       uint16_t peer_id,
+		       uint8_t vdev_id,
+		       uint8_t *peer_mac_addr,
+		       int tx_ready)
+{
+	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
+
 }
 
 void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 {
+	return;
 }
 
+#endif
+
+
 void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 {
 	struct ol_txrx_peer_t *peer;

+ 187 - 0
core/dp/txrx/ol_txrx_types.h

@@ -123,6 +123,42 @@ enum ol_tx_frm_type {
 	OL_TX_FRM_NO_FREE, /* frame requires special tx completion callback */
 };
 
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+#define MAX_NO_PEERS_IN_LIMIT (2*10 + 2)
+
+enum ol_tx_peer_bal_state {
+	ol_tx_peer_bal_enable = 0,
+	ol_tx_peer_bal_disable,
+};
+
+enum ol_tx_peer_bal_timer_state {
+	ol_tx_peer_bal_timer_disable = 0,
+	ol_tx_peer_bal_timer_active,
+	ol_tx_peer_bal_timer_inactive,
+};
+
+struct ol_tx_limit_peer_t {
+	u_int16_t limit_flag;
+	u_int16_t peer_id;
+	u_int16_t limit;
+};
+
+enum tx_peer_level {
+	TXRX_IEEE11_B = 0,
+	TXRX_IEEE11_A_G,
+	TXRX_IEEE11_N,
+	TXRX_IEEE11_AC,
+	TXRX_IEEE11_MAX,
+};
+
+struct tx_peer_threshold {
+	u_int32_t tput_thresh;
+	u_int32_t tx_limit;
+};
+#endif
+
+
 struct ol_tx_desc_t {
 	qdf_nbuf_t netbuf;
 	void *htt_tx_desc;
@@ -151,11 +187,17 @@ struct ol_tx_desc_t {
 	 * This field is filled in with the ol_tx_frm_type enum.
 	 */
 	uint8_t pkt_type;
+#if defined(CONFIG_HL_SUPPORT)
+	struct ol_txrx_vdev_t *vdev;
+#endif
+	void *txq;
+
 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
 	/* used by tx encap, to restore the os buf start offset
 	   after tx complete */
 	uint8_t orig_l2_hdr_bytes;
 #endif
+
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	struct ol_tx_flow_pool_t *pool;
 #endif
@@ -197,6 +239,19 @@ struct ol_rx_reorder_timeout_list_elem_t {
 		(((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
 		TXRX_WMM_AC_BE)
 
+enum {
+	OL_TX_SCHED_WRR_ADV_CAT_BE,
+	OL_TX_SCHED_WRR_ADV_CAT_BK,
+	OL_TX_SCHED_WRR_ADV_CAT_VI,
+	OL_TX_SCHED_WRR_ADV_CAT_VO,
+	OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA,
+	OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT,
+	OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA,
+	OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT,
+
+	OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES /* must be last */
+};
+
 struct ol_tx_reorder_cat_timeout_t {
 	TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
 	qdf_timer_t timer;
@@ -204,6 +259,11 @@ struct ol_tx_reorder_cat_timeout_t {
 	struct ol_txrx_pdev_t *pdev;
 };
 
+enum ol_tx_scheduler_status {
+	ol_tx_scheduler_idle = 0,
+	ol_tx_scheduler_running,
+};
+
 enum ol_tx_queue_status {
 	ol_tx_queue_empty = 0,
 	ol_tx_queue_active,
@@ -224,6 +284,19 @@ enum {
 	ol_tx_aggr_in_progress,
 };
 
+#define OL_TX_MAX_GROUPS_PER_QUEUE 1
+#define OL_TX_MAX_VDEV_ID 16
+#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership)           \
+	(((_membership) & 0xffff0000) >> 16)
+#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id)   \
+	((_mask >> _vdev_id) & 0x01)
+#define OL_TXQ_GROUP_AC_MASK_GET(_membership)           \
+	((_membership) & 0x0000ffff)
+#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask)   \
+	((_mask >> _ac_mask) & 0x01)
+#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask)     \
+	((_vdev_mask << 16) | _ac_mask)
+
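A worked example of the membership encoding above, with hypothetical masks (vdevs 0 and 1 as members, all four ACs enabled):

/*
 *   membership = OL_TXQ_GROUP_MEMBERSHIP_GET(0x0003, 0x000f) = 0x0003000f
 *   OL_TXQ_GROUP_VDEV_ID_MASK_GET(0x0003000f)      -> 0x0003
 *   OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0003, 1)   -> 1  (vdev 1 is a member)
 *   OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(0x0003, 2)   -> 0  (vdev 2 is not)
 *   OL_TXQ_GROUP_AC_MASK_GET(0x0003000f)           -> 0x000f
 */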
 struct ol_tx_frms_queue_t {
 	/* list_elem -
 	 * Allow individual tx frame queues to be linked together into
@@ -241,6 +314,10 @@ struct ol_tx_frms_queue_t {
 	uint32_t bytes;
 	ol_tx_desc_list head;
 	enum ol_tx_queue_status flag;
+	struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+	struct ol_txrx_peer_t *peer;
+#endif
 };
 
 enum {
@@ -272,6 +349,9 @@ struct ol_mac_addr {
 	uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
 };
 
+struct ol_tx_sched_t;
+
+
 #ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
 #define OL_TXRX_NUM_LOCAL_PEER_IDS 33   /* default */
 #endif
@@ -322,6 +402,24 @@ enum throttle_phase {
 
 typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);
 
+struct ol_tx_queue_group_t {
+	qdf_atomic_t credit;
+	u_int32_t membership;
+};
+#define OL_TX_MAX_TXQ_GROUPS 2
+
+#define OL_TX_GROUP_STATS_LOG_SIZE 128
+struct ol_tx_group_credit_stats_t {
+	struct {
+		struct {
+			u_int16_t member_vdevs;
+			u_int16_t credit;
+		} grp[OL_TX_MAX_TXQ_GROUPS];
+	} stats[OL_TX_GROUP_STATS_LOG_SIZE];
+	u_int16_t last_valid_index;
+	u_int16_t wrap_around;
+};
+
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 
 /**
@@ -558,6 +656,7 @@ struct ol_txrx_pdev_t {
 	struct {
 		uint16_t pool_size;
 		uint16_t num_free;
+		union ol_tx_desc_list_elem_t *array;
 		union ol_tx_desc_list_elem_t *freelist;
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 		uint8_t num_invalid_bin;
@@ -676,6 +775,24 @@ struct ol_txrx_pdev_t {
 	bool host_80211_enable;
 #endif
 
+	/*
+	 * tx_sched only applies for HL, but is defined unconditionally
+	 * rather than only if defined(CONFIG_HL_SUPPORT).
+	 * This is because the struct occupies only a few bytes, and
+	 * defining it unconditionally avoids wrapping every reference
+	 * to its members in "defined(CONFIG_HL_SUPPORT)" conditional
+	 * compilation.
+	 * If this struct grows to a non-trivial size, it should be
+	 * conditionally compiled to apply only if
+	 * defined(CONFIG_HL_SUPPORT).
+	 */
+	qdf_spinlock_t tx_queue_spinlock;
+	struct {
+		enum ol_tx_scheduler_status tx_sched_status;
+		struct ol_tx_sched_t *scheduler;
+		struct ol_tx_frms_queue_t *last_used_txq;
+	} tx_sched;
 	/*
 	 * tx_queue only applies for HL, but is defined unconditionally to avoid
 	 * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
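
Within the pdev, tx_queue_spinlock protects the HL tx queue state, and tx_sched.tx_sched_status records whether the download scheduler is currently running so a second context does not re-enter it. A minimal sketch of that guard, under the assumption that the real entry point performs queue selection and download where the placeholder comment sits (the function below is hypothetical, not the actual scheduler API):

```c
/* Illustrative sketch only -- not the actual scheduler entry point. */
static void
ol_tx_sched_try_run_example(struct ol_txrx_pdev_t *pdev)
{
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	if (pdev->tx_sched.tx_sched_status == ol_tx_scheduler_running) {
		/* already running in another context; that pass will pick
		 * up any newly queued frames */
		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
		return;
	}
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	/* ... select tx queues and download frames here ... */

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
```
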
@@ -690,6 +807,20 @@ struct ol_txrx_pdev_t {
 		uint16_t rsrc_threshold_hi;
 	} tx_queue;
 
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+#define OL_TXQ_LOG_SIZE 512
+	qdf_spinlock_t txq_log_spinlock;
+	struct {
+		int size;
+		int oldest_record_offset;
+		int offset;
+		int allow_wrap;
+		u_int32_t wrapped;
+		/* aligned to u_int32_t boundary */
+		u_int8_t data[OL_TXQ_LOG_SIZE];
+	} txq_log;
+#endif
+
 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
 	qdf_spinlock_t peer_stat_mutex;
 #endif
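
The txq_log block added above (DEBUG_HL_LOGGING) is a byte-oriented circular buffer for tx queue event records: offset is the current write position, oldest_record_offset marks where a dump should start once the log has wrapped, and allow_wrap selects whether writes may overwrite old records. A simplified sketch of reserving space in it (hypothetical helper; oldest_record_offset maintenance is omitted for brevity):

```c
/* Illustrative sketch only -- not the actual txq log implementation. */
static u_int8_t *
ol_txq_log_alloc_example(struct ol_txrx_pdev_t *pdev, int bytes)
{
	u_int8_t *buf;

	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
	if (pdev->txq_log.offset + bytes > pdev->txq_log.size) {
		if (!pdev->txq_log.allow_wrap) {
			qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
			return NULL;	/* log is full, wrapping disabled */
		}
		/* restart at the beginning of the buffer */
		pdev->txq_log.offset = 0;
		pdev->txq_log.wrapped++;
	}
	buf = &pdev->txq_log.data[pdev->txq_log.offset];
	pdev->txq_log.offset += bytes;
	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
	return buf;
}
```
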
@@ -767,6 +898,35 @@ struct ol_txrx_pdev_t {
 		OL_TX_MUTEX_TYPE tso_mutex;
 	} tso_seg_pool;
 #endif
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+	struct {
+		enum ol_tx_peer_bal_state enabled;
+		qdf_spinlock_t mutex;
+		/* timer used to trigger more frames for bad peers */
+		qdf_timer_t peer_bal_timer;
+		/* peer balance timer period, in ms */
+		u_int32_t peer_bal_period_ms;
+		/* txq limit applied to peers under flow control */
+		u_int32_t peer_bal_txq_limit;
+		/* state of the peer balance timer */
+		enum ol_tx_peer_bal_timer_state peer_bal_timer_state;
+		/* number of active peers currently under
+		 * tx flow control */
+		u_int32_t peer_num;
+		/* list of peers currently under tx flow control */
+		struct ol_tx_limit_peer_t limit_list[MAX_NO_PEERS_IN_LIMIT];
+		/* per-PHY-mode threshold configuration */
+		struct tx_peer_threshold ctl_thresh[TXRX_IEEE11_MAX];
+	} tx_peer_bal;
+#endif /* CONFIG_HL_SUPPORT && QCA_BAD_PEER_TX_FLOW_CL */
+
+	struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
+#ifdef DEBUG_HL_LOGGING
+	qdf_spinlock_t grp_stat_spinlock;
+	struct ol_tx_group_credit_stats_t grp_stats;
+#endif
+	int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
 	uint8_t ocb_peer_valid;
 	struct ol_txrx_peer_t *ocb_peer;
 	ol_tx_pause_callback_fp pause_cb;
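
The tx_peer_bal block added above ties together the pieces of bad-peer tx flow control: a periodic balance timer, the list of limited peers, and ctl_thresh[], one tput_thresh/tx_limit pair per TXRX_IEEE11_* PHY mode. A sketch of how a per-peer tx limit could be chosen from that table (the inputs and helper name are hypothetical; the real policy lives in the bad-peer flow control code):

```c
/* Illustrative sketch only -- not the actual bad-peer policy. */
static u_int16_t
ol_tx_bad_peer_limit_example(struct ol_txrx_pdev_t *pdev,
			     int phy_mode, u_int32_t tput_kbps)
{
	struct tx_peer_threshold *th;

	if (phy_mode < 0 || phy_mode >= TXRX_IEEE11_MAX)
		return 0;	/* unknown mode: apply no limit */
	th = &pdev->tx_peer_bal.ctl_thresh[phy_mode];
	/* peers below the throughput threshold get their tx capped */
	return (tput_kbps < th->tput_thresh) ?
		(u_int16_t)th->tx_limit : 0;
}
```
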
@@ -831,6 +991,10 @@ struct ol_txrx_vdev_t {
 	int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
 #endif
 
+#if defined(CONFIG_HL_SUPPORT)
+	struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES];
+#endif
+
 	struct {
 		struct {
 			qdf_nbuf_t head;
@@ -854,6 +1018,16 @@ struct ol_txrx_vdev_t {
 	qdf_spinlock_t flow_control_lock;
 	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
 	void *osif_fc_ctx;
+
+#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
+	union ol_txrx_align_mac_addr_t hl_tdls_ap_mac_addr;
+	bool hlTdlsFlag;
+#endif
+
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+	qdf_atomic_t tx_desc_count;
+#endif
+
 	uint16_t wait_on_peer_id;
 	qdf_event_t wait_delete_comp;
 #if defined(FEATURE_TSO)
@@ -917,6 +1091,10 @@ enum {
 typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
 				      tx_msdu_info);
 
+#define OL_TXRX_PEER_SECURITY_MULTICAST  0
+#define OL_TXRX_PEER_SECURITY_UNICAST    1
+#define OL_TXRX_PEER_SECURITY_MAX        2
+
 struct ol_txrx_peer_t {
 	struct ol_txrx_vdev_t *vdev;
 
@@ -978,6 +1156,10 @@ struct ol_txrx_peer_t {
 			    struct ol_txrx_peer_t *peer,
 			    unsigned tid, qdf_nbuf_t msdu_list);
 
+#if defined(CONFIG_HL_SUPPORT)
+	struct ol_tx_frms_queue_t txqs[OL_TX_NUM_TIDS];
+#endif
+
 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
 	ol_txrx_peer_stats_t stats;
 #endif
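
With this change each peer carries one ol_tx_frms_queue_t per TID (txqs[OL_TX_NUM_TIDS]), while the vdev keeps a smaller set of queues (txqs[OL_TX_VDEV_NUM_QUEUES]) for traffic that is not bound to a peer-TID. A heavily simplified sketch of how the classify step might pick the destination queue (not the actual ol_tx_classify logic):

```c
/* Illustrative sketch only -- a simplification of the real classifier. */
static struct ol_tx_frms_queue_t *
ol_tx_select_txq_example(struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_peer_t *peer, int tid)
{
	if (peer && tid >= 0 && tid < OL_TX_NUM_TIDS)
		return &peer->txqs[tid];
	/* no peer-TID binding: fall back to one of the vdev queues */
	return &vdev->txqs[tid % OL_TX_VDEV_NUM_QUEUES];
}
```
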
@@ -1013,6 +1195,11 @@ struct ol_txrx_peer_t {
 	uint32_t last_pkt_tsf;
 	uint8_t last_pkt_tid;
 	uint16_t last_pkt_center_freq;
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+	u_int16_t tx_limit;
+	u_int16_t tx_limit_flag;
+	u_int16_t tx_pause_flag;
+#endif
 	qdf_time_t last_assoc_rcvd;
 	qdf_time_t last_disassoc_rcvd;
 	qdf_time_t last_deauth_rcvd;

+ 0 - 3
target/inc/wlan_tgt_def_config_hl.h

@@ -1,9 +1,6 @@
 /*
  * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
  * above copyright notice and this permission notice appear in all

Some files were not shown because too many files have changed