qcacld-3.0: Add htt apis for High Latency systems (Part 1 - HL Datapath)

Add support for HTT instance creation, initialization and
deletion; Host -> Target and Target -> Host HTT message
exchange; and the receive and transmit aspects of HTT.

CRs-Fixed: 975526
Change-Id: Ie66a83b6efe216df192b033c9d137a93fdce688f
Siddarth Poddar, 9 years ago (parent commit 1df1cd85b4)

+ 235 - 59
core/dp/htt/htt.c

@@ -49,7 +49,7 @@
 
 #define HTT_HTC_PKT_POOL_INIT_SIZE 100  /* enough for a large A-MPDU */
 
-A_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+QDF_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
 
 #ifdef IPA_OFFLOAD
 A_STATUS htt_ipa_config(htt_pdev_handle pdev, A_STATUS status)
@@ -145,6 +145,125 @@ void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
 #define NO_HTT_NEEDED false
 #endif
 
+#if defined(QCA_TX_HTT2_SUPPORT) && defined(CONFIG_HL_SUPPORT)
+
+/**
+ * htt_htc_tx_htt2_service_start() - Start TX HTT2 service
+ *
+ * @pdev: pointer to htt device.
+ * @connect_req: pointer to service connection request information
+ * @connect_resp: pointer to service connection response information
+ *
+ * Return: None
+ */
+static void
+htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
+			      HTC_SERVICE_CONNECT_REQ *connect_req,
+			      HTC_SERVICE_CONNECT_RESP *connect_resp)
+{
+	A_STATUS status;
+
+	qdf_mem_set(connect_req, 0, sizeof(HTC_SERVICE_CONNECT_REQ));
+	qdf_mem_set(connect_resp, 0, sizeof(HTC_SERVICE_CONNECT_RESP));
+
+	/* The same as HTT service but no RX. */
+	connect_req->EpCallbacks.pContext = pdev;
+	connect_req->EpCallbacks.EpTxComplete = htt_h2t_send_complete;
+	connect_req->EpCallbacks.EpSendFull = htt_h2t_full;
+	connect_req->MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
+	/* Should NOT support credit flow control. */
+	connect_req->ConnectionFlags |=
+				HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+	/* Enable HTC schedule mechanism for TX HTT2 service. */
+	connect_req->ConnectionFlags |= HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE;
+
+	connect_req->ServiceID = HTT_DATA2_MSG_SVC;
+
+	status = htc_connect_service(pdev->htc_pdev, connect_req, connect_resp);
+
+	if (status != A_OK) {
+		pdev->htc_tx_htt2_endpoint = ENDPOINT_UNUSED;
+		pdev->htc_tx_htt2_max_size = 0;
+	} else {
+		pdev->htc_tx_htt2_endpoint = connect_resp->Endpoint;
+		pdev->htc_tx_htt2_max_size = HTC_TX_HTT2_MAX_SIZE;
+	}
+
+	qdf_print("TX HTT %s, ep %d size %d\n",
+		  (status == A_OK ? "ON" : "OFF"),
+		  pdev->htc_tx_htt2_endpoint,
+		  pdev->htc_tx_htt2_max_size);
+}
+#else
+
+static inline void
+htt_htc_tx_htt2_service_start(struct htt_pdev_t *pdev,
+			      HTC_SERVICE_CONNECT_REQ *connect_req,
+			      HTC_SERVICE_CONNECT_RESP *connect_resp)
+{
+	return;
+}
+#endif
+
+/**
+ * htt_htc_credit_flow_disable() - disable flow control for
+ *				   HTT data message service
+ *
+ * @pdev: pointer to htt device.
+ * @connect_req: pointer to service connection request information
+ *
+ * The HTC credit mechanism is disabled based on
+ * default_tx_comp_req: with default_tx_comp_req set,
+ * disabling the HTC credit mechanism would lower throughput,
+ * since txrx packet downloads would then be limited by OTA
+ * completion.
+ *
+ * Return: None
+ */
+static
+void htt_htc_credit_flow_disable(struct htt_pdev_t *pdev,
+				 HTC_SERVICE_CONNECT_REQ *connect_req)
+{
+	if (pdev->osdev->bus_type == QDF_BUS_TYPE_SDIO) {
+		/*
+		 * TODO: Conditional disabling will be removed once firmware
+		 * with reduced tx completion is pushed into release builds.
+		 */
+		if (!pdev->cfg.default_tx_comp_req)
+			connect_req->ConnectionFlags |=
+			HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+	} else {
+		connect_req->ConnectionFlags |=
+			HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+	}
+}
+
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+
+/**
+ * htt_dump_bundle_stats() - dump wlan stats
+ * @pdev: handle to the HTT instance
+ *
+ * Return: None
+ */
+void htt_dump_bundle_stats(htt_pdev_handle pdev)
+{
+	htc_dump_bundle_stats(pdev->htc_pdev);
+}
+
+/**
+ * htt_clear_bundle_stats() - clear wlan stats
+ * @pdev: handle to the HTT instance
+ *
+ * Return: None
+ */
+void htt_clear_bundle_stats(htt_pdev_handle pdev)
+{
+	htc_clear_bundle_stats(pdev->htc_pdev);
+}
+#endif
+
 /**
  * htt_pdev_alloc() - allocate HTT pdev
  * @txrx_pdev: txrx pdev
@@ -179,6 +298,9 @@ htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
 #ifdef ATH_11AC_TXCOMPACT
 	pdev->htt_htc_pkt_misclist = NULL;
 #endif
+
+	/* for efficiency, store a local copy of the is_high_latency flag */
+	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);
 	pdev->cfg.default_tx_comp_req =
 			!ol_cfg_tx_free_at_download(pdev->ctrl_pdev);
 
@@ -192,6 +314,13 @@ htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
 	qdf_print("ce_classify_enabled %d\n",
 		  pdev->cfg.ce_classify_enabled);
 
+	if (pdev->cfg.is_high_latency) {
+		qdf_atomic_init(&pdev->htt_tx_credit.target_delta);
+		qdf_atomic_init(&pdev->htt_tx_credit.bus_delta);
+		qdf_atomic_add(HTT_MAX_BUS_CREDIT,
+			       &pdev->htt_tx_credit.bus_delta);
+	}
+
 	pdev->targetdef = htc_get_targetdef(htc_pdev);
 #if defined(HELIUMPLUS_PADDR64)
 	/* TODO: OKA: Remove hard-coding */
@@ -241,7 +370,6 @@ int
 htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
 {
 	int i;
-	enum wlan_frm_fmt frm_type;
 	int ret = 0;
 
 	ret = htt_tx_attach(pdev, desc_pool_size);
@@ -264,65 +392,112 @@ htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
 		htt_htc_pkt_free(pdev, &pkt->u.pkt);
 	}
 
-	/*
-	 * LL - download just the initial portion of the frame.
-	 * Download enough to cover the encapsulation headers checked
-	 * by the target's tx classification descriptor engine.
-	 */
-
-	/* account for the 802.3 or 802.11 header */
-	frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
-	if (frm_type == wlan_frm_fmt_native_wifi) {
-		pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
-	} else if (frm_type == wlan_frm_fmt_802_3) {
-		pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
+	if (pdev->cfg.is_high_latency) {
+		/*
+		 * HL - download the whole frame.
+		 * Specify a download length greater than the max MSDU size,
+		 * so the downloads will be limited by the actual frame sizes.
+		 */
+		pdev->download_len = 5000;
+
+		if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev))
+			pdev->tx_send_complete_part2 =
+						ol_tx_download_done_hl_free;
+		else
+			pdev->tx_send_complete_part2 =
+						ol_tx_download_done_hl_retain;
+
+		/*
+		 * CHECK THIS LATER: does the HL HTT version of
+		 * htt_rx_mpdu_desc_list_next
+		 * (which is not currently implemented) present the
+		 * adf_nbuf_data(rx_ind_msg)
+		 * as the abstract rx descriptor?
+		 * If not, the rx_fw_desc_offset initialization
+		 * here will have to be adjusted accordingly.
+		 * NOTE: for HL, because the fw rx desc is in the ind msg,
+		 * not in the rx desc, the offset should be a
+		 * negative value
+		 */
+		pdev->rx_fw_desc_offset =
+			HTT_ENDIAN_BYTE_IDX_SWAP(
+					HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
+					- HTT_RX_IND_HL_BYTES);
+
+		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
+
+		/* initialize the txrx credit count */
+		ol_tx_target_credit_update(
+				pdev->txrx_pdev, ol_cfg_target_tx_credit(
+					pdev->ctrl_pdev));
 	} else {
-		qdf_print("Unexpected frame type spec: %d\n", frm_type);
-		HTT_ASSERT0(0);
-	}
-	/*
-	 * Account for the optional L2 / ethernet header fields:
-	 * 802.1Q, LLC/SNAP
-	 */
-	pdev->download_len +=
-		HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;
-
-	/*
-	 * Account for the portion of the L3 (IP) payload that the
-	 * target needs for its tx classification.
-	 */
-	pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);
-
-	/*
-	 * Account for the HTT tx descriptor, including the
-	 * HTC header + alignment padding.
-	 */
-	pdev->download_len += sizeof(struct htt_host_tx_desc_t);
-
-	/*
-	 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
-	 * to apply for all requeued tx frames.  Thus,
-	 * pdev->download_len has to be the largest download length of
-	 * any tx frame that will be downloaded.
-	 * This maximum download length is for management tx frames,
-	 * which have an 802.11 header.
-	 */
+		enum wlan_frm_fmt frm_type;
+
+		/*
+		 * LL - download just the initial portion of the frame.
+		 * Download enough to cover the encapsulation headers checked
+		 * by the target's tx classification descriptor engine.
+		 *
+		 * For LL, the FW rx desc is directly referenced at its
+		 * location inside the rx indication message.
+		 */
+
+		/* account for the 802.3 or 802.11 header */
+		frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
+
+		if (frm_type == wlan_frm_fmt_native_wifi) {
+			pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
+		} else if (frm_type == wlan_frm_fmt_802_3) {
+			pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
+		} else {
+			qdf_print("Unexpected frame type spec: %d\n", frm_type);
+			HTT_ASSERT0(0);
+		}
+
+		/*
+		 * Account for the optional L2 / ethernet header fields:
+		 * 802.1Q, LLC/SNAP
+		 */
+		pdev->download_len +=
+			HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;
+
+		/*
+		 * Account for the portion of the L3 (IP) payload that the
+		 * target needs for its tx classification.
+		 */
+		pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);
+
+		/*
+		 * Account for the HTT tx descriptor, including the
+		 * HTC header + alignment padding.
+		 */
+		pdev->download_len += sizeof(struct htt_host_tx_desc_t);
+
+		/*
+		 * The TXCOMPACT htt_tx_sched function uses pdev->download_len
+		 * to apply for all requeued tx frames.  Thus,
+		 * pdev->download_len has to be the largest download length of
+		 * any tx frame that will be downloaded.
+		 * This maximum download length is for management tx frames,
+		 * which have an 802.11 header.
+		 */
 #ifdef ATH_11AC_TXCOMPACT
-	pdev->download_len = sizeof(struct htt_host_tx_desc_t)
-		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
-		+ HTT_TX_HDR_SIZE_802_1Q
-		+ HTT_TX_HDR_SIZE_LLC_SNAP
-		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+		pdev->download_len = sizeof(struct htt_host_tx_desc_t)
+			+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+			+ HTT_TX_HDR_SIZE_802_1Q
+			+ HTT_TX_HDR_SIZE_LLC_SNAP
+			+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
 #endif
-	pdev->tx_send_complete_part2 = ol_tx_download_done_ll;
+		pdev->tx_send_complete_part2 = ol_tx_download_done_ll;
 
-	/*
-	 * For LL, the FW rx desc is alongside the HW rx desc fields in
-	 * the htt_host_rx_desc_base struct/.
-	 */
-	pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;
+		/*
+		 * For LL, the FW rx desc is alongside the HW rx desc fields in
+		 * the htt_host_rx_desc_base struct.
+		 */
+		pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;
 
-	htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
+		htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
+	}
 
 	return 0;
 
@@ -459,9 +634,7 @@ int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
 	connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
 
 	/* disable flow control for HTT data message service */
-#ifndef HIF_SDIO
-	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
-#endif
+	htt_htc_credit_flow_disable(pdev, &connect);
 
 	/* connect to control service */
 	connect.service_id = service_id;
@@ -473,6 +646,9 @@ int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id)
 
 	htt_update_endpoint(pdev, service_id, response.Endpoint);
 
+	/* Start TX HTT2 service if the target supports it. */
+	htt_htc_tx_htt2_service_start(pdev, &connect, &response);
+
 	return 0;               /* success */
 }
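
Note on the dispatch above: htt_attach() binds the rx ring configuration
entry point once, based on pdev->cfg.is_high_latency, and later code calls
through the htt_h2t_rx_ring_cfg_msg function pointer without knowing which
latency mode is active. A minimal sketch of a caller (the wrapper name below
is hypothetical, not part of the patch):

	/* Resolves to htt_h2t_rx_ring_cfg_msg_hl on high-latency targets
	 * (bound in the HL branch of htt_attach) and to
	 * htt_h2t_rx_ring_cfg_msg_ll otherwise.
	 */
	static QDF_STATUS htt_send_rx_ring_cfg(struct htt_pdev_t *pdev)
	{
		return htt_h2t_rx_ring_cfg_msg(pdev);
	}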
 

+ 195 - 3
core/dp/htt/htt_h2t.c

@@ -86,6 +86,18 @@ void htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
 		send_complete_part2(htt_pkt->pdev_ctxt, htc_pkt->Status, netbuf,
 				    htt_pkt->msdu_id);
 	}
+
+	if (pdev->cfg.is_high_latency && !pdev->cfg.default_tx_comp_req) {
+		int32_t credit_delta;
+
+		qdf_atomic_add(1, &pdev->htt_tx_credit.bus_delta);
+		credit_delta = htt_tx_credit_update(pdev);
+
+		if (credit_delta)
+			ol_tx_credit_completion_handler(pdev->txrx_pdev,
+							credit_delta);
+	}
+
 	/* free the htt_htc_pkt / HTC_PACKET object */
 	htt_htc_pkt_free(pdev, htt_pkt);
 }
@@ -194,11 +206,21 @@ A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
 	struct htt_htc_pkt *pkt;
 	qdf_nbuf_t msg;
 	uint32_t *msg_word;
+	uint32_t msg_size;
+	uint32_t max_tx_group;
 
 	pkt = htt_htc_pkt_alloc(pdev);
 	if (!pkt)
 		return A_ERROR; /* failure */
 
+	max_tx_group = ol_tx_get_max_tx_groups_supported(pdev->txrx_pdev);
+
+	if (max_tx_group)
+		msg_size = HTT_VER_REQ_BYTES +
+			sizeof(struct htt_option_tlv_mac_tx_queue_groups_t);
+	else
+		msg_size = HTT_VER_REQ_BYTES;
+
 	/* show that this is not a tx frame download
 	 * (not required, but helpful)
 	 */
@@ -206,7 +228,7 @@ A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
 	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
 
 	/* reserve room for the HTC header */
-	msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
+	msg = qdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(msg_size),
 			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
 			     true);
 	if (!msg) {
@@ -220,7 +242,7 @@ A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
 	 * separately during the below call to qdf_nbuf_push_head.
 	 * The contribution from the HTC header is added separately inside HTC.
 	 */
-	qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);
+	qdf_nbuf_put_tail(msg, msg_size);
 
 	/* fill in the message contents */
 	msg_word = (uint32_t *) qdf_nbuf_data(msg);
@@ -231,6 +253,18 @@ A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
 	*msg_word = 0;
 	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
 
+	if (max_tx_group) {
+		*(msg_word + 1) = 0;
+
+		/* Fill Group Info */
+		HTT_OPTION_TLV_TAG_SET(*(msg_word+1),
+				       HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS);
+		HTT_OPTION_TLV_LENGTH_SET(*(msg_word+1),
+			(sizeof(struct htt_option_tlv_mac_tx_queue_groups_t)/
+			 sizeof(uint32_t)));
+		HTT_OPTION_TLV_VALUE0_SET(*(msg_word+1), max_tx_group);
+	}
+
 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
 			       htt_h2t_send_complete_free_netbuf,
 			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
@@ -246,10 +280,14 @@ A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
 	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
 #endif
 
+	if ((pdev->cfg.is_high_latency) &&
+	    (!pdev->cfg.default_tx_comp_req))
+		ol_tx_target_credit_update(pdev->txrx_pdev, -1);
+
 	return A_OK;
 }
 
-A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
+QDF_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
 {
 	struct htt_htc_pkt *pkt;
 	qdf_nbuf_t msg;
@@ -445,6 +483,148 @@ A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
 	return A_OK;
 }
 
+QDF_STATUS
+htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	qdf_nbuf_t msg;
+	u_int32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return QDF_STATUS_E_FAILURE; /* failure */
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful) */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	msg = qdf_nbuf_alloc(
+		pdev->osdev,
+		HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
+		/* reserve room for the HTC header */
+		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return QDF_STATUS_E_FAILURE; /* failure */
+	}
+	/*
+	 * Set the length of the message.
+	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
+	 * separately during the below call to qdf_nbuf_push_head.
+	 * The contribution from the HTC header is added separately inside HTC.
+	 */
+	qdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));
+
+	/* fill in the message contents */
+	msg_word = (u_int32_t *)qdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
+	HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET(
+			*msg_word, pdev->rx_ring.alloc_idx.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size);
+	HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE);
+
+	/* FIX THIS: if the FW creates a complete translated rx descriptor,
+	 * then the MAC DMA of the HW rx descriptor should be disabled. */
+	msg_word++;
+	*msg_word = 0;
+
+	HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word,   0);
+	HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word,   0);
+	HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word,    0);
+	/* always present? */
+	HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word,  0);
+	HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1);
+	/* Must change to dynamic enable at run time
+	 * rather than at compile time
+	 */
+	HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, 0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word, 0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word, 0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word, 0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word, 0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word, 0);
+	HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word, 0);
+
+	SET_HTC_PACKET_INFO_TX(
+		&pkt->htc_pkt,
+		htt_h2t_send_complete_free_netbuf,
+		qdf_nbuf_data(msg),
+		qdf_nbuf_len(msg),
+		pdev->htc_tx_endpoint,
+		1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	if ((pdev->cfg.is_high_latency) &&
+	    (!pdev->cfg.default_tx_comp_req))
+		ol_tx_target_credit_update(pdev->txrx_pdev, -1);
+
+	return QDF_STATUS_SUCCESS;
+}
+
 int
 htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
 		      uint32_t stats_type_upload_mask,
@@ -533,6 +713,10 @@ htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
 	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
 #endif
 
+	if ((pdev->cfg.is_high_latency) &&
+	    (!pdev->cfg.default_tx_comp_req))
+		ol_tx_target_credit_update(pdev->txrx_pdev, -1);
+
 	return 0;
 }
 
@@ -589,6 +773,10 @@ A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, uint8_t sync_cnt)
 	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
 #endif
 
+	if ((pdev->cfg.is_high_latency) &&
+	    (!pdev->cfg.default_tx_comp_req))
+		ol_tx_target_credit_update(pdev->txrx_pdev, -1);
+
 	return A_OK;
 }
 
@@ -656,6 +844,10 @@ htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
 	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
 #endif
 
+	if ((pdev->cfg.is_high_latency) &&
+	    (!pdev->cfg.default_tx_comp_req))
+		ol_tx_target_credit_update(pdev->txrx_pdev, -1);
+
 	return 0;
 }
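
Each H2T message above ends with the same bookkeeping: on a high-latency
target running without per-packet tx completions (default_tx_comp_req
cleared), every downloaded control message consumes one unit of target tx
credit. A hypothetical helper capturing the pattern the patch repeats at
each call site (the helper name is illustrative only):

	static inline void htt_h2t_msg_consume_credit(struct htt_pdev_t *pdev)
	{
		/* one target credit per downloaded H2T control message */
		if (pdev->cfg.is_high_latency &&
		    !pdev->cfg.default_tx_comp_req)
			ol_tx_target_credit_update(pdev->txrx_pdev, -1);
	}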
 

+ 62 - 2
core/dp/htt/htt_internal.h

@@ -420,7 +420,17 @@ static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
 
 #endif
 
+#ifdef CONFIG_HL_SUPPORT
+
+static inline void htt_tx_resume_handler(void *context)
+{
+	return;
+}
+#else
+
 void htt_tx_resume_handler(void *);
+#endif
+
 #ifdef ATH_11AC_TXCOMPACT
 #define HTT_TX_SCHED htt_tx_sched
 #else
@@ -433,7 +443,16 @@ void htt_tx_detach(struct htt_pdev_t *pdev);
 
 int htt_rx_attach(struct htt_pdev_t *pdev);
 
+#if defined(CONFIG_HL_SUPPORT)
+
+static inline void htt_rx_detach(struct htt_pdev_t *pdev)
+{
+	return;
+}
+#else
+
 void htt_rx_detach(struct htt_pdev_t *pdev);
+#endif
 
 int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id);
 
@@ -458,8 +477,11 @@ A_STATUS
 htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
 #endif /* defined(HELIUMPLUS_PADDR64) */
 
-extern A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
-extern A_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+extern QDF_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
+
+extern QDF_STATUS htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev);
+
+extern QDF_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
 
 HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt);
 
@@ -538,6 +560,44 @@ static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
 }
 #endif /* IPA_OFFLOAD */
 
+/* Maximum number of outstanding bus downloads */
+#define HTT_MAX_BUS_CREDIT 33
+
+#ifdef CONFIG_HL_SUPPORT
+
+/**
+ * htt_tx_credit_update() - check for diff in bus delta and target delta
+ * @pdev: pointer to htt device.
+ *
+ * Return: min of bus delta and target delta
+ */
+int
+htt_tx_credit_update(struct htt_pdev_t *pdev);
+#else
+
+static inline int
+htt_tx_credit_update(struct htt_pdev_t *pdev)
+{
+	return 0;
+}
+#endif
+
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+#define HTT_TX_GROUP_INDEX_OFFSET \
+(sizeof(struct htt_txq_group) / sizeof(u_int32_t))
+
+void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word);
+#else
+
+static inline
+void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
+{
+	return;
+}
+#endif
+
 #ifdef DEBUG_RX_RING_BUFFER
 /**
  * htt_rx_dbg_rxbuf_init() - init debug rx buff list
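
The two deltas behind htt_tx_credit_update() are declared in htt_types.h:
bus_delta counts bus download completions (seeded with HTT_MAX_BUS_CREDIT =
33 in htt_pdev_alloc) and target_delta counts credits returned by the
target. The function releases min(target_delta, bus_delta) and subtracts
that amount from both counters (implementation in htt_tx.c below). A worked
trace with assumed starting values:

	/* assumed state: bus_delta = 3, target_delta = 0 */
	qdf_atomic_add(5, &pdev->htt_tx_credit.target_delta); /* target acks 5 */
	credit_delta = htt_tx_credit_update(pdev);            /* min(5, 3) = 3 */
	/* afterwards: target_delta = 2, bus_delta = 0; the two remaining
	 * credits surface once further bus downloads complete */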

+ 520 - 9
core/dp/htt/htt_rx.c

@@ -55,7 +55,7 @@
 #include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */
 #include <cds_utils.h>
 #include <cds_concurrency.h>
-
+#include "ol_txrx_types.h"
 #ifdef DEBUG_DMA_DONE
 #include <asm/barrier.h>
 #include <wma_api.h>
@@ -159,6 +159,169 @@ void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	pdev->rx_ring.hash_table = NULL;
 }
 
+/*
+ * This function is used both within this file below (where the compiler
+ * will hopefully inline it) and out of line from other files, via the
+ * htt_rx_msdu_first_msdu_flag function pointer.
+ */
+
+static inline bool
+htt_rx_msdu_first_msdu_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	return ((u_int8_t *)msdu_desc - sizeof(struct hl_htt_rx_ind_base))
+		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)] &
+		HTT_RX_IND_HL_FLAG_FIRST_MSDU ? true : false;
+}
+
+u_int16_t
+htt_rx_msdu_rx_desc_size_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	return ((u_int8_t *)(msdu_desc) - HTT_RX_IND_HL_BYTES)
+		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
+}
+
+/**
+ * htt_rx_mpdu_desc_retry_hl() - Returns the retry bit from the Rx descriptor
+ *                               for the High Latency driver
+ * @pdev: Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
+ *             before the beginning of the payload.
+ *
+ *  This function returns the retry bit of the 802.11 header for the
+ *  provided rx MPDU descriptor. For the high latency driver, this function
+ *  pretends as if the retry bit is never set so that the mcast duplicate
+ *  detection never fails.
+ *
+ * Return:        boolean -- false always for HL
+ */
+static inline bool
+htt_rx_mpdu_desc_retry_hl(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	return false;
+}
+
+#ifdef CONFIG_HL_SUPPORT
+u_int16_t
+htt_rx_mpdu_desc_seq_num_hl(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	if (pdev->rx_desc_size_hl) {
+		return pdev->cur_seq_num_hl =
+			(u_int16_t)(HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
+						HTT_HL_RX_DESC_MPDU_SEQ_NUM));
+	} else {
+		return (u_int16_t)(pdev->cur_seq_num_hl);
+	}
+}
+#endif
+
+void
+htt_rx_mpdu_desc_pn_hl(
+	htt_pdev_handle pdev,
+	void *mpdu_desc,
+	union htt_rx_pn_t *pn,
+	int pn_len_bits)
+{
+	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
+		/* Fix Me: only for little endian */
+		struct hl_htt_rx_desc_base *rx_desc =
+			(struct hl_htt_rx_desc_base *)mpdu_desc;
+		u_int32_t *word_ptr = (u_int32_t *)pn->pn128;
+
+		/* TODO: for Host of big endian */
+		switch (pn_len_bits) {
+		case 128:
+			/* bits 127:96 */
+			*(word_ptr + 3) = rx_desc->pn_127_96;
+			/* bits 95:64 */
+			*(word_ptr + 2) = rx_desc->pn_95_64;
+			/* fall through to copy the lower 64 bits */
+		case 48:
+			/* bits 47:32 (a full 32-bit word is copied) */
+			*(word_ptr + 1) = rx_desc->u0.pn_63_32;
+			/* fall through to copy the lower 32 bits */
+		case 24:
+			/* bits 23:0 (a full 32-bit word is copied) */
+			*(word_ptr + 0) = rx_desc->pn_31_0;
+			break;
+		default:
+			qdf_print(
+				"Error: invalid length spec (%d bits) for PN\n",
+				pn_len_bits);
+			qdf_assert(0);
+			break;
+		}
+	} else {
+		/* not first msdu, no pn info */
+		qdf_print(
+			"Error: PN requested for a non-first msdu.\n");
+		qdf_assert(0);
+	}
+}
+
+/**
+ * htt_rx_mpdu_desc_tid_hl() - Returns the TID value from the Rx descriptor
+ *                             for High Latency driver
+ * @pdev:                        Handle (pointer) to HTT pdev.
+ * @mpdu_desc:                   Void pointer to the Rx descriptor for the MPDU
+ *                               before the beginning of the payload.
+ *
+ * This function returns the TID set in the 802.11 QoS Control for the MPDU
+ * in the packet header, by looking at the mpdu_start of the Rx descriptor.
+ * Rx descriptor gets a copy of the TID from the MAC.
+ * For the HL driver, this is currently unimplemented and always returns
+ * an invalid TID. The caller is responsible for checking that the
+ * return value is within the valid range.
+ *
+ * Return:        Invalid TID value (0xff) for HL driver.
+ */
+static inline uint8_t
+htt_rx_mpdu_desc_tid_hl(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	return 0xff;  /* Invalid TID */
+}
+
+static inline bool
+htt_rx_msdu_desc_completes_mpdu_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	return (
+		((u_int8_t *)(msdu_desc) - sizeof(struct hl_htt_rx_ind_base))
+		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)]
+		& HTT_RX_IND_HL_FLAG_LAST_MSDU)
+		? true : false;
+}
+
+static inline int
+htt_rx_msdu_has_wlan_mcast_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	/* currently, only first msdu has hl rx_desc */
+	return htt_rx_msdu_first_msdu_flag_hl(pdev, msdu_desc) == true;
+}
+
+static inline bool
+htt_rx_msdu_is_wlan_mcast_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct hl_htt_rx_desc_base *rx_desc =
+		(struct hl_htt_rx_desc_base *)msdu_desc;
+
+	return
+		HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
+}
+
+static inline int
+htt_rx_msdu_is_frag_hl(htt_pdev_handle pdev, void *msdu_desc)
+{
+	struct hl_htt_rx_desc_base *rx_desc =
+		(struct hl_htt_rx_desc_base *)msdu_desc;
+
+	return
+		HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
+}
+
 static bool
 htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
 {
@@ -170,6 +333,8 @@ htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
 		 RX_MSDU_END_4_FIRST_MSDU_LSB);
 }
 
+#ifndef CONFIG_HL_SUPPORT
+
 static int htt_rx_ring_size(struct htt_pdev_t *pdev)
 {
 	int size;
@@ -231,6 +396,7 @@ static void htt_rx_ring_refill_retry(void *arg)
 	htt_pdev_handle pdev = (htt_pdev_handle) arg;
 	htt_rx_msdu_buff_replenish(pdev);
 }
+#endif
 
 void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 {
@@ -351,6 +517,8 @@ unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
 		pdev->rx_ring.size_mask;
 }
 
+#ifndef CONFIG_HL_SUPPORT
+
 void htt_rx_detach(struct htt_pdev_t *pdev)
 {
 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
@@ -404,6 +572,7 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 				   qdf_get_dma_mem_context((&pdev->rx_ring.buf),
 							   memctx));
 }
+#endif
 
 /*--- rx descriptor field access functions ----------------------------------*/
 /*
@@ -719,8 +888,74 @@ htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
 #undef MAX_IP_VER
 #undef MAX_PROTO_VAL
 }
+
+#if defined(CONFIG_HL_SUPPORT)
+
+static void
+htt_set_checksum_result_hl(qdf_nbuf_t msdu,
+			   struct htt_host_rx_desc_base *rx_desc)
+{
+	u_int8_t flag = ((u_int8_t *)rx_desc -
+				sizeof(struct hl_htt_rx_ind_base))[
+					HTT_ENDIAN_BYTE_IDX_SWAP(
+						HTT_RX_IND_HL_FLAG_OFFSET)];
+
+	int is_ipv6 = flag & HTT_RX_IND_HL_FLAG_IPV6 ? 1 : 0;
+	int is_tcp = flag & HTT_RX_IND_HL_FLAG_TCP ? 1 : 0;
+	int is_udp = flag & HTT_RX_IND_HL_FLAG_UDP ? 1 : 0;
+
+	qdf_nbuf_rx_cksum_t cksum = {
+		QDF_NBUF_RX_CKSUM_NONE,
+		QDF_NBUF_RX_CKSUM_NONE,
+		0
+	};
+
+	switch ((is_udp << 2) | (is_tcp << 1) | (is_ipv6 << 0)) {
+	case 0x4:
+		cksum.l4_type = QDF_NBUF_RX_CKSUM_UDP;
+		break;
+	case 0x2:
+		cksum.l4_type = QDF_NBUF_RX_CKSUM_TCP;
+		break;
+	case 0x5:
+		cksum.l4_type = QDF_NBUF_RX_CKSUM_UDPIPV6;
+		break;
+	case 0x3:
+		cksum.l4_type = QDF_NBUF_RX_CKSUM_TCPIPV6;
+		break;
+	default:
+		cksum.l4_type = QDF_NBUF_RX_CKSUM_NONE;
+		break;
+	}
+	if (cksum.l4_type != (qdf_nbuf_l4_rx_cksum_type_t)
+				QDF_NBUF_RX_CKSUM_NONE) {
+		cksum.l4_result = flag & HTT_RX_IND_HL_FLAG_C4_FAILED ?
+			QDF_NBUF_RX_CKSUM_NONE :
+				QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
+	}
+	qdf_nbuf_set_rx_cksum(msdu, &cksum);
+}
+#endif
+
 #else
-#define htt_set_checksum_result_ll(pdev, msdu, rx_desc) /* no-op */
+
+static inline
+void htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
+			   struct htt_host_rx_desc_base *rx_desc)
+{
+	return;
+}
+
+#if defined(CONFIG_HL_SUPPORT)
+
+static inline
+void htt_set_checksum_result_hl(qdf_nbuf_t msdu,
+			   struct htt_host_rx_desc_base *rx_desc)
+{
+	return;
+}
+#endif
+
 #endif
 
 #ifdef DEBUG_DMA_DONE
@@ -1074,6 +1309,70 @@ htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
 	return msdu_chaining;
 }
 
+#if defined(CONFIG_HL_SUPPORT)
+
+static int
+htt_rx_amsdu_pop_hl(
+	htt_pdev_handle pdev,
+	qdf_nbuf_t rx_ind_msg,
+	qdf_nbuf_t *head_msdu,
+	qdf_nbuf_t *tail_msdu)
+{
+	pdev->rx_desc_size_hl =
+		(qdf_nbuf_data(rx_ind_msg))
+		[HTT_ENDIAN_BYTE_IDX_SWAP(
+				HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
+
+	/* point to the rx desc */
+	qdf_nbuf_pull_head(rx_ind_msg,
+			   sizeof(struct hl_htt_rx_ind_base));
+	*head_msdu = *tail_msdu = rx_ind_msg;
+
+	htt_set_checksum_result_hl(rx_ind_msg,
+				   (struct htt_host_rx_desc_base *)
+				   (qdf_nbuf_data(rx_ind_msg)));
+
+	qdf_nbuf_set_next(*tail_msdu, NULL);
+	return 0;
+}
+
+static int
+htt_rx_frag_pop_hl(
+	htt_pdev_handle pdev,
+	qdf_nbuf_t frag_msg,
+	qdf_nbuf_t *head_msdu,
+	qdf_nbuf_t *tail_msdu)
+{
+	qdf_nbuf_pull_head(frag_msg, HTT_RX_FRAG_IND_BYTES);
+	pdev->rx_desc_size_hl =
+		(qdf_nbuf_data(frag_msg))
+		[HTT_ENDIAN_BYTE_IDX_SWAP(
+				HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
+
+	/* point to the rx desc */
+	qdf_nbuf_pull_head(frag_msg,
+			   sizeof(struct hl_htt_rx_ind_base));
+	*head_msdu = *tail_msdu = frag_msg;
+
+	qdf_nbuf_set_next(*tail_msdu, NULL);
+	return 0;
+}
+
+static inline int
+htt_rx_offload_msdu_pop_hl(htt_pdev_handle pdev,
+			   qdf_nbuf_t offload_deliver_msg,
+			   int *vdev_id,
+			   int *peer_id,
+			   int *tid,
+			   u_int8_t *fw_desc,
+			   qdf_nbuf_t *head_buf,
+			   qdf_nbuf_t *tail_buf)
+{
+	return 0;
+}
+
+#endif
+
 int
 htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
 			   qdf_nbuf_t offload_deliver_msg,
@@ -2260,6 +2559,152 @@ void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
 	return (void *)htt_rx_desc(netbuf);
 }
 
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * htt_rx_mpdu_desc_list_next_hl() - provides an abstract way to obtain
+ *				     the next MPDU descriptor
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * For HL, the returned value is not an mpdu_desc: it is the translated
+ * hl_rx_desc located just after the hl_ind_msg.
+ * For an HL A-MSDU, we cannot point to the payload yet, because the
+ * hl rx desc is not a fixed size, so the desc could not be retrieved
+ * by subtracting rx_desc_size at release time; keep pointing to the
+ * hl rx desc for now.
+ *
+ * Return: next abstract rx descriptor from the series of MPDUs
+ *		   referenced by an rx ind msg
+ */
+static inline void *
+htt_rx_mpdu_desc_list_next_hl(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
+{
+	void *mpdu_desc = (void *)qdf_nbuf_data(rx_ind_msg);
+	return mpdu_desc;
+}
+
+/**
+ * htt_rx_msdu_desc_retrieve_hl() - Retrieve a previously-stored rx descriptor
+ *				    from a MSDU buffer
+ * @pdev: the HTT instance the rx data was received on
+ * @msdu: the buffer containing the MSDU payload
+ *
+ * Currently, for an HL A-MSDU we do not point to the payload;
+ * the shift to the payload happens later, in ol_rx_deliver.
+ *
+ * Return: the corresponding abstract rx MSDU descriptor
+ */
+static inline void *
+htt_rx_msdu_desc_retrieve_hl(htt_pdev_handle pdev, qdf_nbuf_t msdu)
+{
+	return qdf_nbuf_data(msdu);
+}
+
+static
+bool htt_rx_mpdu_is_encrypted_hl(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
+		/* Fix Me: only for little endian */
+		struct hl_htt_rx_desc_base *rx_desc =
+			(struct hl_htt_rx_desc_base *)mpdu_desc;
+
+		return HTT_WORD_GET(*(u_int32_t *)rx_desc,
+					HTT_HL_RX_DESC_MPDU_ENC);
+	} else {
+		/* not first msdu, no encrypt info for hl */
+		qdf_print(
+			"Error: encrypt info requested for a non-first msdu.\n");
+		qdf_assert(0);
+		return false;
+	}
+}
+
+static inline bool
+htt_rx_msdu_chan_info_present_hl(htt_pdev_handle pdev, void *mpdu_desc)
+{
+	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true &&
+	    HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
+			 HTT_HL_RX_DESC_CHAN_INFO_PRESENT))
+		return true;
+
+	return false;
+}
+
+static bool
+htt_rx_msdu_center_freq_hl(htt_pdev_handle pdev,
+			   struct ol_txrx_peer_t *peer,
+			   void *mpdu_desc,
+			   uint16_t *primary_chan_center_freq_mhz,
+			   uint16_t *contig_chan1_center_freq_mhz,
+			   uint16_t *contig_chan2_center_freq_mhz,
+			   uint8_t *phy_mode)
+{
+	int pn_len, index;
+	uint32_t *chan_info;
+
+	index = htt_rx_msdu_is_wlan_mcast(pdev, mpdu_desc) ?
+		txrx_sec_mcast : txrx_sec_ucast;
+
+	pn_len = (peer ?
+			pdev->txrx_pdev->rx_pn[peer->security[index].sec_type].
+								len : 0);
+	chan_info = (uint32_t *)((uint8_t *)mpdu_desc +
+			HTT_HL_RX_DESC_PN_OFFSET + pn_len);
+
+	if (htt_rx_msdu_chan_info_present_hl(pdev, mpdu_desc)) {
+		if (primary_chan_center_freq_mhz)
+			*primary_chan_center_freq_mhz =
+				HTT_WORD_GET(
+					*chan_info,
+					HTT_CHAN_INFO_PRIMARY_CHAN_CENTER_FREQ);
+		if (contig_chan1_center_freq_mhz)
+			*contig_chan1_center_freq_mhz =
+				HTT_WORD_GET(
+					*chan_info,
+					HTT_CHAN_INFO_CONTIG_CHAN1_CENTER_FREQ);
+		chan_info++;
+		if (contig_chan2_center_freq_mhz)
+			*contig_chan2_center_freq_mhz =
+				HTT_WORD_GET(
+					*chan_info,
+					HTT_CHAN_INFO_CONTIG_CHAN2_CENTER_FREQ);
+		if (phy_mode)
+			*phy_mode =
+				HTT_WORD_GET(*chan_info,
+					     HTT_CHAN_INFO_PHY_MODE);
+		return true;
+	}
+
+	if (primary_chan_center_freq_mhz)
+		*primary_chan_center_freq_mhz = 0;
+	if (contig_chan1_center_freq_mhz)
+		*contig_chan1_center_freq_mhz = 0;
+	if (contig_chan2_center_freq_mhz)
+		*contig_chan2_center_freq_mhz = 0;
+	if (phy_mode)
+		*phy_mode = 0;
+	return false;
+}
+
+static bool
+htt_rx_msdu_desc_key_id_hl(htt_pdev_handle htt_pdev,
+			   void *mpdu_desc, u_int8_t *key_id)
+{
+	if (htt_rx_msdu_first_msdu_flag_hl(htt_pdev, mpdu_desc) == true) {
+		/* Fix Me: only for little endian */
+		struct hl_htt_rx_desc_base *rx_desc =
+			(struct hl_htt_rx_desc_base *)mpdu_desc;
+
+		*key_id = rx_desc->key_id_oct;
+		return true;
+	}
+
+	return false;
+}
+
+#endif
+
 void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
 {
 	return htt_rx_desc(msdu);
@@ -2329,16 +2774,36 @@ void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
 	 */
 }
 
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * htt_rx_fill_ring_count() - replenish rx msdu buffer
+ * @pdev: Handle (pointer) to HTT pdev.
+ *
+ * This function replenishes the rx ring with buffers, up to the
+ * maximum number that can be kept in the ring
+ *
+ * Return: None
+ */
+static inline void htt_rx_fill_ring_count(htt_pdev_handle pdev)
+{
+	return;
+}
+#else
+
+static void htt_rx_fill_ring_count(htt_pdev_handle pdev)
+{
+	int num_to_fill;
+	num_to_fill = pdev->rx_ring.fill_level - pdev->rx_ring.fill_cnt;
+	htt_rx_ring_fill_n(pdev, num_to_fill /* okay if <= 0 */);
+}
+#endif
+
 void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
 {
-	if (qdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt)) {
-		int num_to_fill;
-		num_to_fill = pdev->rx_ring.fill_level -
-			pdev->rx_ring.fill_cnt;
+	if (qdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt))
+		htt_rx_fill_ring_count(pdev);
 
-		htt_rx_ring_fill_n(pdev,
-				   num_to_fill /* okay if <= 0 */);
-	}
 	qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
 }
 
@@ -2614,6 +3079,51 @@ void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
 /* move the function to the end of file
  * to omit ll/hl pre-declaration
  */
+
+#if defined(CONFIG_HL_SUPPORT)
+
+int htt_rx_attach(struct htt_pdev_t *pdev)
+{
+	pdev->rx_ring.size = HTT_RX_RING_SIZE_MIN;
+	HTT_ASSERT2(IS_PWR2(pdev->rx_ring.size));
+	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
+	/* host can force ring base address if it wish to do so */
+	pdev->rx_ring.base_paddr = 0;
+	htt_rx_amsdu_pop = htt_rx_amsdu_pop_hl;
+	htt_rx_frag_pop = htt_rx_frag_pop_hl;
+	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_hl;
+	htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_hl;
+	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_hl;
+	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_hl;
+	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_hl;
+	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_hl;
+	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_hl;
+	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_hl;
+	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_hl;
+	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_hl;
+	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_hl;
+	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_hl;
+	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_hl;
+	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_hl;
+	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_hl;
+	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_hl;
+
+	/*
+	 * In the HL case, the rx descriptor can be different sizes for
+	 * different sub-types of RX_IND messages, e.g. for the
+	 * initial vs. interior vs. final MSDUs within a PPDU.
+	 * The size of each RX_IND message's rx desc is read from
+	 * a field within the RX_IND message itself.
+	 * In the meantime, until the rx_desc_size_hl variable is
+	 * set to its real value based on the RX_IND message,
+	 * initialize it to a reasonable value (zero).
+	 */
+	pdev->rx_desc_size_hl = 0;
+	return 0;	/* success */
+}
+
+#else
+
 int htt_rx_attach(struct htt_pdev_t *pdev)
 {
 	qdf_dma_addr_t paddr;
@@ -2763,6 +3273,7 @@ fail2:
 fail1:
 	return 1;               /* failure */
 }
+#endif
 
 #ifdef IPA_OFFLOAD
 #ifdef QCA_WIFI_3_0
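
All the HL rx accessors above assume one message layout: the HL rx
descriptor immediately follows the hl_htt_rx_ind_base header in the
indication buffer, and htt_rx_amsdu_pop_hl() pulls that header so the
abstract descriptor pointer lands on the HL rx desc. A minimal sketch of
the resulting pointer arithmetic (local variable names are illustrative):

	/* rx_ind_msg data: [ hl_htt_rx_ind_base | hl rx desc | payload ] */
	u_int8_t *desc = (u_int8_t *)mpdu_desc;  /* at the hl rx desc */
	/* per-MPDU flags live in the ind header, one step back */
	u_int8_t flags = (desc - sizeof(struct hl_htt_rx_ind_base))
		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)];
	bool first_msdu = !!(flags & HTT_RX_IND_HL_FLAG_FIRST_MSDU);

This is also why htt_attach() computes a negative rx_fw_desc_offset for HL:
the FW rx descriptor sits in the indication header, before the address the
abstract rx descriptor pointer refers to.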

+ 102 - 26
core/dp/htt/htt_t2h.c

@@ -58,34 +58,24 @@
 #define DEBUG_CREDIT 0
 #endif
 
-static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
-					   uint8_t *buffer)
+#if defined(CONFIG_HL_SUPPORT)
+
+/**
+ * htt_rx_frag_set_last_msdu() - set last msdu bit in rx descriptor
+ *				 for received frames
+ * @pdev: Handle (pointer) to HTT pdev.
+ * @msg: received htt msg
+ *
+ * Return: None
+ */
+static inline
+void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
 {
-#ifdef BIG_ENDIAN_HOST
-	/*
-	 * The host endianness is opposite of the target endianness.
-	 * To make uint32_t elements come out correctly, the target->host
-	 * upload has swizzled the bytes in each uint32_t element of the
-	 * message.
-	 * For byte-array message fields like the MAC address, this
-	 * upload swizzling puts the bytes in the wrong order, and needs
-	 * to be undone.
-	 */
-	buffer[0] = tgt_mac_addr[3];
-	buffer[1] = tgt_mac_addr[2];
-	buffer[2] = tgt_mac_addr[1];
-	buffer[3] = tgt_mac_addr[0];
-	buffer[4] = tgt_mac_addr[7];
-	buffer[5] = tgt_mac_addr[6];
-	return buffer;
-#else
-	/*
-	 * The host endianness matches the target endianness -
-	 * we can use the mac addr directly from the message buffer.
-	 */
-	return tgt_mac_addr;
-#endif
+	return;
 }
+#else
 
 static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
 {
@@ -131,6 +121,36 @@ static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, qdf_nbuf_t msg)
 	rx_desc->msdu_end.last_msdu = 1;
 	qdf_nbuf_map(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
 }
+#endif
+
+static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
+					   uint8_t *buffer)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * The host endianness is opposite of the target endianness.
+	 * To make uint32_t elements come out correctly, the target->host
+	 * upload has swizzled the bytes in each uint32_t element of the
+	 * message.
+	 * For byte-array message fields like the MAC address, this
+	 * upload swizzling puts the bytes in the wrong order, and needs
+	 * to be undone.
+	 */
+	buffer[0] = tgt_mac_addr[3];
+	buffer[1] = tgt_mac_addr[2];
+	buffer[2] = tgt_mac_addr[1];
+	buffer[3] = tgt_mac_addr[0];
+	buffer[4] = tgt_mac_addr[7];
+	buffer[5] = tgt_mac_addr[6];
+	return buffer;
+#else
+	/*
+	 * The host endianness matches the target endianness -
+	 * we can use the mac addr directly from the message buffer.
+	 */
+	return tgt_mac_addr;
+#endif
+}
 
 /* Target to host Msg/event  handler  for low priority messages*/
 void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
@@ -284,10 +304,26 @@ void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
 	{
 		struct htt_mgmt_tx_compl_ind *compl_msg;
+		int32_t credit_delta = 1;
 
 		compl_msg =
 			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
 
+		if (pdev->cfg.is_high_latency) {
+			if (!pdev->cfg.default_tx_comp_req) {
+				qdf_atomic_add(credit_delta,
+					       &pdev->htt_tx_credit.
+								target_delta);
+				credit_delta = htt_tx_credit_update(pdev);
+			}
+			if (credit_delta)
+				ol_tx_target_credit_update(
+						pdev->txrx_pdev, credit_delta);
+		}
+		ol_tx_desc_update_group_credit(
+			pdev->txrx_pdev, compl_msg->desc_id, 1,
+			0, compl_msg->status);
+
 		if (!ol_tx_get_is_mgmt_over_wmi_enabled()) {
 			ol_tx_single_completion_handler(pdev->txrx_pdev,
 							compl_msg->status,
@@ -330,6 +366,15 @@ void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
 		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
 		htt_credit_delta = sign * htt_credit_delta_abs;
+
+		if (pdev->cfg.is_high_latency &&
+		    !pdev->cfg.default_tx_comp_req) {
+			qdf_atomic_add(htt_credit_delta,
+				       &pdev->htt_tx_credit.target_delta);
+			htt_credit_delta = htt_tx_credit_update(pdev);
+		}
+
+		htt_tx_group_credit_process(pdev, msg_word);
 		ol_tx_credit_completion_handler(pdev->txrx_pdev,
 						htt_credit_delta);
 		break;
@@ -535,6 +580,10 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 		ol_rx_indication_handler(pdev->txrx_pdev,
 					 htt_t2h_msg, peer_id,
 					 tid, num_mpdu_ranges);
+
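+		/*
+		 * For HL, the rx ind netbuf is consumed by the rx
+		 * indication handler itself, so do not fall through
+		 * to the generic free of the message buffer at the
+		 * end of this function.
+		 */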
+		if (pdev->cfg.is_high_latency)
+			return;
+
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
@@ -563,6 +612,27 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 					compl->payload[num_msdus];
 			}
 		}
+
+		if (pdev->cfg.is_high_latency) {
+			if (!pdev->cfg.default_tx_comp_req) {
+				int credit_delta;
+
+				qdf_atomic_add(num_msdus,
+					       &pdev->htt_tx_credit.
+								target_delta);
+				credit_delta = htt_tx_credit_update(pdev);
+
+				if (credit_delta) {
+					ol_tx_target_credit_update(
+							pdev->txrx_pdev,
+							credit_delta);
+				}
+			} else {
+				ol_tx_target_credit_update(pdev->txrx_pdev,
+							   num_msdus);
+			}
+		}
+
 		ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
 					 status, msg_word + 1);
 		HTT_TX_SCHED(pdev);
@@ -637,6 +707,12 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 			break;
 		}
 
+		if (qdf_unlikely(pdev->cfg.is_high_latency)) {
+			qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND ");
+			qdf_print("not supported on high latency.\n");
+			break;
+		}
+
 		peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
 		tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
 		offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
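
On a big-endian host, the target-to-host upload byte-swaps each uint32_t,
so a MAC address carried as a byte array arrives scrambled;
htt_t2h_mac_addr_deswizzle() (relocated above) undoes this. A worked
example with an assumed address of 00:03:7f:12:34:56:

	/* after the per-u32 upload swizzle, the big-endian host reads:
	 *   tgt_mac_addr[] = { 0x12, 0x7f, 0x03, 0x00, ?, ?, 0x56, 0x34 }
	 * (the two '?' bytes belong to neighboring message fields);
	 * the deswizzle copies the bytes back into order:
	 *   buffer[0..5]   = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 }
	 */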

+ 302 - 59
core/dp/htt/htt_tx.c

@@ -228,6 +228,7 @@ int htt_tx_frag_alloc(htt_pdev_handle pdev,
 	return 0;
 }
 #else
+
 /**
 * htt_tx_desc_get_size() - get tx descriptor size
  * @pdev:	htt device instance pointer
@@ -238,22 +239,28 @@ int htt_tx_frag_alloc(htt_pdev_handle pdev,
  */
 static inline void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
 {
-	/*
-	 * Start with the size of the base struct
-	 * that actually gets downloaded.
-	 *
-	 * Add the fragmentation descriptor elements.
-	 * Add the most that the OS may deliver, plus one more
-	 * in case the txrx code adds a prefix fragment (for
-	 * TSO or audio interworking SNAP header)
-	 */
-	pdev->tx_descs.size =
+	if (pdev->cfg.is_high_latency) {
+		pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
+	} else {
+		/*
+		 * Start with the size of the base struct
+		 * that actually gets downloaded.
+		 *
+		 * Add the fragmentation descriptor elements.
+		 * Add the most that the OS may deliver, plus one more
+		 * in case the txrx code adds a prefix fragment (for
+		 * TSO or audio interworking SNAP header)
+		 */
+		pdev->tx_descs.size =
 		sizeof(struct htt_host_tx_desc_t)
 		+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
-		   /* 2x uint32_t */
+		/* 2x uint32_t */
 		+ 4; /* uint32_t fragmentation list terminator */
+	}
 }
 
+#ifndef CONFIG_HL_SUPPORT
+
 /**
  * htt_tx_frag_desc_field_update() - Update fragment descriptor field
  * @pdev:	htt device instance pointer
@@ -273,6 +280,7 @@ static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
 	*fptr = (uint32_t)htt_tx_get_paddr(pdev, (char *)desc_v_ptr) +
 		HTT_TX_DESC_LEN;
 }
+#endif
 
 /**
  * htt_tx_frag_desc_attach() - Attach fragment descriptor
@@ -300,6 +308,8 @@ static inline int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
 static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
 #endif /* HELIUMPLUS */
 
+#ifdef CONFIG_HL_SUPPORT
+
 /**
  * htt_tx_attach() - Attach HTT device instance
  * @pdev:		htt device instance pointer
@@ -309,6 +319,180 @@ static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
  *
  * Return: 0 Success
  */
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
+{
+	int i, i_int, pool_size;
+	uint32_t **p;
+	uint32_t num_link = 0;
+	uint16_t num_page, num_desc_per_page;
+	void **cacheable_pages = NULL;
+
+	htt_tx_desc_get_size(pdev);
+
+	/*
+	 * Make sure tx_descs.size is a multiple of 4-bytes.
+	 * It should be, but round up just to be sure.
+	 */
+	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
+
+	pdev->tx_descs.pool_elems = desc_pool_elems;
+	pdev->tx_descs.alloc_cnt = 0;
+	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
+	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
+				  pdev->tx_descs.size,
+				  pdev->tx_descs.pool_elems,
+				  qdf_get_dma_mem_context((&pdev->tx_descs),
+							  memctx), true);
+	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
+	    (NULL == pdev->tx_descs.desc_pages.cacheable_pages)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "HTT desc alloc fail");
+		goto out_fail;
+	}
+	num_page = pdev->tx_descs.desc_pages.num_pages;
+	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
+
+	/* link tx descriptors into a freelist */
+	cacheable_pages = pdev->tx_descs.desc_pages.cacheable_pages;
+
+	pdev->tx_descs.freelist = (uint32_t *)cacheable_pages[0];
+	p = (uint32_t **)pdev->tx_descs.freelist;
+	for (i = 0; i < num_page; i++) {
+		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
+			if (i_int == (num_desc_per_page - 1)) {
+				/*
+				 * Last element on this page,
+				 * should point next page
+				 */
+				if (!cacheable_pages[i + 1]) {
+					TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+						   "over flow num link %d\n",
+						   num_link);
+					goto free_htt_desc;
+				}
+				*p = (uint32_t *)cacheable_pages[i + 1];
+			} else {
+				*p = (uint32_t *)
+					(((char *)p) + pdev->tx_descs.size);
+			}
+			num_link++;
+			p = (uint32_t **) *p;
+			/* Last link established exit */
+			if (num_link == (pdev->tx_descs.pool_elems - 1))
+				break;
+		}
+	}
+	*p = NULL;
+
+	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "HTT Frag descriptor alloc fail");
+		goto free_htt_desc;
+	}
+
+	/* success */
+	return 0;
+
+free_htt_desc:
+	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+				 qdf_get_dma_mem_context((&pdev->tx_descs),
+							 memctx), true);
+out_fail:
+	return -ENOBUFS;
+}
+
+void htt_tx_detach(struct htt_pdev_t *pdev)
+{
+	if (!pdev) {
+		qdf_print("htt tx detach invalid instance");
+		return;
+	}
+
+	htt_tx_frag_desc_detach(pdev);
+	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+				 qdf_get_dma_mem_context((&pdev->tx_descs),
+							 memctx), true);
+}
+
+/**
+ * htt_tx_set_frag_desc_addr() - set up the fragmentation descriptor address
+ * @pdev: pointer to the HTT instance making the allocation
+ * @htt_tx_desc: host tx descriptor that does not include the HTC hdr
+ * @index: index of the htt tx desc to set up
+ *
+ * Return: None
+ */
+static inline void
+htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
+			  struct htt_tx_msdu_desc_t *htt_tx_desc,
+			  uint16_t index)
+{
+	return;
+}
+
+/**
+ * htt_tx_desc_frags_table_set() - set up the descriptor and payload
+ *				   to the corresponding fragments
+ * @pdev: pointer to the HTT instance making the allocation
+ * @htt_tx_desc: host tx descriptor that does not include the HTC hdr
+ * @paddr: fragment physical address
+ * @frag_desc_paddr: frag descriptor physical address
+ * @reset: reset flag
+ *
+ * Return: None
+ */
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+				 void *desc,
+				 qdf_dma_addr_t paddr,
+				 qdf_dma_addr_t frag_desc_paddr,
+				 int reset)
+{
+	/* fragments table only applies to LL systems */
+	return;
+}
+
+/**
+ * htt_tx_credit_update() - get the number of credits by which the
+ *			    target tx credit count should be updated
+ * @pdev: htt context
+ *
+ * Return: number of credits
+ */
+int htt_tx_credit_update(struct htt_pdev_t *pdev)
+{
+	int credit_delta;
+	credit_delta = QDF_MIN(qdf_atomic_read(
+			&pdev->htt_tx_credit.target_delta),
+			qdf_atomic_read(&pdev->htt_tx_credit.bus_delta));
+	if (credit_delta) {
+		qdf_atomic_add(-credit_delta,
+			       &pdev->htt_tx_credit.target_delta);
+		qdf_atomic_add(-credit_delta,
+			       &pdev->htt_tx_credit.bus_delta);
+	}
+	return credit_delta;
+}
+
+/**
+ * htt_tx_get_paddr() - get physical address for htt desc
+ *
+ * Get HTT descriptor physical address from virtual address
+ * Find page first and find offset
+ * Not required for HL systems
+ *
+ * Return: Physical address of descriptor
+ */
+static inline
+qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
+				char *target_vaddr)
+{
+	return 0;
+}
+
+
+#else
+
 int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
 {
 	int i, i_int, pool_size;
@@ -399,14 +583,55 @@ void htt_tx_detach(struct htt_pdev_t *pdev)
 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
 }
 
-/**
- * htt_tx_get_paddr() - get physical address for htt desc
- *
- * Get HTT descriptor physical address from virtaul address
- * Find page first and find offset
- *
- * Return: Physical address of descriptor
- */
+static void
+htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
+			  struct htt_tx_msdu_desc_t *htt_tx_desc,
+			  uint16_t index)
+{
+	uint32_t *fragmentation_descr_field_ptr;
+	fragmentation_descr_field_ptr = (uint32_t *)
+		((uint32_t *)htt_tx_desc) +
+		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+	/*
+	 * The fragmentation descriptor is allocated from consistent
+	 * memory. Therefore, we can use the address directly rather
+	 * than having to map it from a virtual/CPU address to a
+	 * physical/bus address.
+	 */
+	htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
+				      index, htt_tx_desc);
+}
+
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+				 void *htt_tx_desc,
+				 qdf_dma_addr_t paddr,
+				 qdf_dma_addr_t frag_desc_paddr,
+				 int reset)
+{
+	uint32_t *fragmentation_descr_field_ptr;
+
+	fragmentation_descr_field_ptr = (uint32_t *)
+		((uint32_t *) htt_tx_desc) +
+		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+	if (reset) {
+#if defined(HELIUMPLUS_PADDR64)
+		*fragmentation_descr_field_ptr = frag_desc_paddr;
+#else
+		*fragmentation_descr_field_ptr =
+			htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+#endif
+	} else {
+		*fragmentation_descr_field_ptr = paddr;
+	}
+}
+
+void htt_tx_pending_discard(htt_pdev_handle pdev)
+{
+	htc_flush_surprise_remove(pdev->htc_pdev);
+}
+
 static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
 				char *target_vaddr)
 {
@@ -434,6 +659,8 @@ static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
 	return page_info->page_p_addr + offset;
 }
 
+#endif
+
 /*--- descriptor allocation functions ---------------------------------------*/
 
 void *htt_tx_desc_alloc(htt_pdev_handle pdev, qdf_dma_addr_t *paddr,
@@ -441,7 +668,6 @@ void *htt_tx_desc_alloc(htt_pdev_handle pdev, qdf_dma_addr_t *paddr,
 {
 	struct htt_host_tx_desc_t *htt_host_tx_desc;    /* includes HTC hdr */
 	struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include  HTC hdr */
-	uint32_t *fragmentation_descr_field_ptr;
 
 	htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
 	if (!htt_host_tx_desc)
@@ -461,17 +687,7 @@ void *htt_tx_desc_alloc(htt_pdev_handle pdev, qdf_dma_addr_t *paddr,
 	 * would be helpful to have separate htt_tx_desc_alloc functions for
 	 * HL vs. LL, to remove the below conditional branch.
 	 */
-	fragmentation_descr_field_ptr = (uint32_t *)
-		((uint32_t *) htt_tx_desc) +
-		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
-	/*
-	 * The fragmentation descriptor is allocated from consistent
-	 * memory. Therefore, we can use the address directly rather
-	 * than having to map it from a virtual/CPU address to a
-	 * physical/bus address.
-	 */
-	htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
-		index, htt_tx_desc);
+	htt_tx_set_frag_desc_addr(pdev, htt_tx_desc, index);
 
 	/*
 	 * Include the headroom for the HTC frame header when specifying the
@@ -502,40 +718,12 @@ void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
 
 /*--- descriptor field access methods ---------------------------------------*/
 
-void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
-				 void *htt_tx_desc,
-				 qdf_dma_addr_t paddr,
-				 qdf_dma_addr_t frag_desc_paddr,
-				 int reset)
-{
-	uint32_t *fragmentation_descr_field_ptr;
-
-	fragmentation_descr_field_ptr = (uint32_t *)
-		((uint32_t *) htt_tx_desc) +
-		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
-	if (reset) {
-#if defined(HELIUMPLUS_PADDR64)
-		*fragmentation_descr_field_ptr = frag_desc_paddr;
-#else
-		*fragmentation_descr_field_ptr =
-			htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
-#endif
-	} else {
-		*fragmentation_descr_field_ptr = paddr;
-	}
-}
-
 /* PUT THESE AS inline IN ol_htt_tx_api.h */
 
 void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
 {
 }
 
-void htt_tx_pending_discard(htt_pdev_handle pdev)
-{
-	htc_flush_surprise_remove(pdev->htc_pdev);
-}
-
 void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
 {
 }
@@ -628,6 +816,7 @@ int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
 
 }
 
+#ifndef CONFIG_HL_SUPPORT
 #ifdef FEATURE_RUNTIME_PM
 /**
  * htt_tx_resume_handler() - resume callback for the htt endpoint
@@ -645,6 +834,7 @@ void htt_tx_resume_handler(void *context)
 void
 htt_tx_resume_handler(void *context) { }
 #endif
+#endif
 
 qdf_nbuf_t
 htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
@@ -1420,6 +1610,9 @@ htt_tx_desc_init(htt_pdev_handle pdev,
 		HTT_TX_DESC_EXT_TID_SET(local_word0, msdu_info->info.ext_tid);
 		HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
 					      msdu_info->action.cksum_offload);
+		if (pdev->cfg.is_high_latency)
+			HTT_TX_DESC_TX_COMP_SET(local_word0,
+						msdu_info->action.tx_comp_req);
 		HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
 					   msdu_info->action.do_encrypt ?
 					   0 : 1);
@@ -1510,3 +1703,53 @@ htt_tx_desc_init(htt_pdev_handle pdev,
 	qdf_nbuf_data_attr_set(msdu, data_attr);
 }
 
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * htt_tx_group_credit_process() - process group data for
+ *				   credit update indication
+ * @pdev: pointer to htt device.
+ * @msg_word: htt msg
+ *
+ * Return: None
+ */
+void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
+{
+	int group_credit_sign;
+	int32_t group_credit;
+	u_int32_t group_credit_abs, vdev_id_mask, ac_mask;
+	u_int8_t group_abs, group_id;
+	u_int8_t group_offset = 0, more_group_present = 0;
+
+	more_group_present = HTT_TX_CREDIT_TXQ_GRP_GET(*msg_word);
+
+	while (more_group_present) {
+		/* Parse the group data */
+		group_id = HTT_TXQ_GROUP_ID_GET(*(msg_word + 1 +
+						  group_offset));
+		group_credit_abs =
+			HTT_TXQ_GROUP_CREDIT_COUNT_GET(*(msg_word + 1 +
+							 group_offset));
+		group_credit_sign =
+			HTT_TXQ_GROUP_SIGN_GET(*(msg_word + 1 +
+						 group_offset)) ? -1 : 1;
+		group_credit = group_credit_sign * group_credit_abs;
+		group_abs = HTT_TXQ_GROUP_ABS_GET(*(msg_word + 1 +
+						    group_offset));
+
+		vdev_id_mask =
+			HTT_TXQ_GROUP_VDEV_ID_MASK_GET(*(msg_word + 2 +
+							 group_offset));
+		ac_mask = HTT_TXQ_GROUP_AC_MASK_GET(*(msg_word + 2 +
+						      group_offset));
+
+		ol_txrx_update_tx_queue_groups(pdev->txrx_pdev, group_id,
+					       group_credit, group_abs,
+					       vdev_id_mask, ac_mask);
+		more_group_present = HTT_TXQ_GROUP_EXT_GET(*(msg_word + 1 +
+							     group_offset));
+		group_offset += HTT_TX_GROUP_INDEX_OFFSET;
+	}
+	ol_tx_update_group_credit_stats(pdev->txrx_pdev);
+}
+#endif
+

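For reference, the records parsed by htt_tx_group_credit_process() above are sign-magnitude encoded and chained through an "ext" bit, each record spanning HTT_TX_GROUP_INDEX_OFFSET dwords. A self-contained sketch of that decode loop follows; the GRP_* macros and their bit positions are illustrative placeholders, not the real HTT layout (the driver also uses a distinct macro, HTT_TX_CREDIT_TXQ_GRP_GET, to test the first word).

/*
 * Illustrative stand-in for the chained, sign-magnitude credit decode.
 * Bit positions below are placeholders, NOT the real HTT definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define GRP_ID_GET(w)      (((w) >> 0) & 0xff)   /* placeholder layout */
#define GRP_CREDIT_GET(w)  (((w) >> 8) & 0xffff)
#define GRP_SIGN_GET(w)    (((w) >> 24) & 0x1)   /* 1: negative delta */
#define GRP_EXT_GET(w)     (((w) >> 25) & 0x1)   /* 1: another record follows */
#define GRP_INDEX_OFFSET   2                     /* dwords per group record */

static void decode_groups(const uint32_t *msg_word)
{
	unsigned int group_offset = 0;
	int more = GRP_EXT_GET(msg_word[0]);

	while (more) {
		uint32_t w = msg_word[1 + group_offset];
		int sign = GRP_SIGN_GET(w) ? -1 : 1;
		int32_t credit = sign * (int32_t)GRP_CREDIT_GET(w);

		printf("group %u: credit delta %d\n",
		       (unsigned int)GRP_ID_GET(w), credit);

		more = GRP_EXT_GET(w);
		group_offset += GRP_INDEX_OFFSET;
	}
}

int main(void)
{
	/* one record: group 3, delta -5, ext bit clear (no more records) */
	uint32_t msg[3] = { 1u << 25, (1u << 24) | (5u << 8) | 3u, 0 };

	decode_groups(msg);
	return 0;
}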
+ 15 - 1
core/dp/htt/htt_types.h

@@ -170,6 +170,11 @@ struct ipa_uc_rx_ring_elem_t {
 	uint32_t rx_packet_leng;
 };
 
+struct htt_tx_credit_t {
+	qdf_atomic_t bus_delta;
+	qdf_atomic_t target_delta;
+};
+
 #if defined(HELIUMPLUS_PADDR64)
 /**
  * msdu_ext_frag_desc:
@@ -242,6 +247,7 @@ struct htt_pdev_t {
 
 	struct htt_htc_pkt_union *htt_htc_pkt_freelist;
 	struct {
+		int is_high_latency;
 		int is_full_reorder_offload;
 		int default_tx_comp_req;
 		int ce_classify_enabled;
@@ -354,6 +360,9 @@ struct htt_pdev_t {
 		struct htt_rx_hash_bucket **hash_table;
 		uint32_t listnode_offset;
 	} rx_ring;
+#ifdef CONFIG_HL_SUPPORT
+	int rx_desc_size_hl;
+#endif
 	long rx_fw_desc_offset;
 	int rx_mpdu_range_offset_words;
 	int rx_ind_msdu_byte_idx;
@@ -384,13 +393,18 @@ struct htt_pdev_t {
 	struct {
 		int htc_err_cnt;
 	} stats;
-
+#ifdef CONFIG_HL_SUPPORT
+	int cur_seq_num_hl;
+#endif
 	struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
 	struct targetdef_s *targetdef;
 	struct ce_reg_def *target_ce_def;
 
 	struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
 	struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
+
+	struct htt_tx_credit_t htt_tx_credit;
+
 #ifdef DEBUG_RX_RING_BUFFER
 	struct rx_buf_debug *rx_buff_list;
 	int rx_buff_index;

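The htt_tx_credit_t struct added above accumulates credit deltas reported by the bus layer and by the target. A rough model of that delta accounting follows, with C11 atomics standing in for qdf_atomic_t and hypothetical helper names (not driver API):

/*
 * Delta-based credit accounting model; atomic_int stands in for
 * qdf_atomic_t, and credit_add()/credit_drain() are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

struct tx_credit {
	atomic_int bus_delta;     /* credits accumulated from the bus layer */
	atomic_int target_delta;  /* credits accumulated from the target */
};

static void credit_add(atomic_int *delta, int credits)
{
	atomic_fetch_add(delta, credits);
}

static int credit_drain(atomic_int *delta)
{
	/* take everything accumulated so far, leaving zero behind */
	return atomic_exchange(delta, 0);
}

int main(void)
{
	struct tx_credit tc = { 0, 0 };

	credit_add(&tc.bus_delta, 4);
	credit_add(&tc.target_delta, 2);
	printf("bus +%d, target +%d\n",
	       credit_drain(&tc.bus_delta), credit_drain(&tc.target_delta));
	return 0;
}

Draining with an atomic exchange lets a consumer pick up all pending credits without locking, which is presumably why the deltas are kept atomic rather than as plain counters.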
+ 5 - 0
core/dp/ol/inc/ol_cfg.h

@@ -32,7 +32,11 @@
 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle */
 #include <cds_ieee80211_common.h>   /* ieee80211_qosframe_htc_addr4 */
 #include <enet.h>               /* LLC_SNAP_HDR_LEN */
+#if defined(CONFIG_HL_SUPPORT)
+#include "wlan_tgt_def_config_hl.h"
+#else
 #include "wlan_tgt_def_config.h"
+#endif
 #include "ol_txrx_ctrl_api.h"   /* txrx_pdev_cfg_param_t */
 
 /**
@@ -256,6 +260,7 @@ int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev);
  *      1 -> free the tx frame as soon as the download completes
  */
 int ol_cfg_tx_free_at_download(ol_pdev_handle pdev);
+void ol_cfg_set_tx_free_at_download(ol_pdev_handle pdev);
 
 /**
  * @brief Low water mark for target tx credit.

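ol_cfg_set_tx_free_at_download() is the new setter counterpart to ol_cfg_tx_free_at_download() documented above. A standalone model of the flag pair follows; the real flag lives in the pdev config struct, whose layout this diff does not show, so the struct, field, and helper names here are assumptions:

/*
 * Standalone model of the free-at-download flag pair; pdev_cfg and
 * tx_free_at_download are assumed names, not confirmed by this diff.
 */
#include <stdio.h>

struct pdev_cfg {
	int tx_free_at_download; /* 1: free tx frame once download completes */
};

static void cfg_set_tx_free_at_download(struct pdev_cfg *cfg)
{
	cfg->tx_free_at_download = 1;
}

static int cfg_tx_free_at_download(const struct pdev_cfg *cfg)
{
	return cfg->tx_free_at_download;
}

int main(void)
{
	struct pdev_cfg cfg = { 0 };

	cfg_set_tx_free_at_download(&cfg);
	printf("tx_free_at_download = %d\n", cfg_tx_free_at_download(&cfg));
	return 0;
}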
+ 19 - 0
core/dp/ol/inc/ol_htt_api.h

@@ -365,4 +365,23 @@ static inline void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
 void htt_rx_mon_note_capture_channel(htt_pdev_handle pdev, int mon_ch);
 
 void ol_htt_mon_note_chan(ol_txrx_pdev_handle pdev, int mon_ch);
+
+#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)
+
+void htt_dump_bundle_stats(struct htt_pdev_t *pdev);
+void htt_clear_bundle_stats(struct htt_pdev_t *pdev);
+#else
+
+static inline void htt_dump_bundle_stats(struct htt_pdev_t *pdev)
+{
+	return;
+}
+
+static inline void htt_clear_bundle_stats(struct htt_pdev_t *pdev)
+{
+	return;
+}
+#endif
+
+
 #endif /* _OL_HTT_API__H_ */

+ 5 - 3
core/dp/ol/inc/ol_htt_rx_api.h

@@ -824,11 +824,13 @@ void
 htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
 				    qdf_nbuf_t rx_frag_ind_msg,
 				    int *seq_num_start, int *seq_num_end);
+
 /**
- * @brief Return the HL rx desc size
- * @param pdev - the HTT instance the rx data was received on
- * @param msdu_desc - the hl rx desc pointer
+ * htt_rx_msdu_rx_desc_size_hl() - Return the HL rx desc size
+ * @pdev: the HTT instance the rx data was received on.
+ * @msdu_desc: the hl rx desc pointer
  *
+ * Return: HL rx desc size
  */
 uint16_t htt_rx_msdu_rx_desc_size_hl(htt_pdev_handle pdev, void *msdu_desc);
 

+ 10 - 0
core/dp/ol/inc/ol_htt_tx_api.h

@@ -393,6 +393,9 @@ static inline int htt_tx_frag_alloc(htt_pdev_handle pdev,
 	return 0;
 }
 #endif /* defined(HELIUMPLUS_PADDR64) */
+
+#if defined(CONFIG_HL_SUPPORT)
+
 /**
  * @brief Discard all tx frames in the process of being downloaded.
  * @details
@@ -404,7 +407,14 @@ static inline int htt_tx_frag_alloc(htt_pdev_handle pdev,
  * @param[OUT] frag_paddr_lo - physical address of the fragment descriptor
  *                             (MSDU Link Extension Descriptor)
  */
+static inline void htt_tx_pending_discard(htt_pdev_handle pdev)
+{
+	return;
+}
+#else
+
 void htt_tx_pending_discard(htt_pdev_handle pdev);
+#endif
 
 /**
  * @brief Download a MSDU descriptor and (a portion of) the MSDU payload.

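The CONFIG_HL_SUPPORT split above turns htt_tx_pending_discard() into a compile-time no-op for HL builds, while LL builds keep the implementation moved into htt.c earlier in this change. A minimal illustration of the same stub-or-declare idiom, with placeholder names (EXAMPLE_HL_SUPPORT is not a real config flag):

/*
 * Stub-or-declare idiom sketch; names are placeholders only.
 */
#include <stdio.h>

#ifdef EXAMPLE_HL_SUPPORT
/* HL builds compile the call away entirely */
static inline void example_tx_pending_discard(void)
{
}
#else
/* LL builds keep a real implementation */
static void example_tx_pending_discard(void)
{
	printf("flushing pending tx downloads\n");
}
#endif

int main(void)
{
	example_tx_pending_discard();
	return 0;
}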
+ 159 - 0
core/dp/ol/inc/ol_txrx_htt_api.h

@@ -153,6 +153,147 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 
 void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits);
 
+struct rate_report_t {
+	u_int16_t id;
+	u_int16_t phy:4;
+	u_int32_t rate;
+};
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
+
+/**
+ * @brief Process a link status report for all peers.
+ * @details
+ *  The ol_txrx_peer_link_status_handler function performs basic peer link
+ *  status analysis.
+ *
+ *  According to the design, there are 3 kinds of peers, each treated
+ *  differently:
+ *  1) normal: no flow control is applied to the peer
+ *  2) limited: flow control is applied to the peer, but frames may still
+ *              be sent
+ *  3) paused: flow control is applied to the peer, and no frames may
+ *             be sent
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * @param peer_num - the number of peers to be handled
+ * @param peer_link_status - the link status detail message
+ */
+void
+ol_txrx_peer_link_status_handler(
+	ol_txrx_pdev_handle pdev,
+	u_int16_t peer_num,
+	struct rate_report_t *peer_link_status);
+
+
+#else
+static inline void ol_txrx_peer_link_status_handler(
+	ol_txrx_pdev_handle pdev,
+	u_int16_t peer_num,
+	struct rate_report_t *peer_link_status)
+{
+	return;
+}
+#endif
+
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * ol_txrx_update_tx_queue_groups() - update the vdev tx queue group if
+ *				      the vdev id mask and ac mask do not match
+ * @pdev: the data physical device
+ * @group_id: TXQ group id
+ * @credit: TXQ group credit count
+ * @absolute: whether @credit is an absolute value rather than a delta
+ * @vdev_id_mask: TXQ vdev group id mask
+ * @ac_mask: TXQ group access category mask
+ *
+ * Return: None
+ */
+void
+ol_txrx_update_tx_queue_groups(
+	ol_txrx_pdev_handle pdev,
+	u_int8_t group_id,
+	int32_t credit,
+	u_int8_t absolute,
+	u_int32_t vdev_id_mask,
+	u_int32_t ac_mask
+);
+
+/**
+ * ol_tx_desc_update_group_credit() - update group credits for txq group
+ * @pdev: the data physical device
+ * @tx_desc_id: desc id of tx completion message
+ * @credit: number of credits to update
+ * @absolute: whether @credit is an absolute value rather than a delta
+ * @status: tx completion message status
+ *
+ * Return: None
+ */
+void
+ol_tx_desc_update_group_credit(
+	ol_txrx_pdev_handle pdev,
+	u_int16_t tx_desc_id,
+	int credit, u_int8_t absolute, enum htt_tx_status status);
+
+#ifdef DEBUG_HL_LOGGING
+
+/**
+ * ol_tx_update_group_credit_stats() - update group credits stats for txq groups
+ * @pdev: the data physical device
+ *
+ * Return: None
+ */
+void
+ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev);
+
+/**
+ * ol_tx_dump_group_credit_stats() - dump group credits stats for txq groups
+ * @pdev: the data physical device
+ *
+ * Return: None
+ */
+void
+ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev);
+
+/**
+ * ol_tx_clear_group_credit_stats() - clear group credits stats for txq groups
+ * @pdev: the data physical device
+ *
+ * Return: None
+ */
+void
+ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev);
+#else
+
+static inline void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+
+static inline void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+
+static inline void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
+{
+	return;
+}
+#endif
+
+#else
+static inline void
+ol_tx_desc_update_group_credit(
+	ol_txrx_pdev_handle pdev,
+	u_int16_t tx_desc_id,
+	int credit, u_int8_t absolute, enum htt_tx_status status)
+{
+	return;
+}
+#endif
+
 /**
  * @brief Init the total amount of target credit.
  * @details
@@ -576,4 +717,22 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
 				  uint16_t peer_id,
 				  uint8_t tid, uint8_t is_offload);
 
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
+/**
+ * ol_tx_get_max_tx_groups_supported() - get the maximum number of TX queue
+ *					  groups supported
+ * @pdev: the data physical device
+ *
+ * Return: number of max groups supported
+ */
+u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev);
+#else
+
+static inline u_int32_t
+ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
+{
+	return 0;
+}
+#endif
+
 #endif /* _OL_TXRX_HTT_API__H_ */

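rate_report_t above packs the PHY type into a 4-bit field, and ol_txrx_peer_link_status_handler() receives an array of peer_num such reports. A standalone sketch of walking such an array; the struct mirrors rate_report_t with fixed-width types, the values are arbitrary, and the actual normal/limited/paused classification policy is left to the driver:

/*
 * Sketch of iterating a peer link-status report array; the real
 * handler additionally classifies each peer for flow control.
 */
#include <stdint.h>
#include <stdio.h>

struct rate_report {
	uint16_t id;      /* peer id */
	uint16_t phy:4;   /* PHY type, packed into 4 bits */
	uint32_t rate;    /* reported link rate */
};

static void peer_link_status_handler(uint16_t peer_num,
				     const struct rate_report *status)
{
	uint16_t i;

	for (i = 0; i < peer_num; i++)
		printf("peer %u: phy %u, rate %u\n",
		       (unsigned int)status[i].id,
		       (unsigned int)status[i].phy,
		       (unsigned int)status[i].rate);
}

int main(void)
{
	struct rate_report reports[2] = {
		{ .id = 1, .phy = 2, .rate = 54000 },
		{ .id = 2, .phy = 3, .rate = 6000 },
	};

	peer_link_status_handler(2, reports);
	return 0;
}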
+ 297 - 0
target/inc/wlan_tgt_def_config_hl.h

@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef __WLAN_TGT_DEF_CONFIG_H__
+#define __WLAN_TGT_DEF_CONFIG_H__
+
+/*
+ * TODO: consider whether a separate config file from the LL case is needed.
+ */
+
+/*
+ * Set of default target config values that can be overwritten by the platform.
+ */
+
+#ifdef QCA_SUPPORT_INTEGRATED_SOC
+#define CFG_TGT_NUM_VDEV                3 /* STA, P2P device, P2P GO/Cli */
+#else
+/*
+ * default limit of VAPs per device.
+ */
+#define CFG_TGT_NUM_VDEV                3
+#endif
+/*
+ * We would need 1 AST entry per peer. Scale it by a factor of 2 to minimize
+ * hash collisions.
+ * TODO: This scaling factor would be taken care inside the WAL in the future.
+ */
+#define CFG_TGT_NUM_PEER_AST            2
+
+/* # of WDS entries to support.
+ */
+#define CFG_TGT_WDS_ENTRIES             0
+
+/* MAC DMA burst size. 0: 128B - default, 1: 256B, 2: 64B
+ */
+#define CFG_TGT_DEFAULT_DMA_BURST_SIZE   0
+
+/* Fixed delimiters to be inserted after every MPDU
+ */
+#define CFG_TGT_DEFAULT_MAC_AGGR_DELIM   0
+
+/*
+ * This value may need to be fine tuned, but a constant value will
+ * probably always be appropriate; it is probably not necessary to
+ * determine this value dynamically.
+ */
+#define CFG_TGT_AST_SKID_LIMIT          6
+/*
+ * total number of peers per device.
+ * currently set to 8 to bring up IP3.9, due to a memory size limitation
+ */
+#define CFG_TGT_NUM_PEERS               8
+/*
+ *  max number of peers per device.
+ */
+#define CFG_TGT_NUM_PEERS_MAX           8
+/*
+ * In offload mode the target supports features like WOW, chatter and other
+ * protocol offloads. To support them, some functionality such as reorder
+ * buffering and PN checking needs to be done in the target. This determines
+ * the maximum number of peers supported by the target in offload mode.
+ */
+#define CFG_TGT_NUM_OFFLOAD_PEERS       0
+/*
+ * Number of reorder buffers used in offload mode
+ */
+#define CFG_TGT_NUM_OFFLOAD_REORDER_BUFFS   0
+/*
+ * keys per peer node
+ */
+#define CFG_TGT_NUM_PEER_KEYS           2
+/*
+ * total number of TX/RX data TIDs
+ */
+#define CFG_TGT_NUM_TIDS      (2 * (CFG_TGT_NUM_PEERS + \
+					CFG_TGT_NUM_VDEV))
+/*
+ * max number of Tx TIDS
+ */
+#define CFG_TGT_NUM_TIDS_MAX   (2 * (CFG_TGT_NUM_PEERS_MAX + \
+					CFG_TGT_NUM_VDEV))
+/*
+ * number of multicast keys.
+ */
+#define CFG_TGT_NUM_MCAST_KEYS          8
+/*
+ * A value of 3 would probably suffice - one for the control stack, one for
+ * the data stack, and one for debugging.
+ * This value may need to be fine tuned, but a constant value will
+ * probably always be appropriate; it is probably not necessary to
+ * determine this value dynamically.
+ */
+#define CFG_TGT_NUM_PDEV_HANDLERS       8
+/*
+ * A value of 3 would probably suffice - one for the control stack, one for
+ * the data stack, and one for debugging.
+ * This value may need to be fine tuned, but a constant value will
+ * probably always be appropriate; it is probably not necessary to
+ * determine this value dynamically.
+ */
+#define CFG_TGT_NUM_VDEV_HANDLERS       4
+/*
+ * set this to 8:
+ *     one for WAL internals (connection pause),
+ *     one for the control stack,
+ *     one for the data stack,
+ *     and one for debugging.
+ * This value may need to be fine tuned, but a constant value will
+ * probably always be appropriate; it is probably not necessary to
+ * determine this value dynamically.
+ */
+#define CFG_TGT_NUM_HANDLERS            14
+/*
+ * set this to 3: one for the control stack, one for
+ * the data stack, and one for debugging.
+ * This value may need to be fine tuned, but a constant value will
+ * probably always be appropriate; it is probably not necessary to
+ * determine this value dynamically.
+ */
+#define CFG_TGT_NUM_PEER_HANDLERS       32
+/*
+ * Set this to 0x7 for Peregrine (3 chains).
+ * This needs to be set dynamically based on the HW capability.
+ * The value here is for Rome.
+ */
+#define CFG_TGT_DEFAULT_TX_CHAIN_MASK   0x3
+/*
+ * Set this to 0x7 for Peregrine (3 chains).
+ * This needs to be set dynamically based on the HW capability.
+ * The value here is for Rome.
+ */
+#define CFG_TGT_DEFAULT_RX_CHAIN_MASK   0x3
+/* 100 ms for video, best-effort, and background */
+#define CFG_TGT_RX_TIMEOUT_LO_PRI       100
+/* 40 ms for voice */
+#define CFG_TGT_RX_TIMEOUT_HI_PRI       40
+
+/* AR9888 unified: ethernet decap mode is the default */
+#define CFG_TGT_RX_DECAP_MODE (0x2)
+/* Decap to native Wifi header */
+#define CFG_TGT_RX_DECAP_MODE_NWIFI (0x1)
+
+/* Decap to raw mode header */
+#define CFG_TGT_RX_DECAP_MODE_RAW   (0x0)
+
+/* maximum number of pending scan requests */
+#define CFG_TGT_DEFAULT_SCAN_MAX_REQS   0x4
+
+/* maximum number of scan event handlers */
+#define CFG_TGT_DEFAULT_SCAN_MAX_HANDLERS   0x4
+
+/* maximum number of VDEV that could use BMISS offload */
+#define CFG_TGT_DEFAULT_BMISS_OFFLOAD_MAX_VDEV   0x2
+
+/* maximum number of VDEV offload Roaming to support */
+#define CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_VDEV   0x2
+
+/* maximum number of AP profiles pushed to offload Roaming */
+#define CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_PROFILES   0x8
+
+/* maximum number of VDEV offload GTK to support */
+#define CFG_TGT_DEFAULT_GTK_OFFLOAD_MAX_VDEV   0x2
+
+/* default: mcast->ucast disabled */
+#define CFG_TGT_DEFAULT_NUM_MCAST_GROUPS 0
+#define CFG_TGT_DEFAULT_NUM_MCAST_TABLE_ELEMS 0
+#define CFG_TGT_DEFAULT_MCAST2UCAST_MODE 0 /* disabled */
+
+/*
+ * Specify how much memory the target should allocate for a debug log of
+ * tx PPDU meta-information (how large the PPDU was, when it was sent,
+ * whether it was successful, etc.)
+ * The size of the log records is configurable, from a minimum of 28 bytes
+ * to a maximum of about 300 bytes.  A typical configuration would result
+ * in each log record being about 124 bytes.
+ * Thus, 1KB of log space can hold about 30 small records, 3 large records,
+ * or about 8 typical-sized records.
+ */
+#define CFG_TGT_DEFAULT_TX_DBG_LOG_SIZE 1024 /* bytes */
+
+/* target based fragment timeout and MPDU duplicate detection */
+#define CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+/* Default VoW configuration */
+#define CFG_TGT_DEFAULT_VOW_CONFIG   0
+
+/*
+ * total number of descriptors to use in the target
+ */
+#ifndef HIF_SDIO
+#define CFG_TGT_NUM_MSDU_DESC    (32)
+#else
+#define CFG_TGT_NUM_MSDU_DESC    (0)
+#endif
+/*
+ * Maximum number of frag table entries
+ */
+#define CFG_TGT_MAX_FRAG_TABLE_ENTRIES 2
+
+/*
+ * number of vdevs that can support TDLS
+ */
+#define CFG_TGT_NUM_TDLS_VDEVS    1
+
+/*
+ * number of peers that each TDLS vdev can track
+ */
+#define CFG_TGT_NUM_TDLS_CONN_TABLE_ENTRIES    32
+/*
+ * number of TDLS concurrent sleep STAs
+ */
+#define CFG_TGT_NUM_TDLS_CONC_SLEEP_STAS    1
+
+/*
+ * number of TDLS concurrent buffer STAs
+ */
+#define CFG_TGT_NUM_TDLS_CONC_BUFFER_STAS    1
+
+#define CFG_TGT_MAX_MULTICAST_FILTER_ENTRIES 5
+/*
+ * Maximum number of VDEV that beacon tx offload will support
+ */
+#ifdef HIF_SDIO
+#define CFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV 2
+#else
+#define CFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV 1
+#endif
+
+/*
+ * HT: enable highest MCS by default
+ */
+#define CFG_TGT_DEFAULT_GTX_HT_MASK     0x8080
+/*
+ * VHT: enable highest MCS by default
+ */
+#define CFG_TGT_DEFAULT_GTX_VHT_MASK        0x80200
+/*
+ * reserved for future use; bit 30 is used for fixed TPC, bits 0-3 for
+ * power save balance
+ */
+#define CFG_TGT_DEFAULT_GTX_USR_CFG     0xa
+/*
+ * threshold to enable GTX
+ */
+#define CFG_TGT_DEFAULT_GTX_PER_THRESHOLD   3
+/*
+ * margin to move back when PER > margin + threshold
+ */
+#define CFG_TGT_DEFAULT_GTX_PER_MARGIN      2
+/*
+ * step for every move
+ */
+#define CFG_TGT_DEFAULT_GTX_TPC_STEP        1
+/*
+ * lowest TPC
+ */
+#define CFG_TGT_DEFAULT_GTX_TPC_MIN     0
+/*
+ * enable all BW 20/40/80/160
+ */
+#define CFG_TGT_DEFAULT_GTX_BW_MASK     0xf
+
+/*
+ * number of vdevs that can support OCB
+ */
+#define CFG_TGT_NUM_OCB_VDEVS			1
+
+/*
+ * maximum number of channels that can do OCB
+ */
+#define CFG_TGT_NUM_OCB_CHANNELS		2
+
+/*
+ * maximum number of channels in an OCB schedule
+ */
+#define CFG_TGT_NUM_OCB_SCHEDULES		2
+
+#endif  /*__WLAN_TGT_DEF_CONFIG_H__ */
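As a quick sanity check of the derived values in this header: with the defaults above, CFG_TGT_NUM_TIDS and CFG_TGT_NUM_TIDS_MAX both evaluate to 2 * (8 + 3) = 22. A compile-time verification, with the macros copied from this header:

/*
 * Compile-time check of the derived TID counts; macros copied from
 * wlan_tgt_def_config_hl.h above.
 */
#include <stdio.h>

#define CFG_TGT_NUM_VDEV       3
#define CFG_TGT_NUM_PEERS      8
#define CFG_TGT_NUM_PEERS_MAX  8
#define CFG_TGT_NUM_TIDS       (2 * (CFG_TGT_NUM_PEERS + CFG_TGT_NUM_VDEV))
#define CFG_TGT_NUM_TIDS_MAX   (2 * (CFG_TGT_NUM_PEERS_MAX + CFG_TGT_NUM_VDEV))

_Static_assert(CFG_TGT_NUM_TIDS == 22, "2 * (8 + 3) TIDs");
_Static_assert(CFG_TGT_NUM_TIDS_MAX == 22, "2 * (8 + 3) TIDs max");

int main(void)
{
	printf("TIDs: %d (max %d)\n", CFG_TGT_NUM_TIDS, CFG_TGT_NUM_TIDS_MAX);
	return 0;
}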