
qcacld-3.0: Add IPA UC WDI 1.0 backward compatibility

Update qcacld-3.0 to support IPA UC WDI 1.0 backward compatibility
for the Rome WiFi device.

Change-Id: I33084efd6dd3434d1f6baec49de43fab75c63e7f
CRs-fixed: 952114
Manikandan Mohan · 9 years ago · parent commit d2f458f35c
4 changed files with 340 additions and 103 deletions:
  1. Kbuild (+4, -0)
  2. core/dp/htt/htt_h2t.c (+95, -0)
  3. core/dp/htt/htt_rx.c (+103, -51)
  4. core/dp/htt/htt_tx.c (+138, -52)

Kbuild (+4, -0)

@@ -914,6 +914,10 @@ CDEFINES += -DADRASTEA_SHADOW_REGISTERS
 CDEFINES += -DADRASTEA_RRI_ON_DDR
 endif
 
+ifneq (y,$(filter y,$(CONFIG_CNSS_EOS) $(CONFIG_ICNSS) $(CONFIG_CNSS_ADRASTEA)))
+CDEFINES += -DQCA_WIFI_2_0
+endif
+
 ifeq ($(CONFIG_WLAN_FASTPATH), y)
 CDEFINES +=	-DWLAN_FEATURE_FASTPATH
 endif
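
The ifneq/filter guard above defines QCA_WIFI_2_0 only when none of CONFIG_CNSS_EOS, CONFIG_ICNSS, and CONFIG_CNSS_ADRASTEA is y, i.e. on Rome-class (non-Adrastea) builds. A minimal C sketch of the resulting compile-time split, assuming the QCA_WIFI_3_0 guard seen in htt_rx.c below is the Adrastea-side counterpart:

/* Illustrative only: how the flag added above partitions the driver.
 * Rome builds get -DQCA_WIFI_2_0 and compile the WDI 1.0 paths; Adrastea
 * builds instead take the WDI 2.0 paths guarded by QCA_WIFI_3_0. */
#ifdef QCA_WIFI_2_0
static const char *wdi_version(void) { return "WDI 1.0 (Rome)"; }
#else
static const char *wdi_version(void) { return "WDI 2.0"; }
#endif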

core/dp/htt/htt_h2t.c (+95, -0)

@@ -648,6 +648,100 @@ htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
  * Return: 0 success
  *         A_NO_MEMORY No memory fail
  */
+#ifdef QCA_WIFI_2_0
+/* Rome supports only WDI 1.0 */
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
+				pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word,
+	       (unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+#else
 int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
 {
 	struct htt_htc_pkt *pkt;
@@ -791,6 +885,7 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
 
 	return A_OK;
 }
+#endif
 
 /**
  * htt_h2t_ipa_uc_set_active() - Propagate WDI path enable/disable to firmware

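For reference, the WDI 1.0 configuration message assembled above is nine consecutive 32-bit words, one per msg_word increment. A hedged sketch of that layout as a struct (the struct and field names are illustrative; the authoritative bit positions come from the HTT_WDI_IPA_CFG_*_SET macros):

#include <stdint.h>

/* Hypothetical mirror of the 9-word HTT_H2T_MSG_TYPE_WDI_IPA_CFG message
 * built by the WDI 1.0 path above; field order follows the code. */
struct wdi_ipa_cfg_v1_sketch {
	uint32_t msg_type_tx_pool_size;	/* word 0: msg type + TX pkt pool size */
	uint32_t tx_comp_ring_base;	/* word 1: TX completion ring base paddr */
	uint32_t tx_comp_ring_size;	/* word 2: TX completion ring size */
	uint32_t tx_comp_wr_idx_addr;	/* word 3: TX completion write index paddr */
	uint32_t tx_ce_wr_idx_addr;	/* word 4: TX CE write index paddr */
	uint32_t rx_ind_ring_base;	/* word 5: RX indication ring base paddr */
	uint32_t rx_ind_ring_size;	/* word 6: RX indication ring size */
	uint32_t rx_ind_rd_idx_addr;	/* word 7: RX indication read index paddr */
	uint32_t rx_ind_wr_idx_addr;	/* word 8: RX indication write index paddr */
};
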
core/dp/htt/htt_rx.c (+103, -51)

@@ -2288,150 +2288,202 @@ fail1:
 }
 
 #ifdef IPA_OFFLOAD
+#ifdef QCA_WIFI_3_0
 /**
- * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
+ * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
  * @pdev: htt context
- * @rx_ind_ring_size: rx ring size
+ * @rx_ind_ring_elements: rx ring elements
  *
  * Return: 0 success
  */
-int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
 			 unsigned int rx_ind_ring_elements)
 {
-	pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
+	/* Allocate RX2 indication ring */
+	/* RX2 IND ring element
+	 *   4bytes: pointer
+	 *   2bytes: VDEV ID
+	 *   2bytes: length */
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
 		cdf_os_mem_alloc_consistent(
 			pdev->osdev,
 			rx_ind_ring_elements *
 			sizeof(struct ipa_uc_rx_ring_elem_t),
-			&pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx_ind_ring_base),
+						 rx2_ind_ring_base),
 						memctx));
-	if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
 		cdf_print("%s: RX IND RING alloc fail", __func__);
 		return -ENOBUFS;
 	}
 
 	/* RX indication ring size, by bytes */
-	pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
 		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
-	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
-		pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
 
 	/* Allocate RX process done index */
-	pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
+	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
 		cdf_os_mem_alloc_consistent(
 			pdev->osdev,
 			4,
-			&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
 						 rx_ipa_prc_done_idx),
 						memctx));
-	if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
 		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx_ind_ring_base),
+						 rx2_ind_ring_base),
 						memctx));
 		return -ENOBUFS;
 	}
-	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
+	return 0;
+}
+#else
+int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
+			 unsigned int rx_ind_ring_elements)
+{
+	return 0;
+}
+#endif
 
-	pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
+/**
+ * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
+ * @pdev: htt context
+ * @rx_ind_ring_size: rx ring size
+ *
+ * Return: 0 success
+ */
+int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int rx_ind_ring_elements)
+{
+	int ret = 0;
+	/* Allocate RX indication ring */
+	/* RX IND ring element
+	 *   4bytes: pointer
+	 *   2bytes: VDEV ID
+	 *   2bytes: length */
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
 		cdf_os_mem_alloc_consistent(
 			pdev->osdev,
 			rx_ind_ring_elements *
-			sizeof(cdf_dma_addr_t),
-			&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			sizeof(struct ipa_uc_rx_ring_elem_t),
+			&pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx2_ind_ring_base),
+						 rx_ind_ring_base),
 						memctx));
-	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+	if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
 		cdf_print("%s: RX IND RING alloc fail", __func__);
 		return -ENOBUFS;
 	}
 
 	/* RX indication ring size, by bytes */
-	pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
-		rx_ind_ring_elements * sizeof(cdf_dma_addr_t);
-	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
-		pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
+		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+		pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
 
 	/* Allocate RX process done index */
-	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
+	pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
 		cdf_os_mem_alloc_consistent(
 			pdev->osdev,
 			4,
-			&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
 						 rx_ipa_prc_done_idx),
 						memctx));
-	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+	if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
 		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx2_ind_ring_base),
+						 rx_ind_ring_base),
 						memctx));
 		return -ENOBUFS;
 	}
-	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
-	return 0;
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
+
+	ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
+	return ret;
 }
 
-int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+#ifdef QCA_WIFI_3_0
+/**
+ * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
+ * @pdev: htt context
+ *
+ * Return: None
+ */
+void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
 {
-	if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+	if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
-			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx_ind_ring_base),
+						 rx2_ind_ring_base),
 						memctx));
 	}
 
-	if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+	if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
 			4,
 			pdev->ipa_uc_rx_rsc.
 			rx_ipa_prc_done_idx.vaddr,
-			pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
 						 rx_ipa_prc_done_idx),
 						memctx));
 	}
-	if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+}
+#else
+void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
+{
+	return;
+}
+#endif
+
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
-			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
-						 rx2_ind_ring_base),
+						 rx_ind_ring_base),
 						memctx));
 	}
 
-	if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+	if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
 			4,
 			pdev->ipa_uc_rx_rsc.
 			rx_ipa_prc_done_idx.vaddr,
-			pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
 						 rx2_ipa_prc_done_idx),
 						memctx));
 	}
+
+	htt_rx_ipa_uc_free_wdi2_rsc(pdev);
 	return 0;
 }
 #endif /* IPA_OFFLOAD */
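
Both attach variants size the indication ring in units of struct ipa_uc_rx_ring_elem_t, which the in-code comments describe as an 8-byte record. A sketch of such an element, assuming the field names (the real definition lives in the driver headers):

#include <stdint.h>

/* Illustrative 8-byte RX indication ring element matching the
 * "4bytes: pointer / 2bytes: VDEV ID / 2bytes: length" comments above;
 * names are assumptions, not the driver's definition. */
struct ipa_uc_rx_ring_elem_sketch {
	uint32_t rx_packet_paddr;	/* 4 bytes: RX buffer physical address */
	uint16_t vdev_id;		/* 2 bytes: virtual device ID */
	uint16_t rx_packet_len;		/* 2 bytes: packet length */
};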

core/dp/htt/htt_tx.c (+138, -52)

@@ -833,17 +833,19 @@ void htt_tx_desc_display(void *tx_desc)
 #endif
 
 #ifdef IPA_OFFLOAD
+#ifdef QCA_WIFI_2_0
 /**
- * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
+ * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
  * @pdev: htt context
- * @uc_tx_buf_sz: single tx buffer size
- * @uc_tx_buf_cnt: total tx buffer count
- * @uc_tx_partition_base: tx buffer partition start
+ * @uc_tx_buf_sz: TX buffer size
+ * @uc_tx_buf_cnt: TX Buffer count
+ * @uc_tx_partition_base: IPA UC TX partition base value
+ *
+ * Allocate WDI TX buffers. Also note Rome supports only WDI 1.0.
  *
  * Return: 0 success
- *         ENOBUFS No memory fail
  */
-int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 			 unsigned int uc_tx_buf_sz,
 			 unsigned int uc_tx_buf_cnt,
 			 unsigned int uc_tx_partition_base)
@@ -853,52 +855,68 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 	cdf_dma_addr_t buffer_paddr;
 	uint32_t *header_ptr;
 	uint32_t *ring_vaddr;
-	int return_code = 0;
-	unsigned int tx_comp_ring_size;
+#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 16
+#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 32
 
-	/* Allocate CE Write Index WORD */
-	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
-		cdf_os_mem_alloc_consistent(
-			pdev->osdev,
-			4,
-			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
-			cdf_get_dma_mem_context(
-				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
-				memctx));
-	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
-		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
-		return -ENOBUFS;
-	}
+	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
+	/* Allocate TX buffers as many as possible */
+	for (tx_buffer_count = 0;
+	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
+		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+					      uc_tx_buf_sz, 0, 4, false);
+		if (!buffer_vaddr) {
+			cdf_print("%s: TX BUF alloc fail, loop index: %d",
+				  __func__, tx_buffer_count);
+			return tx_buffer_count;
+		}
 
-	/* Allocate TX COMP Ring */
-	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
-	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
-		cdf_os_mem_alloc_consistent(
-			pdev->osdev,
-			tx_comp_ring_size,
-			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
-			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
-						 tx_comp_base),
-						memctx));
-	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
-		cdf_print("%s: TX COMP ring alloc fail", __func__);
-		return_code = -ENOBUFS;
-		goto free_tx_ce_idx;
-	}
+		/* Init buffer */
+		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
 
-	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
+		/* HTT control header */
+		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
+		header_ptr++;
 
-	/* Allocate TX BUF vAddress Storage */
-	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
-		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
-					      sizeof(cdf_nbuf_t));
-	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
-		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
-		return_code = -ENOBUFS;
-		goto free_tx_comp_base;
+		/* PKT ID */
+		*header_ptr |= ((uint16_t) uc_tx_partition_base +
+				tx_buffer_count) << 16;
+
+		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
+		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		header_ptr++;
+		*header_ptr = (uint32_t) (buffer_paddr +
+						IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
+		header_ptr++;
+		*header_ptr = 0xFFFFFFFF;
+
+		/* FRAG Header */
+		header_ptr++;
+		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
+
+		*ring_vaddr = buffer_paddr;
+		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
+			buffer_vaddr;
+		/* Memory barrier to ensure actual value updated */
+
+		ring_vaddr++;
 	}
-	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
-		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+	return tx_buffer_count;
+}
+#else
+int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
+			 unsigned int uc_tx_buf_sz,
+			 unsigned int uc_tx_buf_cnt,
+			 unsigned int uc_tx_partition_base)
+{
+	unsigned int tx_buffer_count;
+	cdf_nbuf_t buffer_vaddr;
+	uint32_t buffer_paddr;
+	uint32_t *header_ptr;
+	uint32_t *ring_vaddr;
+#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
+#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 64
+#define IPA_UC_TX_BUF_TSO_HDR_SIZE 6
 
 	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
 	/* Allocate TX buffers as many as possible */
@@ -909,7 +927,7 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 		if (!buffer_vaddr) {
 			cdf_print("%s: TX BUF alloc fail, loop index: %d",
 				  __func__, tx_buffer_count);
-			return 0;
+			return tx_buffer_count;
 		}
 
 		/* Init buffer */
@@ -930,7 +948,8 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 
 		/* Frag Desc Pointer */
 		/* 64bits descriptor, Low 32bits */
-		*header_ptr = (uint32_t) (buffer_paddr + 20);
+		*header_ptr = (uint32_t) (buffer_paddr +
+						IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
 		header_ptr++;
 
 		/* 64bits descriptor, high 32bits */
@@ -943,8 +962,8 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 
 		/* FRAG Header */
 		/* 6 words TSO header */
-		header_ptr += 6;
-		*header_ptr = buffer_paddr + 64;
+		header_ptr += IPA_UC_TX_BUF_TSO_HDR_SIZE;
+		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
 
 		*ring_vaddr = buffer_paddr;
 		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
@@ -953,8 +972,75 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 
 		ring_vaddr += 2;
 	}
+	return tx_buffer_count;
+}
+#endif
+
+/**
+ * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
+ * @pdev: htt context
+ * @uc_tx_buf_sz: single tx buffer size
+ * @uc_tx_buf_cnt: total tx buffer count
+ * @uc_tx_partition_base: tx buffer partition start
+ *
+ * Return: 0 success
+ *         ENOBUFS No memory fail
+ */
+int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int uc_tx_buf_sz,
+			 unsigned int uc_tx_buf_cnt,
+			 unsigned int uc_tx_partition_base)
+{
+	int return_code = 0;
+	unsigned int tx_comp_ring_size;
+
+	/* Allocate CE Write Index WORD */
+	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
+				memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* Allocate TX COMP Ring */
+	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
+	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			tx_comp_ring_size,
+			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
+						 tx_comp_base),
+						memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+		cdf_print("%s: TX COMP ring alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_ce_idx;
+	}
+
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
+
+	/* Allocate TX BUF vAddress Storage */
+	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
+		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
+					      sizeof(cdf_nbuf_t));
+	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
+		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_comp_base;
+	}
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
+		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+
+	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
+		pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
 
-	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
 
 	return 0;
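
The two htt_tx_ipa_uc_wdi_tx_buf_alloc() variants above differ only in per-buffer layout and completion-ring stride: the WDI 1.0 (Rome) variant places the fragment descriptor at offset 16 and the fragment header at offset 32, writing one 32-bit completion entry per buffer (ring_vaddr++), while the WDI 2.0 variant uses offsets 20 and 64, steps over a 6-word TSO header, and writes two words per entry for its 64-bit descriptor (ring_vaddr += 2). A summary sketch using only the offsets visible in the hunks above (the shortened macro names are illustrative):

/* Illustrative recap of the per-buffer offsets defined inside the two
 * allocator variants; the values are taken directly from the hunks. */
#ifdef QCA_WIFI_2_0			/* Rome, WDI 1.0 */
#define FRAG_DESC_OFFSET	16	/* 32-bit frag descriptor pointer */
#define FRAG_HDR_OFFSET		32	/* fragment header */
#define COMP_RING_WORDS		1	/* ring_vaddr++ */
#else					/* WDI 2.0 */
#define FRAG_DESC_OFFSET	20	/* 64-bit frag descriptor, low + high */
#define FRAG_HDR_OFFSET		64	/* after the 6-word TSO header */
#define COMP_RING_WORDS		2	/* ring_vaddr += 2 */
#endif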