
qcacmn: Add changes for Napier flow control

Add support for flow control on the convergence branch.
Tx descriptors are allocated dynamically when a vdev comes up,
and Tx queues are paused and unpaused internally in the host
based on the stop and start thresholds.
All changes are guarded by the compilation flag
QCA_LL_TX_FLOW_CONTROL_V2.

Change-Id: I0ccb80b0099f39efad52ccd7d47f2709fdee2a93
CRs-Fixed: 2040457
Author: Manjunathappa Prakash
Commit: ced7ea6cf2

+ 8 - 6
dp/inc/cdp_txrx_flow_ctrl_v2.h

@@ -36,7 +36,7 @@
 /**
  * cdp_register_pause_cb() - Register flow control callback function pointer
  * @soc - data path soc handle
- * @pause_cb - callback function pointer
+ * @pause_cb - pause callback to be registered
  *
  * Register flow control callback function pointer and client context pointer
  *
@@ -44,7 +44,7 @@
  */
 static inline QDF_STATUS
 cdp_register_pause_cb(ol_txrx_soc_handle soc,
-		ol_tx_pause_callback_fp pause_cb)
+		tx_pause_callback pause_cb)
 {
 	if (!soc || !soc->ops || !soc->ops->flowctl_ops) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
@@ -53,7 +53,7 @@ cdp_register_pause_cb(ol_txrx_soc_handle soc,
 	}
 
 	if (soc->ops->flowctl_ops->register_pause_cb)
-		return soc->ops->flowctl_ops->register_pause_cb(pause_cb);
+		return soc->ops->flowctl_ops->register_pause_cb(soc, pause_cb);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -93,19 +93,21 @@ cdp_set_desc_global_pool_size(ol_txrx_soc_handle soc,
  * return none
  */
 static inline void
-cdp_dump_flow_pool_info(ol_txrx_soc_handle soc)
+cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
 {
+	void *dp_soc = (void *)soc;
+
 	if (!soc || !soc->ops || !soc->ops->flowctl_ops) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
 		return;
 	}
 
 	if (soc->ops->flowctl_ops->dump_flow_pool_info)
-		return soc->ops->flowctl_ops->dump_flow_pool_info();
+		return soc->ops->flowctl_ops->dump_flow_pool_info(dp_soc);
 
 	return;
 }
 
-
 #endif /* _CDP_TXRX_FC_V2_H_ */

+ 1 - 0
dp/inc/cdp_txrx_handle.h

@@ -31,5 +31,6 @@ struct cdp_pdev;
 struct cdp_vdev;
 struct cdp_peer;
 struct cdp_raw_ast;
+struct cdp_soc;
 
 #endif

+ 2 - 2
dp/inc/cdp_txrx_mob_def.h

@@ -394,10 +394,10 @@ typedef void (*tx_flow_control_fp)(void *osif_dev,
 			 bool tx_resume);
 
 /**
- * @typedef ol_tx_pause_callback_fp
+ * @typedef tx_pause_callback
  * @brief OSIF function registered with the data path
  */
-typedef void (*ol_tx_pause_callback_fp)(uint8_t vdev_id,
+typedef void (*tx_pause_callback)(uint8_t vdev_id,
 		enum netif_action_type action,
 		enum netif_reason_type reason);
 

+ 3 - 3
dp/inc/cdp_txrx_ops.h

@@ -726,10 +726,10 @@ struct cdp_cfg_ops {
  * @dump_flow_pool_info:
  */
 struct cdp_flowctl_ops {
-	QDF_STATUS (*register_pause_cb)(ol_tx_pause_callback_fp);
-
+	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
+					tx_pause_callback);
 	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
-	void (*dump_flow_pool_info)(void);
+	void (*dump_flow_pool_info)(void *);
 };
 
 /**

+ 5 - 0
dp/wifi3.0/dp_internal.h

@@ -348,4 +348,9 @@ static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
 	return 0;
 }
 #endif /* CONFIG_WIN */
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+void dp_tx_dump_flow_pool_info(void *soc);
+int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
+	bool force);
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 #endif /* #ifndef _DP_INTERNAL_H_ */

+ 21 - 3
dp/wifi3.0/dp_main.c

@@ -29,6 +29,7 @@
 #include "dp_types.h"
 #include "dp_internal.h"
 #include "dp_tx.h"
+#include "dp_tx_desc.h"
 #include "dp_rx.h"
 #include <cdp_txrx_handle.h>
 #include <wlan_cfg.h>
@@ -38,6 +39,7 @@
 #include "dp_rx_mon.h"
 #include "htt_stats.h"
 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
+#include "cdp_txrx_flow_ctrl_v2.h"
 
 #define DP_INTR_POLL_TIMER_MS	10
 #define DP_WDS_AGING_TIMER_DEFAULT_MS	6000
@@ -1593,7 +1595,7 @@ static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
 
 /*
  * dp_soc_detach_wifi3() - Detach txrx SOC
- * @txrx_soc: DP SOC handle
+ * @txrx_soc: DP SOC handle; struct cdp_soc_t is the first element of struct dp_soc
  *
  */
 static void dp_soc_detach_wifi3(void *txrx_soc)
@@ -1864,6 +1866,7 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
 	struct dp_soc *soc = pdev->soc;
 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
+	int tx_ring_size;
 
 	if (!vdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1896,6 +1899,7 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
 	vdev->dscp_tid_map_id = 0;
 	vdev->mcast_enhancement_en = 0;
+	tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
 
 	/* TODO: Initialize default HTT meta data that will be used in
 	 * TCL descriptors for packets transmitted from this VDEV
@@ -1909,6 +1913,11 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 
 	dp_tx_vdev_attach(vdev);
 
+	if (QDF_STATUS_SUCCESS != dp_tx_flow_pool_map_handler(pdev, vdev_id,
+					FLOW_TYPE_VDEV, vdev_id, tx_ring_size))
+		goto fail1;
+
 #ifdef DP_INTR_POLL_BASED
 	if (wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
 		if (pdev->vdev_count == 1)
@@ -1932,6 +1941,9 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 
 	return (struct cdp_vdev *)vdev;
 
+fail1:
+	dp_tx_vdev_detach(vdev);
+	qdf_mem_free(vdev);
 fail0:
 	return NULL;
 }
@@ -2012,6 +2024,8 @@ static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
 	}
 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
+	dp_tx_flow_pool_unmap_handler(pdev, vdev->vdev_id, FLOW_TYPE_VDEV,
+		vdev->vdev_id);
 	dp_tx_vdev_detach(vdev);
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
 		FL("deleting vdev object %p (%pM)"), vdev, vdev->mac_addr.raw);
@@ -3983,7 +3997,7 @@ static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value)
 		break;
 
 	case CDP_DUMP_TX_FLOW_POOL_INFO:
-		/* TODO: NOT IMPLEMENTED */
+		cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
 		break;
 
 	case CDP_TXRX_DESC_STATS:
@@ -4181,7 +4195,11 @@ static struct cdp_misc_ops dp_ops_misc = {
 };
 
 static struct cdp_flowctl_ops dp_ops_flowctl = {
-	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
+	/* WIFI 3.0 DP: ops implemented as required */
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	.register_pause_cb = dp_txrx_register_pause_cb,
+	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 };
 
 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {

+ 67 - 41
dp/wifi3.0/dp_tx.c

@@ -30,7 +30,11 @@
 #endif
 
 #ifdef TX_PER_PDEV_DESC_POOL
-	#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
+#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
+#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
 #else
 	#ifdef TX_PER_VDEV_DESC_POOL
@@ -67,6 +71,7 @@
 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
 		qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
 {
+	/* get flow id: under flow control V2 the desc pool id is the vdev id */
 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
 
@@ -500,7 +505,6 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
 		uint32_t *meta_data)
 {
-	QDF_STATUS status;
 	uint8_t align_pad;
 	uint8_t is_exception = 0;
 	uint8_t htt_hdr_size;
@@ -509,18 +513,8 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
 	struct dp_pdev *pdev = vdev->pdev;
 	struct dp_soc *soc = pdev->soc;
 
-	/* Flow control/Congestion Control processing */
-	status = dp_tx_flow_control(vdev);
-	if (QDF_STATUS_E_RESOURCES == status) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-				"%s Tx Resource Full\n", __func__);
-		DP_STATS_INC(vdev, tx_i.dropped.res_full, 1);
-		/* TODO Stop Tx Queues */
-	}
-
 	/* Allocate software Tx descriptor */
 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
-
 	if (qdf_unlikely(!tx_desc)) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			"%s Tx Desc Alloc Failed\n", __func__);
@@ -643,23 +637,12 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
 		uint8_t desc_pool_id)
 {
 	struct dp_tx_desc_s *tx_desc;
-	QDF_STATUS status;
 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
 	struct dp_pdev *pdev = vdev->pdev;
 	struct dp_soc *soc = pdev->soc;
 
-	/* Flow control/Congestion Control processing */
-	status = dp_tx_flow_control(vdev);
-	if (QDF_STATUS_E_RESOURCES == status) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-				"%s Tx Resource Full\n", __func__);
-		DP_STATS_INC(vdev, tx_i.dropped.res_full, 1);
-		/* TODO Stop Tx Queues */
-	}
-
 	/* Allocate software Tx descriptor */
 	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
-
 	if (!tx_desc) {
 		DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
 		return NULL;
@@ -2278,6 +2261,60 @@ QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
 	return QDF_STATUS_SUCCESS;
 }
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/* Pools will be allocated dynamically */
+static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
+					int num_desc)
+{
+	uint8_t i;
+
+	for (i = 0; i < num_pool; i++) {
+		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
+		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
+	}
+
+	return 0;
+}
+
+static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
+{
+	uint8_t i;
+
+	for (i = 0; i < num_pool; i++)
+		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
+}
+#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
+static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
+					int num_desc)
+{
+	uint8_t i;
+
+	/* Allocate software Tx descriptor pools */
+	for (i = 0; i < num_pool; i++) {
+		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+					"%s Tx Desc Pool alloc %d failed %p\n",
+					__func__, i, soc);
+			return ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
+{
+	uint8_t i;
+
+	for (i = 0; i < num_pool; i++) {
+		if (dp_tx_desc_pool_free(soc, i)) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"%s Tx Desc Pool Free failed\n", __func__);
+		}
+	}
+}
+
+#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
+
 /**
  * dp_tx_soc_detach() - detach soc from dp tx
  * @soc: core txrx main context
@@ -2299,14 +2336,8 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
 
-	for (i = 0; i < num_pool; i++) {
-		if (dp_tx_desc_pool_free(soc, i)) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-					"%s Tx Desc Pool Free failed\n",
-					__func__);
-			return QDF_STATUS_E_RESOURCES;
-		}
-	}
+	dp_tx_flow_control_deinit(soc);
+	dp_tx_delete_static_pools(soc, num_pool);
 
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
 			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
@@ -2357,24 +2388,19 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
  */
 QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
 {
+	uint8_t i;
 	uint8_t num_pool;
 	uint32_t num_desc;
 	uint32_t num_ext_desc;
-	uint8_t i;
 
 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
 
-	/* Allocate software Tx descriptor pools */
-	for (i = 0; i < num_pool; i++) {
-		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-					"%s Tx Desc Pool alloc %d failed %p\n",
-					__func__, i, soc);
-			goto fail;
-		}
-	}
+	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
+		goto fail;
+
+	dp_tx_flow_control_init(soc);
 
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
 			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",

+ 0 - 4
dp/wifi3.0/dp_tx.h

@@ -164,9 +164,5 @@ static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
 {
 	return;
 }
-static inline QDF_STATUS dp_tx_flow_control(struct dp_vdev *vdev)
-{
-	return  QDF_STATUS_SUCCESS;
-}
 /* TODO TX_FEATURE_NOT_YET */
 #endif

+ 18 - 13
dp/wifi3.0/dp_tx_desc.c

@@ -73,32 +73,34 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
 	uint16_t num_page, num_desc_per_page;
 	struct dp_tx_desc_s *tx_desc_elem;
 	uint32_t desc_size;
+	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
 
 	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
-	soc->tx_desc[pool_id].elem_size = desc_size;
+	tx_desc_pool->elem_size = desc_size;
 	qdf_mem_multi_pages_alloc(soc->osdev,
-		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem,
+		&tx_desc_pool->desc_pages, desc_size, num_elem,
 		0, true);
-	if (!soc->tx_desc[pool_id].desc_pages.num_pages) {
+	if (!tx_desc_pool->desc_pages.num_pages) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			"Multi page alloc fail, tx desc");
 		goto fail_exit;
 	}
 
-	num_page = soc->tx_desc[pool_id].desc_pages.num_pages;
+	num_page = tx_desc_pool->desc_pages.num_pages;
 	num_desc_per_page =
-		soc->tx_desc[pool_id].desc_pages.num_element_per_page;
-	soc->tx_desc[pool_id].freelist = (struct dp_tx_desc_s *)
-		*soc->tx_desc[pool_id].desc_pages.cacheable_pages;
+		tx_desc_pool->desc_pages.num_element_per_page;
+	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
+			*tx_desc_pool->desc_pages.cacheable_pages;
 	if (qdf_mem_multi_page_link(soc->osdev,
-		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem, true)) {
+		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			"invalid tx desc allocation - overflow num link");
 		goto free_tx_desc;
 	}
 
 	/* Set unique IDs for each Tx descriptor */
-	tx_desc_elem = soc->tx_desc[pool_id].freelist;
+	tx_desc_elem = tx_desc_pool->freelist;
 	count = 0;
 	pool_id_32 = (uint32_t)pool_id;
 	while (tx_desc_elem) {
@@ -113,12 +115,12 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
 		count++;
 	}
 
-	TX_DESC_LOCK_CREATE(&soc->tx_desc[pool_id].lock);
+	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
 	return QDF_STATUS_SUCCESS;
 
 free_tx_desc:
 	qdf_mem_multi_pages_free(soc->osdev,
-		&soc->tx_desc[pool_id].desc_pages, 0, true);
+		&tx_desc_pool->desc_pages, 0, true);
 
 fail_exit:
 	return QDF_STATUS_E_FAULT;
@@ -134,9 +136,12 @@ fail_exit:
  */
 QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
 {
+	struct dp_tx_desc_pool_s *tx_desc_pool =
+				&((soc)->tx_desc[(pool_id)]);
+
 	qdf_mem_multi_pages_free(soc->osdev,
-		&soc->tx_desc[pool_id].desc_pages, 0, true);
-	TX_DESC_LOCK_DESTROY(&soc->tx_desc[pool_id].lock);
+		&tx_desc_pool->desc_pages, 0, true);
+	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
 	return QDF_STATUS_SUCCESS;
 }
 

+ 180 - 3
dp/wifi3.0/dp_tx_desc.h

@@ -22,6 +22,7 @@
 #include "dp_types.h"
 #include "dp_tx.h"
 #include "dp_internal.h"
+#include "cds_api.h"
 
 /**
  * 21 bits cookie
@@ -37,10 +38,17 @@
 #define DP_TX_DESC_ID_OFFSET_MASK  0x0003FF
 #define DP_TX_DESC_ID_OFFSET_OS    0
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+#define TX_DESC_LOCK_CREATE(lock)
+#define TX_DESC_LOCK_DESTROY(lock)
+#define TX_DESC_LOCK_LOCK(lock)
+#define TX_DESC_LOCK_UNLOCK(lock)
+#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
 #define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
+#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
 #define MAX_POOL_BUFF_COUNT 10000
 
 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
@@ -55,6 +63,171 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
 		uint16_t num_elem);
 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+void dp_tx_flow_control_init(struct dp_soc *);
+void dp_tx_flow_control_deinit(struct dp_soc *);
+
+QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
+	tx_pause_callback pause_cb);
+void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
+struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
+	uint8_t flow_pool_id, uint16_t flow_pool_size);
+
+QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
+	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size);
+void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
+	uint8_t flow_type, uint8_t flow_pool_id);
+
+/**
+ * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
+ * @pool: flow pool
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: tx descriptor
+ */
+static inline
+struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
+{
+	struct dp_tx_desc_s *tx_desc = pool->freelist;
+
+	pool->freelist = pool->freelist->next;
+	pool->avail_desc--;
+	return tx_desc;
+}
+
+/**
+ * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
+ * @pool: flow pool
+ * @tx_desc: tx descriptor
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: none
+ */
+static inline
+void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
+			struct dp_tx_desc_s *tx_desc)
+{
+	tx_desc->next = pool->freelist;
+	pool->freelist = tx_desc;
+	pool->avail_desc++;
+}
+
+
+/**
+ * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
+ *
+ * @soc: Handle to DP SoC structure
+ * @desc_pool_id: id of the flow pool to allocate from
+ *
+ * Return: Tx descriptor on success, NULL when no descriptor is available
+ */
+static inline struct dp_tx_desc_s *
+dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
+{
+	struct dp_tx_desc_s *tx_desc = NULL;
+	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
+
+	if (pool) {
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
+		if (pool->avail_desc) {
+			tx_desc = dp_tx_get_desc_flow_pool(pool);
+			tx_desc->pool_id = desc_pool_id;
+			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
+			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
+				pool->status = FLOW_POOL_ACTIVE_PAUSED;
+				qdf_spin_unlock_bh(&pool->flow_pool_lock);
+				/* pause network queues */
+				soc->pause_cb(desc_pool_id,
+					       WLAN_STOP_ALL_NETIF_QUEUE,
+					       WLAN_DATA_FLOW_CONTROL);
+			} else {
+				qdf_spin_unlock_bh(&pool->flow_pool_lock);
+			}
+		} else {
+			pool->pkt_drop_no_desc++;
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		}
+	} else {
+		soc->pool_stats.pkt_drop_no_pool++;
+	}
+
+	return tx_desc;
+}
+
+/**
+ * dp_tx_desc_free() - Free a Tx descriptor and attach it to the free list
+ *
+ * @soc: Handle to DP SoC structure
+ * @tx_desc: descriptor to free
+ * @desc_pool_id: id of the flow pool the descriptor belongs to
+ *
+ * Return: None
+ */
+static inline void
+dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
+		uint8_t desc_pool_id)
+{
+	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	dp_tx_put_desc_flow_pool(pool, tx_desc);
+	switch (pool->status) {
+	case FLOW_POOL_ACTIVE_PAUSED:
+		if (pool->avail_desc > pool->start_th) {
+			soc->pause_cb(pool->flow_pool_id,
+				       WLAN_WAKE_ALL_NETIF_QUEUE,
+				       WLAN_DATA_FLOW_CONTROL);
+			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
+		}
+		break;
+	case FLOW_POOL_INVALID:
+		if (pool->avail_desc == pool->pool_size) {
+			dp_tx_desc_pool_free(soc, desc_pool_id);
+			pool->status = FLOW_POOL_INACTIVE;
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
+			qdf_print("%s %d pool is freed!!\n",
+				 __func__, __LINE__);
+			return;
+		}
+		break;
+
+	case FLOW_POOL_ACTIVE_UNPAUSED:
+		break;
+	default:
+		qdf_print("%s %d pool is INACTIVE State!!\n",
+				 __func__, __LINE__);
+		break;
+	}
+
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+}
+#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
+
+static inline void dp_tx_flow_control_init(struct dp_soc *handle)
+{
+}
+
+static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
+{
+}
+
+static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
+	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
+	uint16_t flow_pool_size)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
+	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
+{
+}
+
 /**
  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
  *
@@ -64,7 +237,7 @@ void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
  * Return:
  */
 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
-		uint8_t desc_pool_id)
+						uint8_t desc_pool_id)
 {
 	struct dp_tx_desc_s *tx_desc = NULL;
 
@@ -162,6 +335,7 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
 
 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
 }
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 
 /**
 * dp_tx_desc_find() - find dp tx descriptor from cookie
@@ -175,8 +349,10 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
 static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
 		uint8_t pool_id, uint16_t page_id, uint16_t offset)
 {
-	return soc->tx_desc[pool_id].desc_pages.cacheable_pages[page_id] +
-		soc->tx_desc[pool_id].elem_size * offset;
+	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
+
+	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
+		tx_desc_pool->elem_size * offset;
 }
 
 /**
@@ -337,6 +513,7 @@ void dp_tso_num_seg_free(struct dp_soc *soc,
 	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
 }
 #endif
+
 /*
  * dp_tx_me_alloc_buf() Alloc descriptor from me pool
  * @pdev DP_PDEV handle for datapath

+ 417 - 0
dp/wifi3.0/dp_tx_flow_control.c

@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+#include <cds_api.h>
+
+/* OS abstraction libraries */
+#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
+#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
+#include <qdf_util.h>           /* qdf_unlikely */
+#include "dp_types.h"
+#include "dp_tx_desc.h"
+
+#include <cdp_txrx_handle.h>
+#include "dp_internal.h"
+#define INVALID_FLOW_ID 0xFF
+#define MAX_INVALID_BIN 3
+
+/**
+ * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
+ *
+ * @ctx: Handle to struct dp_soc.
+ *
+ * Return: none
+ */
+void dp_tx_dump_flow_pool_info(void *ctx)
+{
+	struct dp_soc *soc = ctx;
+	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
+	struct dp_tx_desc_pool_s *pool = NULL;
+	struct dp_tx_desc_pool_s tmp_pool;
+	int i;
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"No of pool map received %d", pool_stats->pool_map_count);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"No of pool unmap received %d",	pool_stats->pool_unmap_count);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"Pkt dropped due to unavailability of pool %d",
+		pool_stats->pkt_drop_no_pool);
+
+	/*
+	 * Nested spin lock.
+	 * Always take in below order.
+	 * flow_pool_array_lock -> flow_pool_lock
+	 */
+	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
+	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
+		pool = &soc->tx_desc[i];
+		if (pool->status > FLOW_POOL_INVALID)
+			continue;
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
+		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Flow_pool_id %d :: status %d",
+			tmp_pool.flow_pool_id, tmp_pool.status);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Total %d :: Available %d",
+			tmp_pool.pool_size, tmp_pool.avail_desc);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Start threshold %d :: Stop threshold %d",
+			 tmp_pool.start_th, tmp_pool.stop_th);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Member flow_id  %d :: flow_type %d",
+			tmp_pool.flow_pool_id, tmp_pool.flow_type);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Pkt dropped due to unavailability of descriptors %d",
+			tmp_pool.pkt_drop_no_desc);
+		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
+	}
+	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
+}
+
+/**
+ * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
+ *
+ * @soc: Handle to struct dp_soc.
+ *
+ * Return: None
+ */
+void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
+{
+
+	if (!soc) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s: soc is null\n", __func__);
+		return;
+	}
+	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
+}
+
+/**
+ * dp_tx_create_flow_pool() - create flow pool
+ * @soc: Handle to struct dp_soc
+ * @flow_pool_id: flow pool id
+ * @flow_pool_size: flow pool size
+ *
+ * Return: flow_pool pointer / NULL for error
+ */
+struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
+	uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+	struct dp_tx_desc_pool_s *pool;
+	uint32_t stop_threshold;
+	uint32_t start_threshold;
+
+	if (!soc) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: soc is NULL\n", __func__);
+		return NULL;
+	}
+	pool = &soc->tx_desc[flow_pool_id];
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->status == FLOW_POOL_INVALID) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: flow pool already allocated\n", __func__);
+		if (pool->avail_desc > pool->start_th)
+			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
+		else
+			pool->status = FLOW_POOL_ACTIVE_PAUSED;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		return pool;
+	}
+
+	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) {
+		/* do not leak flow_pool_lock on the error path */
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		return NULL;
+	}
+
+	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
+	start_threshold = stop_threshold +
+		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);
+
+	pool->flow_pool_id = flow_pool_id;
+	pool->pool_size = flow_pool_size;
+	pool->avail_desc = flow_pool_size;
+	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
+	/* INI is in percentage so divide by 100 */
+	pool->start_th = (start_threshold * flow_pool_size)/100;
+	pool->stop_th = (stop_threshold * flow_pool_size)/100;
+
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	return pool;
+}
+
+/**
+ * dp_tx_delete_flow_pool() - delete flow pool
+ * @soc: Handle to struct dp_soc
+ * @pool: flow pool pointer
+ * @force: free pool forcefully
+ *
+ * Delete flow_pool if all tx descriptors are available.
+ * Otherwise put it in FLOW_POOL_INVALID state.
+ * If force is set then pull all available descriptors to
+ * global pool.
+ *
+ * Return: 0 for success or error
+ */
+int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
+	bool force)
+{
+	if (!soc || !pool) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: pool or soc is NULL\n", __func__);
+		QDF_ASSERT(0);
+		return ENOMEM;
+	}
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->avail_desc < pool->pool_size) {
+		pool->status = FLOW_POOL_INVALID;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		return EAGAIN;
+	}
+
+	/* We have all the descriptors for the pool, we can delete the pool */
+	dp_tx_desc_pool_free(soc, pool->flow_pool_id);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	return 0;
+}
+
+/**
+ * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
+ * @pdev: Handle to struct dp_pdev
+ * @pool: flow_pool
+ * @vdev_id: flow_id /vdev_id
+ *
+ * Return: none
+ */
+static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
+	struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
+{
+	struct dp_vdev *vdev;
+	struct dp_soc *soc = pdev->soc;
+
+	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
+					(struct cdp_pdev *)pdev, vdev_id);
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: invalid vdev_id %d\n",
+		   __func__, vdev_id);
+		return;
+	}
+
+	vdev->pool = pool;
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	pool->pool_owner_ctx = soc;
+	pool->flow_pool_id = vdev_id;
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+}
+
+/**
+ * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
+ * @pdev: Handle to struct dp_pdev
+ * @pool: flow_pool
+ * @vdev_id: flow_id /vdev_id
+ *
+ * Return: none
+ */
+static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
+		struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
+{
+	struct dp_vdev *vdev;
+	struct dp_soc *soc = pdev->soc;
+
+	vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc,
+					(struct cdp_pdev *)pdev, vdev_id);
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: invalid vdev_id %d\n",
+		   __func__, vdev_id);
+		return;
+	}
+
+	vdev->pool = NULL;
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	pool->flow_pool_id = INVALID_FLOW_ID;
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+}
+
+/**
+ * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
+ * @pdev: Handle to struct dp_pdev
+ * @flow_id: flow id
+ * @flow_type: flow type
+ * @flow_pool_id: pool id
+ * @flow_pool_size: pool size
+ *
+ * Process below target to host message
+ * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
+ */
+QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
+	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+	struct dp_soc *soc = pdev->soc;
+	struct dp_tx_desc_pool_s *pool;
+	enum htt_flow_type type = flow_type;
+
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
+		__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);
+
+	if (qdf_unlikely(!soc)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s: soc is NULL", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+	soc->pool_stats.pool_map_count++;
+
+	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
+			flow_pool_size);
+	if (pool == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			   "%s: creation of flow_pool %d size %d failed\n",
+			   __func__, flow_pool_id, flow_pool_size);
+		return QDF_STATUS_E_RESOURCES;
+	}
+
+	switch (type) {
+
+	case FLOW_TYPE_VDEV:
+		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: flow type %d not supported !!!\n",
+		   __func__, type);
+		break;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
+ * @pdev: Handle to struct dp_pdev
+ * @flow_id: flow id
+ * @flow_type: flow type
+ * @flow_pool_id: pool id
+ *
+ * Process below target to host message
+ * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
+ *
+ * Return: none
+ */
+void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
+	uint8_t flow_type, uint8_t flow_pool_id)
+{
+	struct dp_soc *soc;
+	struct dp_tx_desc_pool_s *pool;
+	enum htt_flow_type type = flow_type;
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		"%s: flow_id %d flow_type %d flow_pool_id %d\n",
+		__func__, flow_id, flow_type, flow_pool_id);
+
+	if (qdf_unlikely(!pdev)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s: pdev is NULL", __func__);
+		return;
+	}
+	soc = pdev->soc;
+	soc->pool_stats.pool_unmap_count++;
+
+	pool = &soc->tx_desc[flow_pool_id];
+	if (!pool) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: flow_pool not available flow_pool_id %d\n",
+		   __func__, flow_pool_id);
+		return;
+	}
+
+	switch (type) {
+
+	case FLOW_TYPE_VDEV:
+		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		   "%s: flow type %d not supported !!!\n",
+		   __func__, type);
+		return;
+	}
+
+	/* only delete if all descriptors are available */
+	dp_tx_delete_flow_pool(soc, pool, false);
+}
+
+/**
+ * dp_tx_flow_control_init() - Initialize tx flow control
+ * @soc: Handle to struct dp_soc
+ *
+ * Return: none
+ */
+void dp_tx_flow_control_init(struct dp_soc *soc)
+{
+	qdf_spinlock_create(&soc->flow_pool_array_lock);
+}
+
+/**
+ * dp_tx_flow_control_deinit() - Deinitialize tx flow control
+ * @soc: Handle to struct dp_soc
+ *
+ * Return: none
+ */
+void dp_tx_flow_control_deinit(struct dp_soc *soc)
+{
+	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
+}
+
+/**
+ * dp_txrx_register_pause_cb() - Register pause callback
+ * @ctx: Handle to struct dp_soc
+ * @pause_cb: Tx pause_cb
+ *
+ * Return: none
+ */
+QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
+	tx_pause_callback pause_cb)
+{
+	struct dp_soc *soc = (struct dp_soc *)handle;
+
+	if (!soc || !pause_cb) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			FL("soc or pause_cb is NULL"));
+		return QDF_STATUS_E_INVAL;
+	}
+	soc->pause_cb = pause_cb;
+
+	return QDF_STATUS_SUCCESS;
+}

+ 58 - 5
dp/wifi3.0/dp_types.h

@@ -100,7 +100,7 @@ enum rx_pktlog_mode {
 struct dp_soc_cmn;
 struct dp_pdev;
 struct dp_vdev;
-union dp_tx_desc_list_elem_t;
+struct dp_tx_desc_s;
 struct dp_soc;
 union dp_rx_desc_list_elem_t;
 
@@ -231,6 +231,7 @@ struct dp_tx_ext_desc_pool_s {
  * @pkt_offset: Offset from which the actual packet data starts
  * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
  *		Tx completion of ME packet
+ * @pool: handle to flow_pool this descriptor belongs to.
  */
 struct dp_tx_desc_s {
 	struct dp_tx_desc_s *next;
@@ -250,6 +251,22 @@ struct dp_tx_desc_s {
 	void *tso_num_desc;
 };
 
+/**
+ * enum flow_pool_status - flow pool status
+ * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
+ *				and network queues are unpaused
+ * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
+ *			   and network queues are paused
+ * @FLOW_POOL_INVALID: pool is being deleted; only descriptor puts are allowed
+ * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
+ */
+enum flow_pool_status {
+	FLOW_POOL_ACTIVE_UNPAUSED = 0,
+	FLOW_POOL_ACTIVE_PAUSED = 1,
+	FLOW_POOL_INVALID = 2,
+	FLOW_POOL_INACTIVE = 3,
+};
+
 /**
  * struct dp_tx_tso_seg_pool_s
  * @pool_size: total number of pool elements
@@ -283,23 +300,51 @@ struct dp_tx_tso_num_seg_pool_s {
 /**
  * struct dp_tx_desc_pool_s - Tx Descriptor pool information
  * @elem_size: Size of each descriptor in the pool
- * @elem_count: Total number of descriptors in the pool
- * @num_allocated: Number of used descriptors
+ * @pool_size: Total number of descriptors in the pool
  * @num_free: Number of free descriptors
+ * @num_allocated: Number of used descriptors
  * @freelist: Chain of free descriptors
  * @desc_pages: multiple page allocation information for actual descriptors
+ * @num_invalid_bin: Deleted pool with pending Tx completions.
+ * @flow_pool_lock: Lock protecting flow pool state (flow control V2)
 * @lock: Lock for descriptor allocation/free from/to the pool
  */
 struct dp_tx_desc_pool_s {
 	uint16_t elem_size;
-	uint16_t elem_count;
 	uint32_t num_allocated;
-	uint32_t num_free;
 	struct dp_tx_desc_s *freelist;
 	struct qdf_mem_multi_page_t desc_pages;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	uint16_t pool_size;
+	uint8_t flow_pool_id;
+	uint8_t num_invalid_bin;
+	uint16_t avail_desc;
+	enum flow_pool_status status;
+	enum htt_flow_type flow_type;
+	uint16_t stop_th;
+	uint16_t start_th;
+	uint16_t pkt_drop_no_desc;
+	qdf_spinlock_t flow_pool_lock;
+	void *pool_owner_ctx;
+#else
+	uint16_t elem_count;
+	uint32_t num_free;
 	qdf_spinlock_t lock;
+#endif
 };
 
+/**
+ * struct dp_txrx_pool_stats - flow pool related statistics
+ * @pool_map_count: flow pool map received
+ * @pool_unmap_count: flow pool unmap received
+ * @pkt_drop_no_pool: packets dropped due to unavailability of pool
+ */
+struct dp_txrx_pool_stats {
+	uint16_t pool_map_count;
+	uint16_t pool_unmap_count;
+	uint16_t pkt_drop_no_pool;
+};
 
 struct dp_srng {
 	void *hal_srng;
@@ -487,6 +532,11 @@ struct dp_soc {
 	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
 	uint32_t wbm_idle_scatter_buf_size;
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	qdf_spinlock_t flow_pool_array_lock;
+	tx_pause_callback pause_cb;
+	struct dp_txrx_pool_stats pool_stats;
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 	/* Tx SW descriptor pool */
 	struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
 
@@ -998,6 +1048,9 @@ struct dp_vdev {
 
 	/* Address search flags to be configured in HAL descriptor */
 	uint8_t hal_desc_addr_search_flags;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	struct dp_tx_desc_pool_s *pool;
+#endif
 };
 
 

+ 53 - 3
wlan_cfg/wlan_cfg.c

@@ -93,13 +93,25 @@
 #define RXDMA_MONITOR_DEST_RING_SIZE 2048
 #define RXDMA_MONITOR_STATUS_RING_SIZE 2048
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+
+/* Per vdev pools */
+#define WLAN_CFG_NUM_TX_DESC_POOL	3
+#define WLAN_CFG_NUM_TXEXT_DESC_POOL	3
+
+#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
+
 #ifdef TX_PER_PDEV_DESC_POOL
-#define WLAN_CFG_NUM_TX_DESC_POOL 	MAX_PDEV_CNT
+#define WLAN_CFG_NUM_TX_DESC_POOL	MAX_PDEV_CNT
 #define WLAN_CFG_NUM_TXEXT_DESC_POOL	MAX_PDEV_CNT
-#else
+
+#else /* TX_PER_PDEV_DESC_POOL */
+
 #define WLAN_CFG_NUM_TX_DESC_POOL 3
 #define WLAN_CFG_NUM_TXEXT_DESC_POOL 3
+
 #endif /* TX_PER_PDEV_DESC_POOL */
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 
 #define WLAN_CFG_TX_RING_MASK_0 0x1
 #define WLAN_CFG_TX_RING_MASK_1 0x2
@@ -260,6 +272,10 @@ struct wlan_cfg_dp_soc_ctxt {
 	int nss_cfg;
 	int hw_macid[MAX_PDEV_CNT];
 	int base_hw_macid;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	int tx_flow_stop_queue_th;
+	int tx_flow_start_queue_offset;
+#endif
 };
 
 /**
@@ -300,7 +316,7 @@ struct wlan_cfg_dp_soc_ctxt *wlan_cfg_soc_attach()
 	wlan_cfg_ctx->num_tcl_data_rings = WLAN_CFG_NUM_TCL_DATA_RINGS;
 	wlan_cfg_ctx->per_pdev_rx_ring = WLAN_CFG_PER_PDEV_RX_RING;
 	wlan_cfg_ctx->num_reo_dest_rings = WLAN_CFG_NUM_REO_DEST_RING;
-	wlan_cfg_ctx->num_tx_desc_pool = WLAN_CFG_NUM_TX_DESC_POOL;
+	wlan_cfg_ctx->num_tx_desc_pool = MAX_TXDESC_POOLS;
 	wlan_cfg_ctx->num_tx_ext_desc_pool = WLAN_CFG_NUM_TXEXT_DESC_POOL;
 	wlan_cfg_ctx->num_tx_desc = WLAN_CFG_NUM_TX_DESC;
 	wlan_cfg_ctx->num_tx_ext_desc = WLAN_CFG_NUM_TX_EXT_DESC;
@@ -662,3 +678,37 @@ int wlan_cfg_get_int_timer_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg)
 {
 	return cfg->int_timer_threshold_other;
 }
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * wlan_cfg_get_tx_flow_stop_queue_th() - Get flow control stop threshold
+ * @cfg: config context
+ *
+ * Return: stop threshold
+ */
+int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg)
+{
+#ifdef QCA_WIFI_NAPIER_EMULATION
+	/* TODO remove this hack when INI hookup is ready */
+	return 15;
+#else
+	return cfg->tx_flow_stop_queue_th;
+#endif
+}
+
+/**
+ * wlan_cfg_get_tx_flow_start_queue_offset() - Get flow control start offset
+ *					for TX to resume
+ * @cfg: config context
+ *
+ * Return: start queue offset
+ */
+int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg)
+{
+#ifdef QCA_WIFI_NAPIER_EMULATION
+	/* TODO remove this hack when INI hookup is ready */
+	return 10;
+#else
+	return cfg->tx_flow_start_queue_offset;
+#endif
+}
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */

+ 5 - 1
wlan_cfg/wlan_cfg.h

@@ -425,7 +425,6 @@ int wlan_cfg_get_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg);
  */
 void wlan_cfg_set_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg, int nss_cfg);
 
-
 /*
  * wlan_cfg_get_int_batch_threshold_tx - Get interrupt mitigation cfg for Tx
  * @wlan_cfg_soc_ctx
@@ -489,4 +488,9 @@ int wlan_cfg_tx_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg);
  * Return: Tx Completion ring size
  */
 int wlan_cfg_tx_comp_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg);
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg);
+
+int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg);
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
 #endif