
qcacld-3.0: Add logic to bundle packets for HL

Add logic to queue and bundle packets before
handing them to the scheduler, ensuring predictive HTC
bundling to improve TX throughput.

Change-Id: Ib00135ca59b4c5f0f05edc93dca115bf17174a36
CRs-Fixed: 2561671
Nirav Shah, 5 years ago
parent commit fb9b1df409
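
In outline, the change routes HL TX traffic through a per-vdev bundle queue: normal-priority packets are held until the pdev-wide queue depth reaches the configured bundle size or a per-vdev timer fires, and only then handed to the scheduler. Below is a minimal, self-contained C sketch of the per-packet decision; the names and constants are illustrative stand-ins for the driver code in the diff that follows, not driver code itself.

#include <stdbool.h>
#include <stdio.h>

#define BUNDLE_SIZE 16	/* mirrors the tx_bundle_size INI default below */

static int total_queue_len;	/* stands in for pdev->total_bundle_queue_length */

/* Per-packet choice made once bundling support is compiled in. */
static const char *tx_decision(bool high_prio, bool bundling_required)
{
	if (high_prio)
		return "send now (high priority is never queued)";

	if (bundling_required) {
		if (++total_queue_len >= BUNDLE_SIZE) {
			total_queue_len = 0;
			return "flush all vdev bundle queues (size threshold hit)";
		}
		return "queue packet (bundle timer will flush stragglers)";
	}

	return "send now (throughput too low for bundling to help)";
}

int main(void)
{
	int i;

	printf("prio pkt: %s\n", tx_decision(true, true));
	for (i = 0; i < BUNDLE_SIZE; i++)
		printf("pkt %2d:  %s\n", i, tx_decision(false, true));
	return 0;
}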

+ 1 - 0
Kbuild

@@ -2841,6 +2841,7 @@ cppflags-$(CONFIG_WLAN_DP_PER_RING_TYPE_CONFIG) += -DWLAN_DP_PER_RING_TYPE_CONFI
 cppflags-$(CONFIG_SAP_DHCP_FW_IND) += -DSAP_DHCP_FW_IND
 cppflags-$(CONFIG_WLAN_DP_PENDING_MEM_FLUSH) += -DWLAN_DP_PENDING_MEM_FLUSH
 cppflags-$(CONFIG_WLAN_SUPPORT_DATA_STALL) += -DWLAN_SUPPORT_DATA_STALL
+cppflags-$(CONFIG_WLAN_SUPPORT_TXRX_HL_BUNDLE) += -DWLAN_SUPPORT_TXRX_HL_BUNDLE
 
 ifdef CONFIG_MAX_LOGS_PER_SEC
 ccflags-y += -DWLAN_MAX_LOGS_PER_SEC=$(CONFIG_MAX_LOGS_PER_SEC)

+ 1 - 0
configs/genoa.usb.debug_defconfig

@@ -15,6 +15,7 @@ CONFIG_TX_RESOURCE_LOW_TH_IN_PER := 2
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
+CONFIG_WLAN_SUPPORT_TXRX_HL_BUNDLE := y
 CONFIG_HL_DP_SUPPORT := y
 
 # Enable Motion Detection Feature

+ 1 - 0
configs/genoa.usb.perf_defconfig

@@ -15,6 +15,7 @@ CONFIG_TX_RESOURCE_LOW_TH_IN_PER := 2
 CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL := y
 CONFIG_FEATURE_HL_DBS_GROUP_CREDIT_SHARING := y
 CONFIG_CREDIT_REP_THROUGH_CREDIT_UPDATE := y
+CONFIG_WLAN_SUPPORT_TXRX_HL_BUNDLE := y
 CONFIG_HL_DP_SUPPORT := y
 
 # Enable Motion Detection Feature

+ 20 - 0
core/cds/src/cds_api.c

@@ -368,6 +368,24 @@ cds_cdp_update_del_ack_params(struct wlan_objmgr_psoc *psoc,
 {}
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+static inline void
+cds_cdp_update_bundle_params(struct wlan_objmgr_psoc *psoc,
+			     struct txrx_pdev_cfg_param_t *cdp_cfg)
+{
+	cdp_cfg->bundle_timer_value =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_TIMER_VALUE);
+	cdp_cfg->bundle_size =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_SIZE);
+}
+#else
+static inline void
+cds_cdp_update_bundle_params(struct wlan_objmgr_psoc *psoc,
+			     struct txrx_pdev_cfg_param_t *cdp_cfg)
+{
+}
+#endif
+
 /**
  * cds_cdp_cfg_attach() - attach data path config module
  * @cds_cfg: generic platform level config instance
@@ -407,6 +425,8 @@ static void cds_cdp_cfg_attach(struct wlan_objmgr_psoc *psoc)
 
 	cds_cdp_update_del_ack_params(psoc, &cdp_cfg);
 
+	cds_cdp_update_bundle_params(psoc, &cdp_cfg);
+
 	gp_cds_context->cfg_ctx = cdp_cfg_attach(soc, gp_cds_context->qdf_ctx,
 					(void *)(&cdp_cfg));
 	if (!gp_cds_context->cfg_ctx) {

+ 10 - 0
core/dp/ol/inc/ol_cfg.h

@@ -128,6 +128,11 @@ struct txrx_pdev_cfg_t {
 	/* the maximum number of replaced tcp ack frames */
 	uint16_t del_ack_pkt_count;
 #endif
+
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+	uint16_t bundle_timer_value;
+	uint16_t bundle_size;
+#endif
 };
 
 /**
@@ -779,6 +784,11 @@ void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
 }
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+int ol_cfg_get_bundle_timer_value(struct cdp_cfg *cfg_pdev);
+int ol_cfg_get_bundle_size(struct cdp_cfg *cfg_pdev);
+#else
+#endif
 /**
  * ol_cfg_get_wrr_skip_weight() - brief Query for the param of wrr_skip_weight
  * @pdev: handle to the physical device.

+ 35 - 0
core/dp/txrx/ol_cfg.c

@@ -136,6 +136,22 @@ void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
 }
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+static inline
+void ol_cfg_update_bundle_params(struct txrx_pdev_cfg_t *cfg_ctx,
+				 struct txrx_pdev_cfg_param_t *cfg_param)
+{
+	cfg_ctx->bundle_timer_value = cfg_param->bundle_timer_value;
+	cfg_ctx->bundle_size = cfg_param->bundle_size;
+}
+#else
+static inline
+void ol_cfg_update_bundle_params(struct txrx_pdev_cfg_t *cfg_ctx,
+				 struct txrx_pdev_cfg_param_t *cfg_param)
+{
+}
+#endif
+
 /* FIX THIS -
  * For now, all these configuration parameters are hardcoded.
  * Many of these should actually be determined dynamically instead.
@@ -195,6 +211,8 @@ struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 
 	ol_cfg_update_del_ack_params(cfg_ctx, cfg_param);
 
+	ol_cfg_update_bundle_params(cfg_ctx, cfg_param);
+
 	ol_tx_set_flow_control_parameters((struct cdp_cfg *)cfg_ctx, cfg_param);
 
 	for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
@@ -213,6 +231,23 @@ struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 	return (struct cdp_cfg *)cfg_ctx;
 }
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+
+int ol_cfg_get_bundle_timer_value(struct cdp_cfg *cfg_pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
+	return cfg->bundle_timer_value;
+}
+
+int ol_cfg_get_bundle_size(struct cdp_cfg *cfg_pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
+	return cfg->bundle_size;
+}
+#endif
+
 #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
 /**
  * ol_cfg_get_del_ack_timer_value() - get delayed ack timer value

+ 25 - 0
core/dp/txrx/ol_tx.h

@@ -81,6 +81,31 @@ qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list);
 #define OL_TX_LL ol_tx_ll_wrapper
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+void ol_tx_hl_vdev_bundle_timer(void *context);
+
+void ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev);
+qdf_nbuf_t
+ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev);
+#else
+static inline
+void ol_tx_hl_vdev_bundle_timer(void *context)
+{
+}
+
+static inline
+void ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev)
+{
+}
+
+static inline
+qdf_nbuf_t
+ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev)
+{
+	return NULL;
+}
+#endif
+
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 void ol_tx_vdev_ll_pause_queue_send(void *context);
 void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev);

+ 270 - 0
core/dp/txrx/ol_tx_hl.c

@@ -1422,6 +1422,269 @@ ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 }
 #else
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+void
+ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
+								    pdev_id);
+	struct ol_txrx_vdev_t *vdev;
+
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		vdev->bundling_required = false;
+		ol_txrx_info("vdev_id %d bundle_require %d\n",
+			     vdev->vdev_id, vdev->bundling_required);
+	}
+}
+
+void
+ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
+			      uint32_t time_in_ms, uint32_t high_th,
+			      uint32_t low_th)
+{
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
+				ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	bool old_bundle_required;
+
+	if ((!vdev) || (low_th > high_th))
+		return;
+
+	old_bundle_required = vdev->bundling_required;
+	if (tx_bytes > ((high_th * time_in_ms * 1500) / 1000))
+		vdev->bundling_required = true;
+	else if (tx_bytes < ((low_th * time_in_ms * 1500) / 1000))
+		vdev->bundling_required = false;
+
+	if (old_bundle_required != vdev->bundling_required)
+		ol_txrx_info("vdev_id %d bundle_require %d tx_bytes %ld time_in_ms %d high_th %d low_th %d\n",
+			     vdev->vdev_id, vdev->bundling_required, tx_bytes,
+			     time_in_ms, high_th, low_th);
+}
+
+/**
+ * ol_tx_hl_queue_flush_all() - drop all packets in vdev bundle queue
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+void
+ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev)
+{
+	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
+	if (vdev->bundle_queue.txq.depth != 0) {
+		qdf_timer_stop(&vdev->bundle_queue.timer);
+		vdev->pdev->total_bundle_queue_length -=
+				vdev->bundle_queue.txq.depth;
+		qdf_nbuf_tx_free(vdev->bundle_queue.txq.head, 1/*error*/);
+		vdev->bundle_queue.txq.depth = 0;
+		vdev->bundle_queue.txq.head = NULL;
+		vdev->bundle_queue.txq.tail = NULL;
+	}
+	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
+}
+
+/**
+ * ol_tx_hl_vdev_queue_append() - append pkt in tx queue
+ * @vdev: vdev handle
+ * @msdu_list: msdu list
+ *
+ * Return: none
+ */
+static void
+ol_tx_hl_vdev_queue_append(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
+{
+	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
+
+	if (!vdev->bundle_queue.txq.head) {
+		qdf_timer_start(
+			&vdev->bundle_queue.timer,
+			ol_cfg_get_bundle_timer_value(vdev->pdev->ctrl_pdev));
+		vdev->bundle_queue.txq.head = msdu_list;
+		vdev->bundle_queue.txq.tail = msdu_list;
+	} else {
+		qdf_nbuf_set_next(vdev->bundle_queue.txq.tail, msdu_list);
+	}
+
+	while (qdf_nbuf_next(msdu_list)) {
+		vdev->bundle_queue.txq.depth++;
+		vdev->pdev->total_bundle_queue_length++;
+		msdu_list = qdf_nbuf_next(msdu_list);
+	}
+
+	vdev->bundle_queue.txq.depth++;
+	vdev->pdev->total_bundle_queue_length++;
+	vdev->bundle_queue.txq.tail = msdu_list;
+	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
+}
+
+/**
+ * ol_tx_hl_vdev_queue_send_all() - send all packets in vdev bundle queue
+ * @vdev: vdev handle
+ * @call_sched: invoke scheduler
+ *
+ * Return: NULL for success
+ */
+static qdf_nbuf_t
+ol_tx_hl_vdev_queue_send_all(struct ol_txrx_vdev_t *vdev, bool call_sched,
+			     bool in_timer_context)
+{
+	qdf_nbuf_t msdu_list = NULL;
+	qdf_nbuf_t skb_list_head, skb_list_tail;
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
+				pdev->cfg.request_tx_comp;
+	int pkt_to_sent;
+
+	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
+
+	if (!vdev->bundle_queue.txq.depth) {
+		qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
+		return msdu_list;
+	}
+
+	if (likely((qdf_atomic_read(&vdev->tx_desc_count) +
+		    vdev->bundle_queue.txq.depth) <
+		    vdev->queue_stop_th)) {
+		qdf_timer_stop(&vdev->bundle_queue.timer);
+		vdev->pdev->total_bundle_queue_length -=
+				vdev->bundle_queue.txq.depth;
+		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+					  vdev->bundle_queue.txq.head,
+					  tx_comp_req, call_sched);
+		vdev->bundle_queue.txq.depth = 0;
+		vdev->bundle_queue.txq.head = NULL;
+		vdev->bundle_queue.txq.tail = NULL;
+	} else {
+		pkt_to_sent = vdev->queue_stop_th -
+			qdf_atomic_read(&vdev->tx_desc_count);
+
+		if (pkt_to_sent) {
+			skb_list_head = vdev->bundle_queue.txq.head;
+
+			while (pkt_to_sent) {
+				skb_list_tail =
+					vdev->bundle_queue.txq.head;
+				vdev->bundle_queue.txq.head =
+				    qdf_nbuf_next(vdev->bundle_queue.txq.head);
+				vdev->pdev->total_bundle_queue_length--;
+				vdev->bundle_queue.txq.depth--;
+				pkt_to_sent--;
+				if (!vdev->bundle_queue.txq.head) {
+					qdf_timer_stop(
+						&vdev->bundle_queue.timer);
+					break;
+				}
+			}
+
+			qdf_nbuf_set_next(skb_list_tail, NULL);
+			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
+						  skb_list_head, tx_comp_req,
+						  call_sched);
+		}
+
+		if (in_timer_context && vdev->bundle_queue.txq.head) {
+			qdf_timer_start(
+				&vdev->bundle_queue.timer,
+				ol_cfg_get_bundle_timer_value(
+					vdev->pdev->ctrl_pdev));
+		}
+	}
+	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
+
+	return msdu_list;
+}
+
+/**
+ * ol_tx_hl_pdev_queue_send_all() - send all packets from all vdev bundle queue
+ * @pdev: pdev handle
+ *
+ * Return: NULL for success
+ */
+qdf_nbuf_t
+ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_vdev_t *vdev;
+	qdf_nbuf_t msdu_list;
+
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, false, false);
+		if (msdu_list)
+			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+	}
+	ol_tx_sched(pdev);
+	return NULL; /* all msdus were accepted */
+}
+
+/**
+ * ol_tx_hl_vdev_bundle_timer() - bundle timer function
+ * @vdev: vdev handle
+ *
+ * Return: none
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+void
+ol_tx_hl_vdev_bundle_timer(struct timer_list *t)
+{
+	qdf_nbuf_t msdu_list;
+	struct ol_txrx_vdev_t *vdev = from_timer(vdev, t, bundle_queue.timer);
+
+	vdev->no_of_bundle_sent_in_timer++;
+	msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, true, true);
+	if (msdu_list)
+		qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+}
+#else
+void
+ol_tx_hl_vdev_bundle_timer(void *ctx)
+{
+	qdf_nbuf_t msdu_list;
+	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)ctx;
+
+	vdev->no_of_bundle_sent_in_timer++;
+	msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, true, true);
+	if (msdu_list)
+		qdf_nbuf_tx_free(msdu_list, 1/*error*/);
+}
+#endif
+
+qdf_nbuf_t
+ol_tx_hl(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
+				pdev->cfg.request_tx_comp;
+
+	/* No queuing for high priority packets */
+	if (ol_tx_desc_is_high_prio(msdu_list)) {
+		vdev->no_of_pkt_not_added_in_queue++;
+		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
+					     tx_comp_req, true);
+	} else if (vdev->bundling_required &&
+	    (ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev) > 1)) {
+		ol_tx_hl_vdev_queue_append(vdev, msdu_list);
+
+		if (pdev->total_bundle_queue_length >=
+		    ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev)) {
+			vdev->no_of_bundle_sent_after_threshold++;
+			return ol_tx_hl_pdev_queue_send_all(pdev);
+		}
+	} else {
+		if (vdev->bundle_queue.txq.depth != 0) {
+			ol_tx_hl_vdev_queue_append(vdev, msdu_list);
+			return ol_tx_hl_vdev_queue_send_all(vdev, true, false);
+		} else {
+			vdev->no_of_pkt_not_added_in_queue++;
+			return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
+					     tx_comp_req, true);
+		}
+	}
+
+	return NULL; /* all msdus were accepted */
+}
+
+#else
+
 qdf_nbuf_t
 ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 {
@@ -1433,6 +1696,7 @@ ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 			     msdu_list, tx_comp_req, true);
 }
 #endif
+#endif
 
 qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
 			    enum ol_tx_spec tx_spec,
@@ -2084,6 +2348,12 @@ void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
 		txrx_nofl_info("q_paused %d prio_q_paused %d",
 			       qdf_atomic_read(&vdev->os_q_paused),
 			       vdev->prio_q_paused);
+		txrx_nofl_info("no_of_bundle_sent_after_threshold %lld",
+			       vdev->no_of_bundle_sent_after_threshold);
+		txrx_nofl_info("no_of_bundle_sent_in_timer %lld",
+			       vdev->no_of_bundle_sent_in_timer);
+		txrx_nofl_info("no_of_pkt_not_added_in_queue %lld",
+			       vdev->no_of_pkt_not_added_in_queue);
 	}
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
 }
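
The bundling_required hysteresis in ol_tx_vdev_set_bundle_require() above converts two packets-per-second thresholds into byte counts for the sampling window, assuming roughly 1500-byte MSDUs: th * time_in_ms * 1500 / 1000 is the byte total that rate would produce in the window. A standalone sketch of the arithmetic using the INI defaults follows; the 100 ms window is an assumed bus-bandwidth interval, not a value fixed by this commit.

#include <stdbool.h>
#include <stdio.h>

/* Same comparison as the driver, with an explicit widening cast so the
 * product stays safe in 64-bit arithmetic even at large thresholds. */
static bool bundle_required(unsigned long tx_bytes, unsigned int time_in_ms,
			    unsigned int high_th, unsigned int low_th,
			    bool current)
{
	if (tx_bytes > ((unsigned long)high_th * time_in_ms * 1500) / 1000)
		return true;	/* above high watermark: start bundling */
	if (tx_bytes < ((unsigned long)low_th * time_in_ms * 1500) / 1000)
		return false;	/* below low watermark: stop bundling */
	return current;		/* in the hysteresis band: keep old state */
}

int main(void)
{
	/* Defaults: high_th = 4330 pkts/s, low_th = 4000 pkts/s. Over a
	 * 100 ms window the high watermark is 4330 * 100 * 1500 / 1000
	 * = 649500 bytes, the low watermark 600000 bytes. */
	printf("%d\n", bundle_required(700000, 100, 4330, 4000, false)); /* 1 */
	printf("%d\n", bundle_required(620000, 100, 4330, 4000, true));  /* 1: band keeps state */
	printf("%d\n", bundle_required(500000, 100, 4330, 4000, true));  /* 0 */
	return 0;
}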

+ 2 - 0
core/dp/txrx/ol_tx_queue.c

@@ -68,6 +68,8 @@ ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
 	struct ol_txrx_peer_t *peer, *peers[PEER_ARRAY_COUNT];
 	int i, j, peer_count;
 
+	ol_tx_hl_queue_flush_all(vdev);
+
 	/* flush VDEV TX queues */
 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
 		txq = &vdev->txqs[i];

+ 4 - 0
core/dp/txrx/ol_tx_send.c

@@ -120,6 +120,7 @@ static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
 void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
 {
 	struct ol_txrx_vdev_t *vdev;
+	bool trigger_unpause = false;
 
 	qdf_spin_lock_bh(&pdev->tx_mutex);
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
@@ -143,9 +144,12 @@ void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
 				       WLAN_WAKE_NON_PRIORITY_QUEUE,
 				       WLAN_DATA_FLOW_CONTROL);
 			qdf_atomic_set(&vdev->os_q_paused, 0);
+			trigger_unpause = true;
 		}
 	}
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
+	if (trigger_unpause)
+		ol_tx_hl_pdev_queue_send_all(pdev);
 }
 #endif
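
Note how the new trigger_unpause flag defers ol_tx_hl_pdev_queue_send_all() until after pdev->tx_mutex is released, presumably to keep the heavier send path (which takes each vdev's bundle_queue mutex) out of that critical section. A generic illustration of the defer-past-the-lock pattern, not driver code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tx_mutex = PTHREAD_MUTEX_INITIALIZER;

static void send_all_queues(void)
{
	puts("flushing bundle queues outside the lock");
}

static void unpause_os_queues(int n_vdevs_paused)
{
	bool trigger_unpause = false;
	int i;

	pthread_mutex_lock(&tx_mutex);
	for (i = 0; i < n_vdevs_paused; i++)
		trigger_unpause = true;	/* only record that work is pending */
	pthread_mutex_unlock(&tx_mutex);

	if (trigger_unpause)		/* heavier work after dropping the lock */
		send_all_queues();
}

int main(void)
{
	unpause_os_queues(2);
	return 0;
}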
 

+ 18 - 0
core/dp/txrx/ol_txrx.c

@@ -1794,6 +1794,17 @@ ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
 	vdev->txrx_stats.txack_success = 0;
 	vdev->txrx_stats.txack_failed = 0;
 
+	vdev->bundling_required = false;
+	qdf_spinlock_create(&vdev->bundle_queue.mutex);
+	vdev->bundle_queue.txq.head = NULL;
+	vdev->bundle_queue.txq.tail = NULL;
+	vdev->bundle_queue.txq.depth = 0;
+	qdf_timer_init(
+		pdev->osdev,
+		&vdev->bundle_queue.timer,
+		ol_tx_hl_vdev_bundle_timer,
+		vdev, QDF_TIMER_TYPE_SW);
+
 	/* Default MAX Q depth for every VDEV */
 	vdev->ll_pause.max_q_depth =
 		ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
@@ -1998,6 +2009,9 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
 	qdf_timer_free(&vdev->ll_pause.timer);
 	qdf_spinlock_destroy(&vdev->ll_pause.mutex);
 
+	qdf_timer_free(&vdev->bundle_queue.timer);
+	qdf_spinlock_destroy(&vdev->bundle_queue.mutex);
+
 	qdf_spin_lock_bh(&vdev->flow_control_lock);
 	vdev->osif_flow_control_cb = NULL;
 	vdev->osif_flow_control_is_pause = NULL;
@@ -5906,6 +5920,10 @@ static struct cdp_misc_ops ol_ops_misc = {
 	.pdev_reset_driver_del_ack = ol_tx_pdev_reset_driver_del_ack,
 	.vdev_set_driver_del_ack_enable = ol_tx_vdev_set_driver_del_ack_enable
 #endif
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+	.vdev_set_bundle_require_flag = ol_tx_vdev_set_bundle_require,
+	.pdev_reset_bundle_require_flag = ol_tx_pdev_reset_bundle_require,
+#endif
 };
 
 static struct cdp_flowctl_ops ol_ops_flowctl = {

+ 22 - 0
core/dp/txrx/ol_txrx.h

@@ -809,4 +809,26 @@ void ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
 
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
+				   uint32_t time_in_ms, uint32_t high_th,
+				   uint32_t low_th);
+
+void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
+
+#else
+
+static inline
+void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
+				   uint32_t time_in_ms, uint32_t high_th,
+				   uint32_t low_th)
+{
+}
+
+static inline
+void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+}
+#endif
+
 #endif /* _OL_TXRX__H_ */

+ 15 - 0
core/dp/txrx/ol_txrx_types.h

@@ -1109,6 +1109,7 @@ struct ol_txrx_pdev_t {
 
 	/* Current noise-floor reading for the pdev channel */
 	int16_t chan_noise_floor;
+	uint32_t total_bundle_queue_length;
 };
 
 #define OL_TX_HL_DEL_ACK_HASH_SIZE    256
@@ -1348,6 +1349,20 @@ struct ol_txrx_vdev_t {
 	uint64_t fwd_rx_packets;
 	bool is_wisa_mode_enable;
 	uint8_t mac_id;
+
+	uint64_t no_of_bundle_sent_after_threshold;
+	uint64_t no_of_bundle_sent_in_timer;
+	uint64_t no_of_pkt_not_added_in_queue;
+	bool bundling_required;
+	struct {
+		struct {
+			qdf_nbuf_t head;
+			qdf_nbuf_t tail;
+			int depth;
+		} txq;
+		qdf_spinlock_t mutex;
+		qdf_timer_t timer;
+	} bundle_queue;
 };
 
 struct ol_rx_reorder_array_elem_t {

+ 52 - 1
core/hdd/inc/hdd_dp_cfg.h

@@ -1230,6 +1230,46 @@
 #define CFG_DP_ENABLE_NUD_TRACKING_ALL
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+
+#define CFG_DP_HL_BUNDLE_HIGH_TH \
+		CFG_INI_UINT( \
+		"tx_bundle_high_threashold", \
+		0, \
+		70000, \
+		4330, \
+		CFG_VALUE_OR_DEFAULT, \
+		"tx bundle high threashold")
+
+#define CFG_DP_HL_BUNDLE_LOW_TH \
+		CFG_INI_UINT( \
+		"tx_bundle_low_threashold", \
+		0, \
+		70000, \
+		4000, \
+		CFG_VALUE_OR_DEFAULT, \
+		"tx bundle low threashold")
+
+#define CFG_DP_HL_BUNDLE_TIMER_VALUE \
+		CFG_INI_UINT( \
+		"tx_bundle_timer_in_ms", \
+		10, \
+		10000, \
+		100, \
+		CFG_VALUE_OR_DEFAULT, \
+		"tx bundle timer value in ms")
+
+#define CFG_DP_HL_BUNDLE_SIZE \
+		CFG_INI_UINT( \
+		"tx_bundle_size", \
+		0, \
+		64, \
+		16, \
+		CFG_VALUE_OR_DEFAULT, \
+		"tx bundle size")
+
+#endif
+
 /*
  * <ini>
  * gWmiCreditCount - Credit count for WMI exchange
@@ -1301,6 +1341,16 @@
 #define CFG_DP_DRIVER_TCP_DELACK
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+#define CFG_DP_HL_BUNDLE \
+	CFG(CFG_DP_HL_BUNDLE_HIGH_TH) \
+	CFG(CFG_DP_HL_BUNDLE_LOW_TH) \
+	CFG(CFG_DP_HL_BUNDLE_TIMER_VALUE) \
+	CFG(CFG_DP_HL_BUNDLE_SIZE)
+#else
+#define CFG_DP_HL_BUNDLE
+#endif
+
 #define CFG_HDD_DP_ALL \
 	CFG(CFG_DP_NAPI_CE_CPU_MASK) \
 	CFG(CFG_DP_RX_THREAD_CPU_MASK) \
@@ -1323,5 +1373,6 @@
 	CFG_DP_DRIVER_TCP_DELACK \
 	CFG_HDD_DP_LEGACY_TX_FLOW \
 	CFG_DP_ENABLE_NUD_TRACKING_ALL \
-	CFG_DP_CONFIG_DP_TRACE_ALL
+	CFG_DP_CONFIG_DP_TRACE_ALL \
+	CFG_DP_HL_BUNDLE
 #endif
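
Taken together, the new knobs can be set from the WLAN ini; the fragment below simply restates the defaults (key names copied verbatim from the CFG_INI_UINT definitions above, including the "threashold" spelling the source uses):

tx_bundle_high_threashold=4330
tx_bundle_low_threashold=4000
tx_bundle_timer_in_ms=100
tx_bundle_size=16

Since ol_tx_hl() only appends to the bundle queue when the configured bundle size is greater than 1, setting tx_bundle_size to 0 or 1 effectively disables queueing.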

+ 7 - 0
core/hdd/inc/wlan_hdd_cfg.h

@@ -226,6 +226,13 @@ struct hdd_config {
 #ifdef WLAN_FEATURE_TSF_PLUS
 	uint8_t tsf_ptp_options;
 #endif /* WLAN_FEATURE_TSF_PLUS */
+
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+	uint32_t pkt_bundle_threshold_high;
+	uint32_t pkt_bundle_threshold_low;
+	uint16_t pkt_bundle_timer_value;
+	uint16_t pkt_bundle_size;
+#endif
 };
 
 /**

+ 1 - 0
core/hdd/inc/wlan_hdd_main.h

@@ -1213,6 +1213,7 @@ struct hdd_adapter {
 #ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
 	unsigned long prev_rx_packets;
 	unsigned long prev_tx_packets;
+	unsigned long prev_tx_bytes;
 	uint64_t prev_fwd_tx_packets;
 	uint64_t prev_fwd_rx_packets;
 #endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/

+ 34 - 1
core/hdd/src/wlan_hdd_main.c

@@ -8586,11 +8586,34 @@ static void hdd_ipa_set_perf_level(struct hdd_context *hdd_ctx,
 }
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+static inline
+void hdd_set_vdev_bundle_require_flag(uint16_t vdev_id,
+				      struct hdd_context *hdd_ctx,
+				      uint64_t tx_bytes)
+{
+	struct hdd_config *cfg = hdd_ctx->config;
+
+	cdp_vdev_set_bundle_require_flag(cds_get_context(QDF_MODULE_ID_SOC),
+					 vdev_id, tx_bytes,
+					 cfg->bus_bw_compute_interval,
+					 cfg->pkt_bundle_threshold_high,
+					 cfg->pkt_bundle_threshold_low);
+}
+#else
+static inline
+void hdd_set_vdev_bundle_require_flag(uint16_t vdev_id,
+				      struct hdd_context *hdd_ctx,
+				      uint64_t tx_bytes)
+{
+}
+#endif
+
 #define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
 static void __hdd_bus_bw_work_handler(struct hdd_context *hdd_ctx)
 {
 	struct hdd_adapter *adapter = NULL, *con_sap_adapter = NULL;
-	uint64_t tx_packets = 0, rx_packets = 0;
+	uint64_t tx_packets = 0, rx_packets = 0, tx_bytes = 0;
 	uint64_t fwd_tx_packets = 0, fwd_rx_packets = 0;
 	uint64_t fwd_tx_packets_diff = 0, fwd_rx_packets_diff = 0;
 	uint64_t total_tx = 0, total_rx = 0;
@@ -8632,6 +8655,8 @@ static void __hdd_bus_bw_work_handler(struct hdd_context *hdd_ctx)
 					      adapter->prev_tx_packets);
 		rx_packets += HDD_BW_GET_DIFF(adapter->stats.rx_packets,
 					      adapter->prev_rx_packets);
+		tx_bytes = HDD_BW_GET_DIFF(adapter->stats.tx_bytes,
+					   adapter->prev_tx_bytes);
 
 		if (adapter->device_mode == QDF_SAP_MODE ||
 		    adapter->device_mode == QDF_P2P_GO_MODE ||
@@ -8663,6 +8688,9 @@ static void __hdd_bus_bw_work_handler(struct hdd_context *hdd_ctx)
 		hdd_set_driver_del_ack_enable(adapter->vdev_id, hdd_ctx,
 					      rx_packets);
 
+		hdd_set_vdev_bundle_require_flag(adapter->vdev_id, hdd_ctx,
+						 tx_bytes);
+
 		total_rx += adapter->stats.rx_packets;
 		total_tx += adapter->stats.tx_packets;
 
@@ -8671,6 +8699,7 @@ static void __hdd_bus_bw_work_handler(struct hdd_context *hdd_ctx)
 		adapter->prev_rx_packets = adapter->stats.rx_packets;
 		adapter->prev_fwd_tx_packets = fwd_tx_packets;
 		adapter->prev_fwd_rx_packets = fwd_rx_packets;
+		adapter->prev_tx_bytes = adapter->stats.tx_bytes;
 		qdf_spin_unlock_bh(&hdd_ctx->bus_bw_lock);
 		connected = true;
 	}
@@ -13468,6 +13497,8 @@ static void __hdd_bus_bw_compute_timer_stop(struct hdd_context *hdd_ctx)
 	hdd_reset_tcp_delack(hdd_ctx);
 	cdp_pdev_reset_driver_del_ack(cds_get_context(QDF_MODULE_ID_SOC),
 				      OL_TXRX_PDEV_ID);
+	cdp_pdev_reset_bundle_require_flag(cds_get_context(QDF_MODULE_ID_SOC),
+					   OL_TXRX_PDEV_ID);
 }
 
 void hdd_bus_bw_compute_timer_stop(struct hdd_context *hdd_ctx)
@@ -13496,6 +13527,7 @@ void hdd_bus_bw_compute_prev_txrx_stats(struct hdd_adapter *adapter)
 	qdf_spin_lock_bh(&hdd_ctx->bus_bw_lock);
 	adapter->prev_tx_packets = adapter->stats.tx_packets;
 	adapter->prev_rx_packets = adapter->stats.rx_packets;
+	adapter->prev_tx_bytes = adapter->stats.tx_bytes;
 	cdp_get_intra_bss_fwd_pkts_count(cds_get_context(QDF_MODULE_ID_SOC),
 					 adapter->vdev_id,
 					 &adapter->prev_fwd_tx_packets,
@@ -13512,6 +13544,7 @@ void hdd_bus_bw_compute_reset_prev_txrx_stats(struct hdd_adapter *adapter)
 	adapter->prev_rx_packets = 0;
 	adapter->prev_fwd_tx_packets = 0;
 	adapter->prev_fwd_rx_packets = 0;
+	adapter->prev_tx_bytes = 0;
 	qdf_spin_unlock_bh(&hdd_ctx->bus_bw_lock);
 }
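
The tx_bytes delta above reuses the existing HDD_BW_GET_DIFF macro, which computes cur - prev in modular unsigned arithmetic so the result stays correct when the kernel's byte counter wraps. A small standalone check with illustrative values:

#include <limits.h>
#include <stdio.h>

#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)

int main(void)
{
	unsigned long prev = ULONG_MAX - 5;	/* counter just before wrapping */
	unsigned long cur = 10;			/* counter after it wrapped */

	/* 6 bytes before the wrap plus 10 after: expect 16. */
	printf("%lu\n", HDD_BW_GET_DIFF(cur, prev));

	/* No wrap: equivalent to plain subtraction, expect 300. */
	printf("%lu\n", HDD_BW_GET_DIFF(1300UL, 1000UL));
	return 0;
}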
 

+ 22 - 0
core/hdd/src/wlan_hdd_tx_rx.c

@@ -3148,6 +3148,26 @@ static void hdd_ini_tcp_del_ack_settings(struct hdd_config *config,
 }
 #endif
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+static void hdd_dp_hl_bundle_cfg_update(struct hdd_config *config,
+					struct wlan_objmgr_psoc *psoc)
+{
+	config->pkt_bundle_threshold_high =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_HIGH_TH);
+	config->pkt_bundle_threshold_low =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_LOW_TH);
+	config->pkt_bundle_timer_value =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_TIMER_VALUE);
+	config->pkt_bundle_size =
+		cfg_get(psoc, CFG_DP_HL_BUNDLE_SIZE);
+}
+#else
+static void hdd_dp_hl_bundle_cfg_update(struct hdd_config *config,
+					struct wlan_objmgr_psoc *psoc)
+{
+}
+#endif
+
 void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
 		       struct hdd_context *hdd_ctx)
 {
@@ -3161,6 +3181,8 @@ void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
 
 	hdd_ini_tcp_del_ack_settings(config, psoc);
 
+	hdd_dp_hl_bundle_cfg_update(config, psoc);
+
 	config->napi_cpu_affinity_mask =
 		cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
 	config->rx_thread_affinity_mask =