Browse Source

qcacld-3.0: Fix data stall during TDLS Offchan Operation

In a non-concurrency scenario with a TDLS off-channel link, the FW sends
two pauses, namely PAUSE_TYPE_CHOP and PAUSE_TYPE_CHOP_TDLS_OFFCHAN,
back to back as the DUT dwells between the TDLS off channel and the AP
channel respectively. The HOST handles these events as vdev-based events
instead of peer-based events and removes all the tx queues from the
scheduler for all peers irrespective of the pause type. This leads to a
data stall in TDLS off-channel scenarios because, at any point in time,
the host will have received one of the two pauses and will keep its peer
queues out of the scheduler.

Changes are done to decouple the handling of pause/unpause events
for TDLS peers with off-channel operation enabled from vdev-based
pause/unpause. TDLS peers are paused/unpaused based on the
PAUSE_TYPE_CHOP_TDLS_OFFCHAN pause type. Other pause/unpause events
are handled as before.

Change-Id: Iccb46fd6d121d5df6d53633c9978ddc8e02f588f
CRs-Fixed: 2558612
nakul kachhwaha 5 years ago
parent
commit
e3e92ae6db

+ 1 - 1
components/pmo/core/src/wlan_pmo_suspend_resume.c

@@ -655,7 +655,7 @@ static void pmo_unpause_all_vdev(struct wlan_objmgr_psoc *psoc,
 		if (pmo_core_vdev_get_pause_bitmap(psoc_ctx, vdev_id)) {
 			cdp_fc_vdev_unpause(pmo_core_psoc_get_dp_handle(psoc),
 					    vdev_dp,
-					    0xffffffff);
+					    0xffffffff, 0);
 			if (psoc_ctx->pause_bitmap_notifier)
 				psoc_ctx->pause_bitmap_notifier(vdev_id, 0);
 		}

+ 58 - 10
core/dp/txrx/ol_tx_classify.c

@@ -334,33 +334,79 @@ ol_tx_tid(
 static inline
 struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
 						struct ol_txrx_vdev_t *vdev,
+						uint8_t *dest_addr,
 						uint8_t *peer_id)
 {
 	struct ol_txrx_peer_t *peer = NULL;
+	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
+	enum peer_debug_id_type id_type = PEER_DEBUG_ID_OL_INTERNAL;
+
+	struct ol_txrx_peer_t *(*find_peer)(struct ol_txrx_pdev_t *pdev,
+					    uint8_t *peer_mac_addr,
+					    int mac_addr_is_aligned,
+					    u8 check_valid,
+					    enum peer_debug_id_type dbg_id)
+		= ol_txrx_peer_find_hash_find_get_ref;
 
 	if (vdev->hlTdlsFlag) {
-		peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
-					vdev->hl_tdls_ap_mac_addr.raw, 0, 1,
-					PEER_DEBUG_ID_OL_INTERNAL);
+		peer = find_peer(pdev, vdev->hl_tdls_ap_mac_addr.raw,
+				 0, 1, id_type);
 
-		if (peer &&  (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
-			ol_txrx_peer_release_ref(peer,
-						 PEER_DEBUG_ID_OL_INTERNAL);
+		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
+			ol_txrx_peer_release_ref(peer, id_type);
 			peer = NULL;
 		} else {
-			if (peer)
+			if (peer) {
 				*peer_id = peer->local_id;
+				return peer;
+			}
 		}
 	}
-	if (!peer)
-		peer = ol_txrx_assoc_peer_find(vdev);
 
+	/* Packets destined to TDLS Peer or AP with 'No TDLS Link'.
+	 * Optimized to directly get the peer based on 'dest_addr'
+	 */
+	if (vdev->last_real_peer &&
+	    !qdf_mem_cmp(vdev->last_real_peer->mac_addr.raw,
+			 dest_addr, QDF_MAC_ADDR_SIZE)) {
+		ol_txrx_peer_get_ref(vdev->last_real_peer, id_type);
+		*peer_id = vdev->last_real_peer->local_id;
+		peer = vdev->last_real_peer;
+	} else {
+		/* packets destined for other peers or AP with TDLS Link */
+		if (vdev->last_real_peer &&
+		    !qdf_mem_cmp(vdev->hl_tdls_ap_mac_addr.raw,
+				 zero_mac_addr,
+				 QDF_MAC_ADDR_SIZE)) {
+		/* With No TDLS Link return last_real_peer for both AP
+		 * and other bss peer
+		 */
+			ol_txrx_peer_get_ref(vdev->last_real_peer, id_type);
+			*peer_id = vdev->last_real_peer->local_id;
+			peer = vdev->last_real_peer;
+		} else { /* packet destined for other peers and AP when
+			  * STA has TDLS link
+			  */
+			peer = find_peer(pdev, vdev->hl_tdls_ap_mac_addr.raw,
+					 0, 1, id_type);
+
+			if (peer &&
+			    (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
+				ol_txrx_peer_release_ref(peer, id_type);
+				peer = NULL;
+			} else {
+				if (peer)
+					*peer_id = peer->local_id;
+			}
+		}
+	}
 	return peer;
 }
 
 #else
 static struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
 						struct ol_txrx_vdev_t *vdev,
+						uint8_t *dest_addr,
 						uint8_t *peer_id)
 {
 	struct ol_txrx_peer_t *peer = NULL;
@@ -503,7 +549,9 @@ ol_tx_classify(
 			 * then the frame is either for the AP itself, or is
 			 * supposed to be sent to the AP for forwarding.
 			 */
-			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
+			peer = ol_tx_tdls_peer_find(pdev, vdev,
+						    dest_addr,
+						    &peer_id);
 		} else {
 			peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
 								   dest_addr,

+ 45 - 2
core/dp/txrx/ol_tx_hl.c

@@ -1492,8 +1492,11 @@ ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
 
 	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
 	if (!vdev->last_real_peer && peer &&
-	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
 		vdev->last_real_peer = peer;
+		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
+			     QDF_MAC_ADDR_SIZE);
+	}
 	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
 }
 
@@ -1537,10 +1540,50 @@ void ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *pvdev,
 
 	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
 	if (!vdev->last_real_peer && peer &&
-	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
 		vdev->last_real_peer = peer;
+		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
+			     QDF_MAC_ADDR_SIZE);
+	}
 	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
 }
+
+/**
+ * ol_txrx_set_peer_as_tdls_peer() - mark peer as tdls peer
+ * @ppeer: cdp peer
+ * @value: false/true
+ *
+ * Return: None
+ */
+void ol_txrx_set_peer_as_tdls_peer(void *ppeer, bool val)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
+			  peer, qdf_atomic_read(&peer->ref_cnt));
+
+	/* Mark peer as tdls */
+	peer->is_tdls_peer = val;
+}
+
+/**
+ * ol_txrx_set_tdls_offchan_enabled() - set tdls offchan enabled
+ * @ppeer: cdp peer
+ * @value: false/true
+ *
+ * Return: None
+ */
+void ol_txrx_set_tdls_offchan_enabled(void *ppeer, bool val)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
+			  peer, qdf_atomic_read(&peer->ref_cnt));
+
+	/* Set TDLS Offchan operation enable/disable */
+	if (peer->is_tdls_peer)
+		peer->tdls_offchan_enabled = val;
+}
 #endif
 
 #if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

+ 43 - 9
core/dp/txrx/ol_tx_queue.c

@@ -517,6 +517,25 @@ ol_txrx_peer_tid_unpause_base(
 		}
 	}
 }
+
+/**
+ * ol_txrx_peer_unpause_base() - unpause all txqs for a given peer
+ * @pdev: the physical device object
+ * @peer: peer device object
+ *
+ * Return: None
+ */
+static void
+ol_txrx_peer_unpause_base(
+	struct ol_txrx_pdev_t *pdev,
+	struct ol_txrx_peer_t *peer)
+{
+	int i;
+
+	for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
+		ol_txrx_peer_tid_unpause_base(pdev, peer, i);
+}
+
 #ifdef QCA_BAD_PEER_TX_FLOW_CL
 /**
  * ol_txrx_peer_unpause_but_no_mgmt_q_base() - unpause all txqs except
@@ -564,7 +583,8 @@ ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
 }
 
 void
-ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
+ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
+		   uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -578,7 +598,15 @@ ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		ol_txrx_peer_pause_base(pdev, peer);
+		if (pause_type == PAUSE_TYPE_CHOP) {
+			if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
+				ol_txrx_peer_pause_base(pdev, peer);
+		} else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
+			if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
+				ol_txrx_peer_pause_base(pdev, peer);
+		} else {
+			ol_txrx_peer_pause_base(pdev, peer);
+		}
 	}
 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
@@ -587,7 +615,8 @@ ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
 }
 
 
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
+			  uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -604,10 +633,15 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
 
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		int i;
-
-		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
-			ol_txrx_peer_tid_unpause_base(pdev, peer, i);
+		if (pause_type == PAUSE_TYPE_CHOP) {
+			if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
+				ol_txrx_peer_unpause_base(pdev, peer);
+		} else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
+			if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
+				ol_txrx_peer_unpause_base(pdev, peer);
+		} else {
+			ol_txrx_peer_unpause_base(pdev, peer);
+		}
 	}
 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
@@ -1685,7 +1719,7 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
 		cdp_fc_vdev_pause(
 			cds_get_context(QDF_MODULE_ID_SOC),
-			(struct cdp_vdev *)vdev, reason);
+			(struct cdp_vdev *)vdev, reason, 0);
 	}
 
 }
@@ -1703,7 +1737,7 @@ void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
 		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
-				    (struct cdp_vdev *)vdev, reason);
+				    (struct cdp_vdev *)vdev, reason, 0);
 	}
 
 }

+ 4 - 2
core/dp/txrx/ol_tx_queue.h

@@ -239,8 +239,10 @@ void ol_txrx_vdev_flush(struct cdp_vdev *pvdev);
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
    (defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
    defined(CONFIG_HL_SUPPORT)
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason);
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason);
+void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
+			uint32_t pause_type);
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
+			  uint32_t pause_type);
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

+ 3 - 1
core/dp/txrx/ol_txrx.c

@@ -2267,7 +2267,7 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 
 	/* store provided params */
 	peer->vdev = vdev;
-	peer->ctrl_peer = peer->ctrl_peer;
+	peer->ctrl_peer = ctrl_peer;
 	qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
 		     QDF_MAC_ADDR_SIZE);
 
@@ -5926,6 +5926,8 @@ static struct cdp_peer_ops ol_ops_peer = {
 	.add_last_real_peer = ol_txrx_add_last_real_peer,
 	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
 	.update_last_real_peer = ol_txrx_update_last_real_peer,
+	.set_tdls_offchan_enabled = ol_txrx_set_tdls_offchan_enabled,
+	.set_peer_as_tdls_peer = ol_txrx_set_peer_as_tdls_peer,
 #endif /* CONFIG_HL_SUPPORT */
 	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
 };

+ 2 - 0
core/dp/txrx/ol_txrx.h

@@ -295,6 +295,8 @@ void ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
 bool is_vdev_restore_last_peer(void *ppeer);
 void ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *pvdev,
 				   uint8_t *peer_id, bool restore_last_peer);
+void ol_txrx_set_peer_as_tdls_peer(void *ppeer, bool val);
+void ol_txrx_set_tdls_offchan_enabled(void *ppeer, bool val);
 #endif
 
 #if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)

+ 7 - 5
core/dp/txrx/ol_txrx_flow_control.c

@@ -1366,10 +1366,11 @@ ol_txrx_map_to_netif_reason_type(uint32_t reason)
  * ol_txrx_vdev_pause() - pause vdev network queues
  * @vdev: vdev handle
  * @reason: network queue pause reason
- *
+ * @pause_type: type of pause
  * Return: none
  */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
+void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
+			uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -1394,7 +1395,8 @@ void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
  *
  * Return: none
  */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
+			  uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
@@ -1425,7 +1427,7 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_pause((struct cdp_vdev *)vdev, reason);
+		ol_txrx_vdev_pause((struct cdp_vdev *)vdev, reason, 0);
 	}
 }
 
@@ -1441,6 +1443,6 @@ void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_unpause((struct cdp_vdev *)vdev, reason);
+		ol_txrx_vdev_unpause((struct cdp_vdev *)vdev, reason, 0);
 	}
 }

+ 4 - 2
core/dp/txrx/ol_txrx_legacy_flow_control.c

@@ -54,7 +54,8 @@
  * will be paused.
  *
  */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
+void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
+			uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 
@@ -81,7 +82,8 @@ void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
  *
  */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
+void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
+			  uint32_t pause_type)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	/* TO DO: log the queue unpause */

+ 2 - 0
core/dp/txrx/ol_txrx_types.h

@@ -1523,6 +1523,8 @@ struct ol_txrx_peer_t {
 	qdf_time_t last_deauth_rcvd;
 	qdf_atomic_t fw_create_pending;
 	qdf_timer_t peer_unmap_timer;
+	bool is_tdls_peer; /* Mark peer as tdls peer */
+	bool tdls_offchan_enabled; /* TDLS OffChan operation in use */
 };
 
 struct ol_rx_remote_data {

+ 25 - 15
core/hdd/src/wlan_hdd_assoc.c

@@ -1930,6 +1930,30 @@ static void hdd_set_peer_authorized_event(uint32_t vdev_id)
 	complete(&adapter->sta_authorized_event);
 }
 
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+static inline
+void hdd_set_unpause_queue(void *soc, struct hdd_adapter *adapter, void *peer)
+{
+	void *vdev;
+	unsigned long rc;
+	/* wait for event from firmware to set the event */
+	rc = wait_for_completion_timeout(
+			&adapter->sta_authorized_event,
+			msecs_to_jiffies(HDD_PEER_AUTHORIZE_WAIT));
+	if (!rc)
+		hdd_debug("timeout waiting for sta_authorized_event");
+
+	vdev = (void *)cdp_peer_get_vdev(soc, peer);
+	cdp_fc_vdev_unpause(soc, (struct cdp_vdev *)vdev,
+			    OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED,
+			    0);
+}
+#else
+static inline
+void hdd_set_unpause_queue(void *soc, struct hdd_adapter *adapter, void *peer)
+{ }
+#endif
+
 QDF_STATUS hdd_change_peer_state(struct hdd_adapter *adapter,
 				 uint8_t *peer_mac,
 				 enum ol_txrx_peer_state sta_state,
@@ -1982,21 +2006,7 @@ QDF_STATUS hdd_change_peer_state(struct hdd_adapter *adapter,
 
 		if (adapter->device_mode == QDF_STA_MODE ||
 		    adapter->device_mode == QDF_P2P_CLIENT_MODE) {
-#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
-			void *vdev;
-			unsigned long rc;
-
-			/* wait for event from firmware to set the event */
-			rc = wait_for_completion_timeout(
-				&adapter->sta_authorized_event,
-				msecs_to_jiffies(HDD_PEER_AUTHORIZE_WAIT));
-			if (!rc)
-				hdd_debug("timeout waiting for sta_authorized_event");
-
-			vdev = (void *)cdp_peer_get_vdev(soc, peer);
-			cdp_fc_vdev_unpause(soc, (struct cdp_vdev *)vdev,
-					OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED);
-#endif
+			hdd_set_unpause_queue(soc, adapter, peer);
 		}
 	}
 	return QDF_STATUS_SUCCESS;

+ 66 - 36
core/wma/src/wma_data.c

@@ -1446,6 +1446,66 @@ QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
 
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
 	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
+static void wma_process_vdev_tx_pause_evt(void *soc,
+					  tp_wma_handle wma,
+					  wmi_tx_pause_event_fixed_param *event,
+					  uint8_t vdev_id)
+{
+	struct cdp_vdev *dp_handle =
+			wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev);
+
+	/* PAUSE action, add bitmap */
+	if (event->action == ACTION_PAUSE) {
+		/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
+		if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
+			cdp_fc_vdev_pause(soc,
+					  dp_handle,
+					  OL_TXQ_PAUSE_REASON_FW,
+					  event->pause_type);
+		} else {
+			/*
+			 * Now only support per-dev pause so it is not
+			 * necessary to pause a paused queue again.
+			 */
+			if (!wma_vdev_get_pause_bitmap(vdev_id))
+				cdp_fc_vdev_pause(soc,
+						  dp_handle,
+						  OL_TXQ_PAUSE_REASON_FW,
+						  event->pause_type);
+
+			wma_vdev_set_pause_bit(vdev_id,
+					       event->pause_type);
+		}
+	}
+	/* UNPAUSE action, clean bitmap */
+	else if (event->action == ACTION_UNPAUSE) {
+		/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
+		if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
+			cdp_fc_vdev_unpause(soc,
+					    dp_handle,
+					    OL_TXQ_PAUSE_REASON_FW,
+					    event->pause_type);
+		} else {
+		/* Handle unpause only if already paused */
+			if (wma_vdev_get_pause_bitmap(vdev_id)) {
+				wma_vdev_clear_pause_bit(vdev_id,
+							 event->pause_type);
+
+				if (wma->interfaces[vdev_id].pause_bitmap)
+					return;
+
+				/* PAUSE BIT MAP is cleared
+				 * UNPAUSE VDEV
+				 */
+				cdp_fc_vdev_unpause(soc, dp_handle,
+						    OL_TXQ_PAUSE_REASON_FW,
+						    event->pause_type);
+			}
+		}
+	} else {
+		WMA_LOGE("Not Valid Action Type %d", event->action);
+	}
+}
 
 int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
 				      uint32_t len)
@@ -1502,40 +1562,9 @@ int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
 				continue;
 			}
 
-			/* PAUSE action, add bitmap */
-			if (ACTION_PAUSE == wmi_event->action) {
-				/*
-				 * Now only support per-dev pause so it is not
-				 * necessary to pause a paused queue again.
-				 */
-				if (!wma_vdev_get_pause_bitmap(vdev_id))
-					cdp_fc_vdev_pause(soc,
-						dp_handle,
-						OL_TXQ_PAUSE_REASON_FW);
-				wma_vdev_set_pause_bit(vdev_id,
-					wmi_event->pause_type);
-			}
-			/* UNPAUSE action, clean bitmap */
-			else if (ACTION_UNPAUSE == wmi_event->action) {
-				/* Handle unpause only if already paused */
-				if (wma_vdev_get_pause_bitmap(vdev_id)) {
-					wma_vdev_clear_pause_bit(vdev_id,
-						wmi_event->pause_type);
-
-					if (!wma->interfaces[vdev_id].
-					    pause_bitmap) {
-						/* PAUSE BIT MAP is cleared
-						 * UNPAUSE VDEV
-						 */
-						cdp_fc_vdev_unpause(soc,
-							dp_handle,
-							OL_TXQ_PAUSE_REASON_FW);
-					}
-				}
-			} else {
-				WMA_LOGE("Not Valid Action Type %d",
-					 wmi_event->action);
-			}
+			wma_process_vdev_tx_pause_evt(soc, wma,
+						      wmi_event,
+						      vdev_id);
 
 			WMA_LOGD
 				("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
@@ -3088,8 +3117,9 @@ void wma_tx_abort(uint8_t vdev_id)
 	WMA_LOGD("%s: vdevid %d bssid %pM", __func__, vdev_id, bssid);
 	wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
 	cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
-			handle,
-			OL_TXQ_PAUSE_REASON_TX_ABORT);
+			  handle,
+			  OL_TXQ_PAUSE_REASON_TX_ABORT,
+			  0);
 
 	/* Flush all TIDs except MGMT TID for this peer in Target */
 	peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);

+ 14 - 9
core/wma/src/wma_dev_if.c

@@ -1906,6 +1906,8 @@ QDF_STATUS wma_create_peer(tp_wma_handle wma, struct cdp_pdev *pdev,
 		 wma->interfaces[vdev_id].peer_count);
 
 	wlan_peer_set_dp_handle(obj_peer, peer);
+	if (peer_type == WMI_PEER_TYPE_TDLS)
+		cdp_peer_set_peer_as_tdls(dp_soc, peer, true);
 
 	if (roam_synch_in_progress) {
 		WMA_LOGD("%s: LFR3: Created peer %pK with peer_addr %pM vdev_id %d, peer_count - %d",
@@ -2178,7 +2180,7 @@ void wma_send_del_bss_response(tp_wma_handle wma, struct del_bss_resp *resp)
 	cdp_fc_vdev_flush(soc, handle);
 	WMA_LOGD("%s, vdev_id: %d, un-pausing tx_ll_queue for VDEV_STOP rsp",
 		 __func__, vdev_id);
-	cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP);
+	cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 	wma_vdev_clear_pause_bit(vdev_id, PAUSE_TYPE_HOST);
 	qdf_atomic_set(&iface->bss_status, WMA_BSS_STATUS_STOPPED);
 	WMA_LOGD("%s: (type %d subtype %d) BSS is stopped",
@@ -2829,6 +2831,7 @@ QDF_STATUS wma_vdev_pre_start(uint8_t vdev_id, bool restart)
 	struct vdev_mlme_obj *mlme_obj;
 	struct wlan_objmgr_vdev *vdev = intr[vdev_id].vdev;
 	struct wlan_channel *des_chan;
+	void *dp_handle;
 
 	mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
 	if (!mlme_obj) {
@@ -2893,9 +2896,11 @@ QDF_STATUS wma_vdev_pre_start(uint8_t vdev_id, bool restart)
 	if (!restart) {
 		WMA_LOGD("%s, vdev_id: %d, unpausing tx_ll_queue at VDEV_START",
 			 __func__, vdev_id);
+
+		dp_handle =
+			wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev);
 		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
-			wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev),
-			0xffffffff);
+				    dp_handle, 0xffffffff, 0);
 		wma_vdev_update_pause_bitmap(vdev_id, 0);
 	}
 
@@ -3673,7 +3678,7 @@ QDF_STATUS wma_set_cdp_vdev_pause_reason(tp_wma_handle wma, uint8_t vdev_id)
 		WMA_LOGE("%s Invalid txrx vdev", __func__);
 		return QDF_STATUS_E_INVAL;
 	}
-	cdp_fc_vdev_pause(soc, vdev, OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED);
+	cdp_fc_vdev_pause(soc, vdev, OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED, 0);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -4979,12 +4984,12 @@ void wma_delete_bss_ho_fail(tp_wma_handle wma, uint8_t vdev_id)
 
 	WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP (del_bss)",
 		 __func__,  vdev_id);
-	cdp_fc_vdev_pause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP);
+	cdp_fc_vdev_pause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 	wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
 	cdp_fc_vdev_flush(soc, handle);
 	WMA_LOGD("%s, vdev_id: %d, un-pausing tx_ll_queue for VDEV_STOP rsp",
 		 __func__,  vdev_id);
-	cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP);
+	cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 	wma_vdev_clear_pause_bit(vdev_id, PAUSE_TYPE_HOST);
 	qdf_atomic_set(&iface->bss_status, WMA_BSS_STATUS_STOPPED);
 	WMA_LOGD("%s: (type %d subtype %d) BSS is stopped",
@@ -5188,8 +5193,8 @@ void wma_delete_bss(tp_wma_handle wma, uint8_t vdev_id)
 		 __func__, vdev_id);
 	wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
 	cdp_fc_vdev_pause(soc,
-		wlan_vdev_get_dp_handle(iface->vdev),
-		OL_TXQ_PAUSE_REASON_VDEV_STOP);
+			  wlan_vdev_get_dp_handle(iface->vdev),
+			  OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 
 	if (wma_send_vdev_stop_to_fw(wma, vdev_id)) {
 		WMA_LOGE("%s: %d Failed to send vdev stop", __func__, __LINE__);
@@ -5403,7 +5408,7 @@ QDF_STATUS wma_send_vdev_stop(uint8_t vdev_id)
 		 __func__, vdev_id);
 	cdp_fc_vdev_pause
 		(soc, wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev),
-		 OL_TXQ_PAUSE_REASON_VDEV_STOP);
+		 OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 
 	status = mlme_set_vdev_stop_type(
 				wma->interfaces[vdev_id].vdev,

+ 5 - 0
core/wma/src/wma_features.c

@@ -3817,6 +3817,11 @@ int wma_update_tdls_peer_state(WMA_HANDLE handle,
 		ret = -EIO;
 		goto end_tdls_peer_state;
 	}
+
+	cdp_peer_set_tdls_offchan_enabled(soc,
+					  peer,
+					  !!peer_cap->peer_off_chan_support);
+
 	vdev = cdp_peer_get_vdev(soc, peer);
 
 	if (wmi_unified_update_tdls_peer_state_cmd(wma_handle->wmi_handle,