qcacmn: Upgrading to htt_tx_wbm_completion_v3

Due to an overlap with the WBM block in QCN9224,
upgrade to htt_tx_wbm_completion_v3.

Change-Id: If6f7fd64686c9c1b35b253930e7918f570bd547c
Sai Rupesh Chevuru 3 years ago
parent
commit
90b1e60e57
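
The change is easiest to read as moving the HTT Tx completion handler behind the per-architecture ops table: dp_types.h gains a dp_tx_process_htt_completion member, dp_be.c and dp_li.c register their variants, and the common reap loop in dp_tx.c calls through soc->arch_ops instead of a static function. Below is a minimal, self-contained C sketch of that dispatch pattern, using hypothetical names rather than driver code:

#include <stdint.h>
#include <stdio.h>

struct tx_desc;                          /* opaque in this sketch */

struct arch_ops {
	/* per-arch HTT completion parser, mirroring the new dp_arch_ops member */
	void (*process_htt_completion)(void *soc, struct tx_desc *desc,
				       uint8_t *status, uint8_t ring_id);
};

static void process_htt_completion_be(void *soc, struct tx_desc *desc,
				      uint8_t *status, uint8_t ring_id)
{
	/* BE parses the v3 descriptor layout (see the dp_be_tx.c hunk below) */
	(void)soc; (void)desc; (void)status;
	printf("v3 handler, ring %d\n", ring_id);
}

static void process_htt_completion_li(void *soc, struct tx_desc *desc,
				      uint8_t *status, uint8_t ring_id)
{
	/* LI keeps the v2 descriptor layout (see the dp_li_tx.c hunk below) */
	(void)soc; (void)desc; (void)status;
	printf("v2 handler, ring %d\n", ring_id);
}

int main(void)
{
	struct arch_ops ops = {
		.process_htt_completion = process_htt_completion_be,
	};
	uint8_t htt_status[8] = { 0 };

	/* the common WBM reap loop now calls through the ops table */
	ops.process_htt_completion(NULL, NULL, htt_status, 0);

	ops.process_htt_completion = process_htt_completion_li;
	ops.process_htt_completion(NULL, NULL, htt_status, 0);
	return 0;
}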

+ 2 - 0
dp/wifi3.0/be/dp_be.c

@@ -1422,6 +1422,8 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
 	arch_ops->dp_rx_process = dp_rx_process_be;
 	arch_ops->tx_comp_get_params_from_hal_desc =
 		dp_tx_comp_get_params_from_hal_desc_be;
+	arch_ops->dp_tx_process_htt_completion =
+				dp_tx_process_htt_completion_be;
 	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
 	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
 	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;

+ 204 - 0
dp/wifi3.0/be/dp_be_tx.c

@@ -25,6 +25,10 @@
 #include "hal_tx.h"
 #include <hal_be_api.h>
 #include <hal_be_tx.h>
+#include <dp_htt.h>
+#ifdef FEATURE_WDS
+#include "dp_txrx_wds.h"
+#endif
 
 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
 #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
@@ -46,6 +50,19 @@
 #endif
 #endif
 
+#define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
+#define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
+#define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
+#define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
+#define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
+#define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
+	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
+
 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
 
 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
@@ -130,6 +147,193 @@ void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
 }
 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
 
+static inline
+void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
+{
+	struct dp_vdev *vdev;
+	uint8_t vdev_id;
+	uint8_t tx_status;
+	uint32_t *htt_desc = (uint32_t *)status;
+
+	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
+	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
+		qdf_assert_always(!soc->mec_fw_offload);
+
+		/*
+		 * Get vdev id from HTT status word in case of MEC
+		 * notification
+		 */
+		vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
+		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
+			return;
+
+		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
+					     DP_MOD_ID_HTT_COMP);
+		if (!vdev)
+			return;
+		dp_tx_mec_handler(vdev, status);
+		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+	}
+}
+
+void dp_tx_process_htt_completion_be(struct dp_soc *soc,
+				     struct dp_tx_desc_s *tx_desc,
+				     uint8_t *status,
+				     uint8_t ring_id)
+{
+	uint8_t tx_status;
+	struct dp_pdev *pdev;
+	struct dp_vdev *vdev = NULL;
+	struct hal_tx_completion_status ts = {0};
+	uint32_t *htt_desc = (uint32_t *)status;
+	struct dp_peer *peer;
+	struct cdp_tid_tx_stats *tid_stats = NULL;
+	struct htt_soc *htt_handle;
+	uint8_t vdev_id;
+
+	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
+	htt_handle = (struct htt_soc *)soc->htt_handle;
+	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
+
+	/*
+	 * There can be scenario where WBM consuming descriptor enqueued
+	 * from TQM2WBM first and TQM completion can happen before MEC
+	 * notification comes from FW2WBM. Avoid access any field of tx
+	 * descriptor in case of MEC notify.
+	 */
+	dp_tx_process_mec_notify_be(soc, status);
+
+	/*
+	 * If the descriptor is already freed in vdev_detach,
+	 * continue to next descriptor
+	 */
+	if (qdf_unlikely(!tx_desc->flags)) {
+		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
+				   tx_desc->id);
+		return;
+	}
+
+	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
+		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	pdev = tx_desc->pdev;
+
+	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
+		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	qdf_assert(tx_desc->pdev);
+
+	vdev_id = tx_desc->vdev_id;
+	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
+				     DP_MOD_ID_HTT_COMP);
+
+	if (qdf_unlikely(!vdev)) {
+		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	switch (tx_status) {
+	case HTT_TX_FW2WBM_TX_STATUS_OK:
+	case HTT_TX_FW2WBM_TX_STATUS_DROP:
+	case HTT_TX_FW2WBM_TX_STATUS_TTL:
+	{
+		uint8_t tid;
+
+		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
+			ts.peer_id =
+				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
+						htt_desc[3]);
+			ts.tid =
+				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
+						htt_desc[3]);
+		} else {
+			ts.peer_id = HTT_INVALID_PEER;
+			ts.tid = HTT_INVALID_TID;
+		}
+		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
+		ts.ppdu_id =
+			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
+					htt_desc[2]);
+		ts.ack_frame_rssi =
+			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
+					htt_desc[2]);
+
+		ts.tsf = htt_desc[4];
+		ts.first_msdu = 1;
+		ts.last_msdu = 1;
+		tid = ts.tid;
+		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
+			tid = CDP_MAX_DATA_TIDS - 1;
+
+		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
+
+		if (qdf_unlikely(pdev->delay_stats_flag))
+			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
+		if (tx_status < CDP_MAX_TX_HTT_STATUS)
+			tid_stats->htt_status_cnt[tx_status]++;
+
+		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
+					     DP_MOD_ID_HTT_COMP);
+		if (qdf_likely(peer))
+			dp_tx_update_peer_basic_stats(
+						peer,
+						qdf_nbuf_len(tx_desc->nbuf),
+						tx_status,
+						pdev->enhanced_stats_en);
+
+		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
+		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
+		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
+
+		if (qdf_likely(peer))
+			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
+
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
+	{
+		uint8_t reinject_reason;
+
+		reinject_reason =
+			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
+								htt_desc[1]);
+		dp_tx_reinject_handler(soc, vdev, tx_desc,
+				       status, reinject_reason);
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
+	{
+		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
+	{
+		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
+		goto release_tx_desc;
+	}
+	default:
+		dp_tx_comp_err("Invalid HTT tx_status %d\n",
+			       tx_status);
+		goto release_tx_desc;
+	}
+
+	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+	return;
+
+release_tx_desc:
+	dp_tx_comp_free_buf(soc, tx_desc);
+	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
+	if (vdev)
+		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+}
+
 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
 #ifdef DP_TX_IMPLICIT_RBM_MAPPING
 /*
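
Side by side, the new BE handler reads the same fields as the LI/v2 handler further below, just one 32-bit word later in the descriptor; the DP_TX_WBM_COMPLETION_V3_* wrappers added above map back to the V2 bit-field getters, so only the word index changes. The offsets, collected from the two handlers into a small illustrative C table (not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* htt_desc[] word index per field, as read by the v2 (LI) and v3 (BE)
 * completion handlers in this commit; only tx_status stays in word 0. */
enum htt_wbm_field {
	F_TX_STATUS,        /* v2: 0, v3: 0 */
	F_REINJECT_REASON,  /* v2: 0, v3: 1 */
	F_SCH_CMD_ID,       /* ppdu_id; v2: 1, v3: 2 */
	F_ACK_FRAME_RSSI,   /* v2: 1, v3: 2 */
	F_VALID_PEER_TID,   /* valid/sw_peer_id/tid; v2: 2, v3: 3 */
	F_TSF_OR_MEC_VDEV,  /* tsf, MEC-notify vdev_id; v2: 3, v3: 4 */
	F_NUM_FIELDS
};

static const uint8_t htt_wbm_v2_word[F_NUM_FIELDS] = { 0, 0, 1, 1, 2, 3 };
static const uint8_t htt_wbm_v3_word[F_NUM_FIELDS] = { 0, 1, 2, 2, 3, 4 };

int main(void)
{
	for (int f = 0; f < F_NUM_FIELDS; f++)
		printf("field %d: v2 word %d, v3 word %d\n",
		       f, htt_wbm_v2_word[f], htt_wbm_v3_word[f]);
	return 0;
}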

+ 16 - 0
dp/wifi3.0/be/dp_be_tx.h

@@ -70,6 +70,22 @@ void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
 					    void *tx_comp_hal_desc,
 					    struct dp_tx_desc_s **r_tx_desc);
 
+/**
+ * dp_tx_process_htt_completion_be() - Tx HTT Completion Indication Handler
+ * @soc: Handle to DP soc structure
+ * @tx_desc: software descriptor head pointer
+ * @status : Tx completion status from HTT descriptor
+ * @ring_id: ring number
+ *
+ * This function will process HTT Tx indication messages from Target
+ *
+ * Return: none
+ */
+void dp_tx_process_htt_completion_be(struct dp_soc *soc,
+				     struct dp_tx_desc_s *tx_desc,
+				     uint8_t *status,
+				     uint8_t ring_id);
+
 /**
  * dp_tx_init_bank_profiles() - Init TX bank profiles
  * @soc: DP soc handle

+ 14 - 210
dp/wifi3.0/dp_tx.c

@@ -522,7 +522,7 @@ static void dp_tx_tso_desc_release(struct dp_soc *soc,
  *
  * Return:
  */
-static void
+void
 dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
 {
 	struct dp_pdev *pdev = tx_desc->pdev;
@@ -2088,8 +2088,7 @@ fail_return:
  *
  * Return: none
  */
-static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
-				       struct dp_tx_desc_s *desc)
+void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
 {
 	qdf_nbuf_t nbuf = desc->nbuf;
 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
@@ -3309,7 +3308,6 @@ int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  *
  * Return: none
  */
-static
 void dp_tx_reinject_handler(struct dp_soc *soc,
 			    struct dp_vdev *vdev,
 			    struct dp_tx_desc_s *tx_desc,
@@ -3440,10 +3438,10 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
  *
  * Return: none
  */
-static void dp_tx_inspect_handler(struct dp_soc *soc,
-				  struct dp_vdev *vdev,
-				  struct dp_tx_desc_s *tx_desc,
-				  uint8_t *status)
+void dp_tx_inspect_handler(struct dp_soc *soc,
+			   struct dp_vdev *vdev,
+			   struct dp_tx_desc_s *tx_desc,
+			   uint8_t *status)
 {
 
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
@@ -3578,9 +3576,8 @@ static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  * @ring_id: TCL or WBM ring number for transmit path
  * Return: none
  */
-static void dp_tx_compute_delay(struct dp_vdev *vdev,
-				struct dp_tx_desc_s *tx_desc,
-				uint8_t tid, uint8_t ring_id)
+void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
+			 uint8_t tid, uint8_t ring_id)
 {
 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
@@ -3952,7 +3949,7 @@ void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  *
  * Return: none
  */
-static inline void
+void
 dp_tx_comp_process_desc(struct dp_soc *soc,
 			struct dp_tx_desc_s *desc,
 			struct hal_tx_completion_status *ts,
@@ -4190,7 +4187,6 @@ void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  *
  * Return: none
  */
-static inline
 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 				  struct dp_tx_desc_s *tx_desc,
 				  struct hal_tx_completion_status *ts,
@@ -4313,7 +4309,6 @@ out:
  *
  * Return: none
  */
-static inline
 void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
 				   uint8_t tx_status, bool update)
 {
@@ -4324,7 +4319,6 @@ void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
 	}
 }
 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
-static inline
 void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
 				   uint8_t tx_status, bool update)
 {
@@ -4336,7 +4330,6 @@ void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
 }
 
 #else
-static inline
 void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
 				   uint8_t tx_status, bool update)
 {
@@ -4429,198 +4422,6 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
 }
 
-/**
- * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
- * @soc: Handle to DP soc structure
- * @tx_desc: software descriptor head pointer
- * @status : Tx completion status from HTT descriptor
- * @ring_id: ring number
- *
- * This function will process HTT Tx indication messages from Target
- *
- * Return: none
- */
-static
-void dp_tx_process_htt_completion(struct dp_soc *soc,
-				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
-				  uint8_t ring_id)
-{
-	uint8_t tx_status;
-	struct dp_pdev *pdev;
-	struct dp_vdev *vdev;
-	struct hal_tx_completion_status ts = {0};
-	uint32_t *htt_desc = (uint32_t *)status;
-	struct dp_peer *peer;
-	struct cdp_tid_tx_stats *tid_stats = NULL;
-	struct htt_soc *htt_handle;
-	uint8_t vdev_id;
-
-	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
-	htt_handle = (struct htt_soc *)soc->htt_handle;
-	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
-
-	/*
-	 * There can be scenario where WBM consuming descriptor enqueued
-	 * from TQM2WBM first and TQM completion can happen before MEC
-	 * notification comes from FW2WBM. Avoid access any field of tx
-	 * descriptor in case of MEC notify.
-	 */
-	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
-		if (soc->mec_fw_offload)
-			return;
-
-		/*
-		 * Get vdev id from HTT status word in case of MEC
-		 * notification
-		 */
-		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
-		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
-			return;
-
-		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
-				DP_MOD_ID_HTT_COMP);
-		if (!vdev)
-			return;
-		dp_tx_mec_handler(vdev, status);
-		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
-		return;
-	}
-
-	/*
-	 * If the descriptor is already freed in vdev_detach,
-	 * continue to next descriptor
-	 */
-	if (qdf_unlikely(!tx_desc->flags)) {
-		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", tx_desc->id);
-		return;
-	}
-
-	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
-		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
-		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
-		dp_tx_comp_free_buf(soc, tx_desc);
-		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
-		return;
-	}
-
-	pdev = tx_desc->pdev;
-
-	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
-		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
-		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
-		dp_tx_comp_free_buf(soc, tx_desc);
-		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
-		return;
-	}
-
-	qdf_assert(tx_desc->pdev);
-
-	vdev_id = tx_desc->vdev_id;
-	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
-			DP_MOD_ID_HTT_COMP);
-
-	if (qdf_unlikely(!vdev)) {
-		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
-		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
-		dp_tx_comp_free_buf(soc, tx_desc);
-		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
-		return;
-	}
-
-	switch (tx_status) {
-	case HTT_TX_FW2WBM_TX_STATUS_OK:
-	case HTT_TX_FW2WBM_TX_STATUS_DROP:
-	case HTT_TX_FW2WBM_TX_STATUS_TTL:
-	{
-		uint8_t tid;
-		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
-			ts.peer_id =
-				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
-						htt_desc[2]);
-			ts.tid =
-				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
-						htt_desc[2]);
-		} else {
-			ts.peer_id = HTT_INVALID_PEER;
-			ts.tid = HTT_INVALID_TID;
-		}
-		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
-		ts.ppdu_id =
-			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
-					htt_desc[1]);
-		ts.ack_frame_rssi =
-			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
-					htt_desc[1]);
-
-		ts.tsf = htt_desc[3];
-		ts.first_msdu = 1;
-		ts.last_msdu = 1;
-		tid = ts.tid;
-		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
-			tid = CDP_MAX_DATA_TIDS - 1;
-
-		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
-
-		if (qdf_unlikely(pdev->delay_stats_flag))
-			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
-		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
-			tid_stats->htt_status_cnt[tx_status]++;
-		}
-
-		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
-					     DP_MOD_ID_HTT_COMP);
-
-		if (qdf_likely(peer))
-			dp_tx_update_peer_basic_stats(peer,
-						      qdf_nbuf_len(tx_desc->nbuf),
-						      tx_status,
-						      pdev->enhanced_stats_en);
-
-		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
-		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
-		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
-
-		if (qdf_likely(peer))
-			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
-
-		break;
-	}
-	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
-	{
-		uint8_t reinject_reason;
-
-		reinject_reason =
-			HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
-								htt_desc[0]);
-		dp_tx_reinject_handler(soc, vdev, tx_desc,
-				       status, reinject_reason);
-		break;
-	}
-	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
-	{
-		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
-		break;
-	}
-	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
-	{
-		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
-		goto release_tx_desc;
-	}
-	default:
-		dp_tx_comp_err("Invalid HTT tx_status %d\n",
-			       tx_status);
-		goto release_tx_desc;
-	}
-
-	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
-	return;
-
-release_tx_desc:
-	dp_tx_comp_free_buf(soc, tx_desc);
-	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
-	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
-}
-
 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
 static inline
 bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
@@ -4795,8 +4596,11 @@ more_data:
 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
 					htt_tx_status);
-			dp_tx_process_htt_completion(soc, tx_desc,
-					htt_tx_status, ring_id);
+			soc->arch_ops.dp_tx_process_htt_completion(
+							soc,
+							tx_desc,
+							htt_tx_status,
+							ring_id);
 		} else {
 			tx_desc->tx_status =
 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);

+ 23 - 0
dp/wifi3.0/dp_tx.h

@@ -240,6 +240,29 @@ QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
 					uint8_t num_pool,
 					uint16_t num_desc);
+void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc);
+void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
+void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
+			 uint8_t tid, uint8_t ring_id);
+void dp_tx_comp_process_tx_status(struct dp_soc *soc,
+				  struct dp_tx_desc_s *tx_desc,
+				  struct hal_tx_completion_status *ts,
+				  struct dp_peer *peer, uint8_t ring_id);
+void dp_tx_comp_process_desc(struct dp_soc *soc,
+			     struct dp_tx_desc_s *desc,
+			     struct hal_tx_completion_status *ts,
+			     struct dp_peer *peer);
+void dp_tx_reinject_handler(struct dp_soc *soc,
+			    struct dp_vdev *vdev,
+			    struct dp_tx_desc_s *tx_desc,
+			    uint8_t *status,
+			    uint8_t reinject_reason);
+void dp_tx_inspect_handler(struct dp_soc *soc,
+			   struct dp_vdev *vdev,
+			   struct dp_tx_desc_s *tx_desc,
+			   uint8_t *status);
+void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
+				   uint8_t tx_status, bool update);
 
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 /**

+ 4 - 0
dp/wifi3.0/dp_types.h

@@ -1665,6 +1665,10 @@ struct dp_arch_ops {
 	 void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc,
 						  void *tx_comp_hal_desc,
 						  struct dp_tx_desc_s **desc);
+	void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
+					     struct dp_tx_desc_s *tx_desc,
+					     uint8_t *status,
+					     uint8_t ring_id);
 	uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
 				  hal_ring_handle_t hal_ring_hdl,
 				  uint8_t reo_ring_num, uint32_t quota);

+ 2 - 0
dp/wifi3.0/li/dp_li.c

@@ -389,6 +389,8 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
 	arch_ops->dp_rx_process = dp_rx_process_li;
 	arch_ops->tx_comp_get_params_from_hal_desc =
 		dp_tx_comp_get_params_from_hal_desc_li;
+	arch_ops->dp_tx_process_htt_completion =
+			dp_tx_process_htt_completion_li;
 	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
 			dp_wbm_get_rx_desc_from_hal_desc_li;
 	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;

+ 189 - 0
dp/wifi3.0/li/dp_li_tx.c

@@ -25,6 +25,10 @@
 #include <dp_htt.h>
 #include <hal_li_api.h>
 #include <hal_li_tx.h>
+#include "dp_peer.h"
+#ifdef FEATURE_WDS
+#include "dp_txrx_wds.h"
+#endif
 
 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
 
@@ -56,6 +60,191 @@ void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
 	(*r_tx_desc)->peer_id = hal_tx_comp_get_peer_id(tx_comp_hal_desc);
 }
 
+static inline
+void dp_tx_process_mec_notify_li(struct dp_soc *soc, uint8_t *status)
+{
+	struct dp_vdev *vdev;
+	uint8_t vdev_id;
+	uint8_t tx_status;
+	uint32_t *htt_desc = (uint32_t *)status;
+
+	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
+	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
+		/*
+		 * Get vdev id from HTT status word in case of MEC
+		 * notification
+		 */
+		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
+		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
+			return;
+
+		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
+					     DP_MOD_ID_HTT_COMP);
+		if (!vdev)
+			return;
+		dp_tx_mec_handler(vdev, status);
+		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+	}
+}
+
+void dp_tx_process_htt_completion_li(struct dp_soc *soc,
+				     struct dp_tx_desc_s *tx_desc,
+				     uint8_t *status,
+				     uint8_t ring_id)
+{
+	uint8_t tx_status;
+	struct dp_pdev *pdev;
+	struct dp_vdev *vdev = NULL;
+	struct hal_tx_completion_status ts = {0};
+	uint32_t *htt_desc = (uint32_t *)status;
+	struct dp_peer *peer;
+	struct cdp_tid_tx_stats *tid_stats = NULL;
+	struct htt_soc *htt_handle;
+	uint8_t vdev_id;
+
+	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
+	htt_handle = (struct htt_soc *)soc->htt_handle;
+	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
+
+	/*
+	 * There can be scenario where WBM consuming descriptor enqueued
+	 * from TQM2WBM first and TQM completion can happen before MEC
+	 * notification comes from FW2WBM. Avoid access any field of tx
+	 * descriptor in case of MEC notify.
+	 */
+	dp_tx_process_mec_notify_li(soc, status);
+
+	/*
+	 * If the descriptor is already freed in vdev_detach,
+	 * continue to next descriptor
+	 */
+	if (qdf_unlikely(!tx_desc->flags)) {
+		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
+				   tx_desc->id);
+		return;
+	}
+
+	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
+		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	pdev = tx_desc->pdev;
+
+	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
+		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	qdf_assert(tx_desc->pdev);
+
+	vdev_id = tx_desc->vdev_id;
+	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
+				     DP_MOD_ID_HTT_COMP);
+
+	if (qdf_unlikely(!vdev)) {
+		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
+		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
+		goto release_tx_desc;
+	}
+
+	switch (tx_status) {
+	case HTT_TX_FW2WBM_TX_STATUS_OK:
+	case HTT_TX_FW2WBM_TX_STATUS_DROP:
+	case HTT_TX_FW2WBM_TX_STATUS_TTL:
+	{
+		uint8_t tid;
+
+		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
+			ts.peer_id =
+				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
+						htt_desc[2]);
+			ts.tid =
+				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
+						htt_desc[2]);
+		} else {
+			ts.peer_id = HTT_INVALID_PEER;
+			ts.tid = HTT_INVALID_TID;
+		}
+		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
+		ts.ppdu_id =
+			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
+					htt_desc[1]);
+		ts.ack_frame_rssi =
+			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
+					htt_desc[1]);
+
+		ts.tsf = htt_desc[3];
+		ts.first_msdu = 1;
+		ts.last_msdu = 1;
+		tid = ts.tid;
+		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
+			tid = CDP_MAX_DATA_TIDS - 1;
+
+		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
+
+		if (qdf_unlikely(pdev->delay_stats_flag))
+			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
+		if (tx_status < CDP_MAX_TX_HTT_STATUS)
+			tid_stats->htt_status_cnt[tx_status]++;
+
+		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
+					     DP_MOD_ID_HTT_COMP);
+		if (qdf_likely(peer)) {
+			DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
+					 qdf_nbuf_len(tx_desc->nbuf));
+			DP_STATS_INCC(peer, tx.tx_failed, 1,
+				      tx_status != HTT_TX_FW2WBM_TX_STATUS_OK);
+		}
+
+		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
+		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
+		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
+
+		if (qdf_likely(peer))
+			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
+
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
+	{
+		uint8_t reinject_reason;
+
+		reinject_reason =
+			HTT_TX_WBM_COMPLETION_V2_REINJECT_REASON_GET(
+								htt_desc[0]);
+		dp_tx_reinject_handler(soc, vdev, tx_desc,
+				       status, reinject_reason);
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
+	{
+		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
+		break;
+	}
+	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
+	{
+		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
+		goto release_tx_desc;
+	}
+	default:
+		dp_tx_comp_err("Invalid HTT tx_status %d\n",
+			       tx_status);
+		goto release_tx_desc;
+	}
+
+	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+	return;
+
+release_tx_desc:
+	dp_tx_comp_free_buf(soc, tx_desc);
+	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
+	if (vdev)
+		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
+}
+
 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
 /*
  * dp_tx_get_rbm_id()- Get the RBM ID for data transmission completion.

+ 17 - 0
dp/wifi3.0/li/dp_li_tx.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -53,6 +54,22 @@ void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
 					    void *tx_comp_hal_desc,
 					    struct dp_tx_desc_s **r_tx_desc);
 
+/**
+ * dp_tx_process_htt_completion_li() - Tx HTT Completion Indication Handler
+ * @soc: Handle to DP soc structure
+ * @tx_desc: software descriptor head pointer
+ * @status : Tx completion status from HTT descriptor
+ * @ring_id: ring number
+ *
+ * This function will process HTT Tx indication messages from Target
+ *
+ * Return: none
+ */
+void dp_tx_process_htt_completion_li(struct dp_soc *soc,
+				     struct dp_tx_desc_s *tx_desc,
+				     uint8_t *status,
+				     uint8_t ring_id);
+
 /**
  * dp_tx_desc_pool_init_li() - Initialize Tx Descriptor pool(s)
  * @soc: Handle to DP Soc structure