Эх сурвалжийг харах

qcacmn: Implement delay VOW stats for hawkeye

Delay counters per TID have been implemented for following types:
1. Linux stack to hw enqueue delay
2. HW enqueue delay to tx completion delay
3. TX interframe delay
4. RX interframe delay
5. RX frame delay from ring reap to networking stack

Change-Id: I836596cbd878a43955c18b4981cb5b7b43d4df5e
Varsha Mishra 6 жил өмнө
parent
commit
a331e6e55f

+ 2 - 0
dp/inc/cdp_txrx_cmn_struct.h

@@ -822,6 +822,7 @@ struct cdp_soc_t {
  * @CDP_CONFIG_CAPTURE_LATENCY: Capture time latency
  * @CDP_INGRESS_STATS: Accumulate ingress statistics
  * @CDP_OSIF_DROP: Accumulate drops in OSIF layer
+ * @CDP_CONFIG_DELAY_STATS: Enable per tid delay stats
  */
 enum cdp_pdev_param_type {
 	CDP_CONFIG_DEBUG_SNIFFER,
@@ -834,6 +835,7 @@ enum cdp_pdev_param_type {
 	CDP_CONFIG_CAPTURE_LATENCY,
 	CDP_INGRESS_STATS,
 	CDP_OSIF_DROP,
+	CDP_CONFIG_DELAY_STATS,
 };
 
 /*

+ 31 - 1
dp/inc/cdp_txrx_ctrl.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -529,6 +529,36 @@ cdp_enable_peer_based_pktlog(ol_txrx_soc_handle soc,
 			(pdev, peer_macaddr, enable);
 }
 
+/**
+ * cdp_calculate_delay_stats() - dispatch rx delay stats calculation
+ *
+ * @soc: pointer to the soc
+ * @vdev: vdev handle
+ * @nbuf: network buffer being delivered to the stack
+ *
+ * Invokes the registered ctrl_ops->calculate_delay_stats callback to
+ * compute per-TID rx delay statistics for @nbuf.  Silently returns when
+ * the callback is not registered; asserts (QDF_BUG) on a bad soc handle.
+ */
+static inline void
+cdp_calculate_delay_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
+			  qdf_nbuf_t nbuf)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance:", __func__);
+		QDF_BUG(0);
+		return;
+	}
+
+	if (!soc->ops->ctrl_ops ||
+	    !soc->ops->ctrl_ops->calculate_delay_stats) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: callback not registered:", __func__);
+		return;
+	}
+
+	return soc->ops->ctrl_ops->calculate_delay_stats(vdev, nbuf);
+}
+
 /**
  * @brief Subscribe to a specified WDI event.
  * @details

+ 1 - 0
dp/inc/cdp_txrx_ops.h

@@ -641,6 +641,7 @@ struct cdp_ctrl_ops {
 	int (*enable_peer_based_pktlog)(struct cdp_pdev
 			*txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);
 
+	void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
 };
 
 struct cdp_me_ops {

+ 2 - 2
dp/inc/cdp_txrx_stats_struct.h

@@ -169,7 +169,7 @@ struct cdp_tidq_stats {
  * cdp_delay_stats_mode: Different types of delay statistics
  *
  * @CDP_DELAY_STATS_SW_ENQ: Stack to hw enqueue delay
- * @CDP_DELAY_STATS_INTERFRAME: Interframe delay at radio entry point
+ * @CDP_DELAY_STATS_TX_INTERFRAME: Interframe delay at radio entry point
  * @CDP_DELAY_STATS_FW_HW_TRANSMIT: Hw enqueue to tx completion delay
  * @CDP_DELAY_STATS_REAP_STACK: Delay in ring reap to indicating network stack
  * @CDP_DELAY_STATS_RX_INTERFRAME: Rx interframe delay
@@ -177,7 +177,7 @@ struct cdp_tidq_stats {
  */
 enum cdp_delay_stats_mode {
 	CDP_DELAY_STATS_SW_ENQ,
-	CDP_DELAY_STATS_INTERFRAME,
+	CDP_DELAY_STATS_TX_INTERFRAME,
 	CDP_DELAY_STATS_FW_HW_TRANSMIT,
 	CDP_DELAY_STATS_REAP_STACK,
 	CDP_DELAY_STATS_RX_INTERFRAME,

+ 4 - 0
dp/wifi3.0/dp_internal.h

@@ -765,6 +765,10 @@ void dp_set_michael_key(struct cdp_peer *peer_handle,
 uint32_t dp_pdev_tid_stats_display(void *pdev_handle,
 			enum _ol_ath_param_t param, uint32_t value, void *buff);
 #endif
+
+void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
+			   uint8_t tid, uint8_t mode);
+
 /*
  * dp_get_mac_id_for_pdev() -  Return mac corresponding to pdev for mac
  *

+ 189 - 1
dp/wifi3.0/dp_main.c

@@ -4444,6 +4444,8 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 	vdev->dscp_tid_map_id = 0;
 	vdev->mcast_enhancement_en = 0;
 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
+	vdev->prev_tx_enq_tstamp = 0;
+	vdev->prev_rx_deliver_tstamp = 0;
 
 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	/* add this vdev into the pdev's list */
@@ -6050,6 +6052,20 @@ uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
 	return pdev->pdev_id;
 }
 
+/**
+ * dp_get_delay_stats_flag() - get delay stats flag
+ * @pdev_handle: Datapath PDEV handle
+ *
+ * Return: true if per-TID delay stats collection is enabled on this
+ *	   pdev, false otherwise
+ */
+static
+bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	return pdev->delay_stats_flag;
+}
+
 /**
  * dp_pdev_set_chan_noise_floor() - set channel noise floor
  * @pdev_handle: Datapath PDEV handle
@@ -8158,12 +8174,33 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
 	case CDP_OSIF_DROP:
 		dp_pdev_tid_stats_osif_drop(pdev_handle, val);
 		break;
+	case CDP_CONFIG_DELAY_STATS:
+		if (val == 1)
+			pdev->delay_stats_flag = true;
+		else
+			pdev->delay_stats_flag = false;
+		break;
 	default:
 		return QDF_STATUS_E_INVAL;
 	}
 	return QDF_STATUS_SUCCESS;
 }
 
+/*
+ * dp_calculate_delay_stats: CDP wrapper to compute rx delay stats
+ * @vdev_handle: DP vdev handle
+ * @nbuf: skb being delivered to the network stack
+ *
+ * Registered as ctrl_ops->calculate_delay_stats; forwards to
+ * dp_rx_compute_delay().
+ *
+ * Return: void
+ */
+static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
+				     qdf_nbuf_t nbuf)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+
+	dp_rx_compute_delay(vdev, nbuf);
+}
+
 /*
  * dp_get_vdev_param: function to get parameters from vdev
  * @param: parameter type to get value
@@ -9360,7 +9397,7 @@ static struct cdp_cmn_ops dp_ops_cmn = {
 	.txrx_stats_request = dp_txrx_stats_request,
 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
-	.txrx_get_vow_config_frm_pdev = NULL,
+	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
 	.txrx_set_nac = dp_set_nac,
 	.txrx_get_tx_pending = dp_get_tx_pending,
@@ -9424,6 +9461,7 @@ static struct cdp_ctrl_ops dp_ops_ctrl = {
 	.set_key = dp_set_michael_key,
 	.txrx_get_vdev_param = dp_get_vdev_param,
 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
+	.calculate_delay_stats = dp_calculate_delay_stats,
 };
 
 static struct cdp_me_ops dp_ops_me = {
@@ -10246,3 +10284,153 @@ int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
 	return 0;
 }
 #endif
+
+/**
+ * dp_bucket_index() - Return delay bucket index for a measured delay
+ *
+ * @delay: delay measured, in the same unit (ms) as the thresholds
+ * @array: ascending array of bucket upper-bound thresholds
+ *
+ * Return: index of the first bucket whose threshold exceeds @delay,
+ *	   or the last bucket when @delay is beyond every threshold
+ */
+static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
+{
+	uint8_t i = CDP_DELAY_BUCKET_1;
+
+	for (; i < CDP_DELAY_BUCKET_MAX; i++) {
+		/*
+		 * Thresholds are ascending, so the first entry larger than
+		 * the delay identifies the bucket.  The previous condition
+		 * (delay < array[i] && delay > array[i + 1]) could never be
+		 * true for an ascending array and read array[i + 1] one
+		 * element past the end on the final iteration.
+		 */
+		if (delay < array[i])
+			return i;
+	}
+
+	return (CDP_DELAY_BUCKET_MAX - 1);
+}
+
+/**
+ * dp_fill_delay_buckets() - Fill delay statistics bucket for each
+ *				type of delay
+ *
+ * @pdev: pdev handle
+ * @delay: delay in ms
+ * @tid: tid value
+ * @mode: type of tx delay mode
+ *
+ * Return: pointer to the cdp_delay_stats structure whose bucket was
+ *	   updated, or NULL for an unknown @mode
+ */
+static struct cdp_delay_stats *
+dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
+		      uint8_t tid, uint8_t mode)
+{
+	uint8_t delay_index = 0;
+	struct cdp_tid_tx_stats *tstats =
+		&pdev->stats.tid_stats.tid_tx_stats[tid];
+	struct cdp_tid_rx_stats *rstats =
+		&pdev->stats.tid_stats.tid_rx_stats[tid];
+	/*
+	 * cdp_fw_to_hw_delay_range
+	 * Fw to hw delay ranges in milliseconds
+	 */
+	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
+		10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
+
+	/*
+	 * cdp_sw_enq_delay_range
+	 * Software enqueue delay ranges in milliseconds
+	 */
+	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
+		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+
+	/*
+	 * cdp_intfrm_delay_range
+	 * Interframe delay ranges in milliseconds
+	 */
+	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
+		5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
+
+	/*
+	 * Update delay stats in proper bucket
+	 */
+	switch (mode) {
+	/* Software Enqueue delay ranges */
+	case CDP_DELAY_STATS_SW_ENQ:
+
+		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
+		tstats->swq_delay.delay_bucket[delay_index]++;
+		return &tstats->swq_delay;
+
+	/* Tx Completion delay ranges */
+	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
+
+		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
+		tstats->hwtx_delay.delay_bucket[delay_index]++;
+		return &tstats->hwtx_delay;
+
+	/* Interframe tx delay ranges */
+	case CDP_DELAY_STATS_TX_INTERFRAME:
+
+		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
+		tstats->intfrm_delay.delay_bucket[delay_index]++;
+		return &tstats->intfrm_delay;
+
+	/* Interframe rx delay ranges */
+	case CDP_DELAY_STATS_RX_INTERFRAME:
+
+		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
+		rstats->intfrm_delay.delay_bucket[delay_index]++;
+		return &rstats->intfrm_delay;
+
+	/* Ring reap to indication to network stack */
+	case CDP_DELAY_STATS_REAP_STACK:
+
+		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
+		rstats->to_stack_delay.delay_bucket[delay_index]++;
+		/*
+		 * Return the structure whose bucket was just updated so the
+		 * caller applies min/max/avg to the same counter set.  The
+		 * previous code returned &rstats->intfrm_delay here, mixing
+		 * to_stack bucket counts with intfrm min/max/avg.
+		 */
+		return &rstats->to_stack_delay;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s Incorrect delay mode: %d", __func__, mode);
+	}
+
+	return NULL;
+}
+
+/**
+ * dp_update_delay_stats() - Update delay statistics in structure
+ *				and fill min, max and avg delay
+ *
+ * @pdev: pdev handle
+ * @delay: delay in ms
+ * @tid: tid value
+ * @mode: type of tx delay mode
+ *
+ * Zero delays are counted in the bucket histogram but excluded from
+ * min/max/avg (a zero ingress/prev timestamp would otherwise skew them).
+ *
+ * Return: none
+ */
+void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
+			   uint8_t tid, uint8_t mode)
+{
+	struct cdp_delay_stats *dstats = NULL;
+
+	/*
+	 * Delay ranges are different for different delay modes
+	 * Get the correct index to update delay bucket
+	 */
+	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode);
+	if (qdf_unlikely(!dstats))
+		return;
+
+	if (delay != 0) {
+		/*
+		 * Compute minimum, average and maximum delay.
+		 * min_delay starts at 0 (stats are zero-initialized, as the
+		 * avg_delay seeding below already assumes), so it must be
+		 * seeded on the first sample; otherwise "delay < min_delay"
+		 * would never be true and min would stay 0 forever.
+		 */
+		if (!dstats->min_delay || delay < dstats->min_delay)
+			dstats->min_delay = delay;
+
+		if (delay > dstats->max_delay)
+			dstats->max_delay = delay;
+
+		/*
+		 * Average over delay measured till now
+		 */
+		if (!dstats->avg_delay)
+			dstats->avg_delay = delay;
+		else
+			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
+	}
+}

+ 49 - 1
dp/wifi3.0/dp_rx.c

@@ -57,6 +57,23 @@ static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
 }
 #endif
 
+#ifdef ATH_RX_PRI_SAVE
+/**
+ * dp_rx_save_tid_ts() - save tid in nbuf priority and, when delay stats
+ *			 are enabled, stamp the ring-reap time on the nbuf
+ * @nbuf: rx network buffer
+ * @tid: tid for this frame
+ * @flag: pdev delay_stats_flag
+ *
+ * With ATH_RX_PRI_SAVE the tid is always saved in the nbuf priority
+ * field; the timestamp is taken only when delay stats are enabled.
+ */
+static inline void dp_rx_save_tid_ts(qdf_nbuf_t nbuf, uint8_t tid, bool flag)
+{
+	qdf_nbuf_set_priority(nbuf, tid);
+	if (qdf_unlikely(flag))
+		qdf_nbuf_set_timestamp(nbuf);
+}
+#else
+/*
+ * Without ATH_RX_PRI_SAVE both the priority (tid) and the timestamp are
+ * recorded only when delay stats are enabled, avoiding the per-packet
+ * cost on the default path.
+ */
+static inline void dp_rx_save_tid_ts(qdf_nbuf_t nbuf, uint8_t tid, bool flag)
+{
+	if (qdf_unlikely(flag)) {
+		qdf_nbuf_set_priority(nbuf, tid);
+		qdf_nbuf_set_timestamp(nbuf);
+	}
+}
+#endif
+
 /*
  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
  *
@@ -1102,6 +1119,37 @@ qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
 	return parent;
 }
 
+/**
+ * dp_rx_compute_delay() - Update rx delay stats (ring-reap-to-stack and
+ *			   rx interframe) for a frame delivered to the stack
+ *
+ * @vdev: vdev handle
+ * @nbuf: rx buffer being delivered; carries the reap timestamp and tid
+ *	  saved by dp_rx_save_tid_ts()
+ *
+ * Return: none
+ */
+void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+{
+	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
+	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
+	uint8_t tid = qdf_nbuf_get_priority(nbuf);
+	uint32_t interframe_delay =
+		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
+
+	dp_update_delay_stats(vdev->pdev, to_stack, tid,
+			      CDP_DELAY_STATS_REAP_STACK);
+	/*
+	 * Update interframe delay stats calculated at deliver_data_ol point.
+	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
+	 * interframe delay will not be calculated correctly for 1st frame.
+	 * On the other side, this will help in avoiding extra per packet check
+	 * of vdev->prev_rx_deliver_tstamp.
+	 */
+	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
+			      CDP_DELAY_STATS_RX_INTERFRAME);
+	vdev->prev_rx_deliver_tstamp = current_ts;
+}
+
 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
 						struct dp_peer *peer,
 						qdf_nbuf_t nbuf_head,
@@ -1618,13 +1666,13 @@ done:
 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
 			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
 							rx_tlv_hdr);
-		DP_RX_TID_SAVE(nbuf, tid);
 
 		/*
 		 * Check if DMA completed -- msdu_done is the last bit
 		 * to be written
 		 */
 		rx_pdev = soc->pdev_list[rx_desc->pool_id];
+		dp_rx_save_tid_ts(nbuf, tid, rx_pdev->delay_stats_flag);
 		tid_stats = &rx_pdev->stats.tid_stats.tid_rx_stats[tid];
 		if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,

+ 1 - 0
dp/wifi3.0/dp_rx.h

@@ -987,4 +987,5 @@ dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
 void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
 				void *ring_desc, struct dp_rx_desc *rx_desc);
 
+void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
 #endif /* _DP_RX_H */

+ 65 - 5
dp/wifi3.0/dp_tx.c

@@ -1028,6 +1028,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
 		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);
 
 
+	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
 	/* Sync cached descriptor with HW */
 	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
 
@@ -2665,21 +2666,74 @@ void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
 
 #endif
 
+/**
+ * dp_tx_compute_delay() - Update per-TID tx delay stats (sw enqueue,
+ *			   fw/hw transmit and tx interframe) at completion
+ *
+ * @vdev: vdev handle
+ * @tx_desc: tx descriptor; carries the hw-enqueue timestamp and nbuf
+ *	     (whose skb timestamp is the stack-ingress time)
+ * @tid: tid value
+ *
+ * No-op unless delay stats collection is enabled on the pdev.
+ *
+ * Return: none
+ */
+static void dp_tx_compute_delay(struct dp_vdev *vdev,
+				struct dp_tx_desc_s *tx_desc, uint8_t tid)
+{
+	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
+	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
+
+	if (qdf_likely(!vdev->pdev->delay_stats_flag))
+		return;
+
+	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
+	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
+	timestamp_hw_enqueue = tx_desc->timestamp;
+	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
+	fwhw_transmit_delay = (uint32_t)(current_timestamp -
+					 timestamp_hw_enqueue);
+	interframe_delay = (uint32_t)(timestamp_ingress -
+				      vdev->prev_tx_enq_tstamp);
+
+	/*
+	 * Delay in software enqueue
+	 */
+	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
+			      CDP_DELAY_STATS_SW_ENQ);
+	/*
+	 * Delay between packet enqueued to HW and Tx completion
+	 */
+	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
+			      CDP_DELAY_STATS_FW_HW_TRANSMIT);
+
+	/*
+	 * Update interframe delay stats calculated at hardstart receive point.
+	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
+	 * interframe delay will not be calculated correctly for 1st frame.
+	 * On the other side, this will help in avoiding extra per packet check
+	 * of !vdev->prev_tx_enq_tstamp.
+	 */
+	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
+			      CDP_DELAY_STATS_TX_INTERFRAME);
+	vdev->prev_tx_enq_tstamp = timestamp_ingress;
+}
+
 /**
  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
- * @peer: Handle to DP peer
- * @ts: pointer to HAL Tx completion stats
+ * @tx_desc: software descriptor head pointer
+ * @ts: Tx completion status
+ * @peer: peer handle
  *
  * Return: None
  */
 static inline void
-dp_tx_update_peer_stats(struct dp_peer *peer,
-			struct hal_tx_completion_status *ts, uint32_t length)
+dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
+			struct hal_tx_completion_status *ts,
+			struct dp_peer *peer)
 {
 	struct dp_pdev *pdev = peer->vdev->pdev;
 	struct dp_soc *soc = NULL;
 	uint8_t mcs, pkt_type;
 	uint8_t tid = ts->tid;
+	uint32_t length;
 	struct cdp_tid_tx_stats *tid_stats;
 
 	if (!pdev)
@@ -2699,7 +2753,11 @@ dp_tx_update_peer_stats(struct dp_peer *peer,
 		return;
 	}
 
+	length = qdf_nbuf_len(tx_desc->nbuf);
 	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
+
+	if (qdf_unlikely(pdev->delay_stats_flag))
+		dp_tx_compute_delay(peer->vdev, tx_desc, tid);
 	tid_stats->complete_cnt++;
 	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
 		     (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
@@ -3061,7 +3119,7 @@ void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
 		}
 	}
 
-	dp_tx_update_peer_stats(peer, ts, length);
+	dp_tx_update_peer_stats(tx_desc, ts, peer);
 
 out:
 	return;
@@ -3169,6 +3227,8 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 
 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tid];
 
+		if (qdf_unlikely(pdev->delay_stats_flag))
+			dp_tx_compute_delay(vdev, tx_desc, tid);
 		tid_stats->complete_cnt++;
 		if (qdf_unlikely(tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)) {
 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;

+ 0 - 3
dp/wifi3.0/dp_tx.h

@@ -223,9 +223,6 @@ void  dp_iterate_update_peer_list(void *pdev_hdl);
 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
 #endif
 
-#define DP_RX_TID_SAVE(_nbuf, _tid) \
-	(qdf_nbuf_set_priority(_nbuf, _tid))
-
 /* TODO TX_FEATURE_NOT_YET */
 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
 {

+ 1 - 3
dp/wifi3.0/dp_tx_desc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -557,8 +557,6 @@ static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
 	soc->tx_desc[desc_pool_id].num_allocated++;
 	soc->tx_desc[desc_pool_id].num_free--;
 
-	tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
-
 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
 
 	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);

+ 8 - 0
dp/wifi3.0/dp_types.h

@@ -1331,6 +1331,8 @@ struct dp_pdev {
 	/* enable time latency check for tx completion */
 	bool latency_capture_enable;
 
+	/* enable calculation of delay stats*/
+	bool delay_stats_flag;
 	struct {
 		uint16_t tx_ppdu_id;
 		uint16_t tx_peer_id;
@@ -1548,6 +1550,12 @@ struct dp_vdev {
 
 	/* AST hash value for BSS peer in HW valid for STA VAP*/
 	uint16_t bss_ast_hash;
+
+	/* Capture timestamp of previous tx packet enqueued */
+	uint64_t prev_tx_enq_tstamp;
+
+	/* Capture timestamp of previous rx packet delivered */
+	uint64_t prev_rx_deliver_tstamp;
 };
 
 

+ 13 - 0
qdf/inc/qdf_nbuf.h

@@ -3274,6 +3274,19 @@ qdf_nbuf_set_timestamp(struct sk_buff *skb)
 	__qdf_nbuf_set_timestamp(skb);
 }
 
+/**
+ * qdf_nbuf_get_timestamp() - get the timestamp stored on the frame
+ *
+ * @skb: sk buff
+ *
+ * Return: timestamp stored in skb, converted to milliseconds
+ */
+static inline uint64_t
+qdf_nbuf_get_timestamp(struct sk_buff *skb)
+{
+	return __qdf_nbuf_get_timestamp(skb);
+}
+
 /**
  * qdf_nbuf_get_timedelta_ms() - get time difference in ms
  *

+ 14 - 1
qdf/linux/src/i_qdf_nbuf.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -2021,6 +2021,19 @@ __qdf_nbuf_set_timestamp(struct sk_buff *skb)
 	__net_timestamp(skb);
 }
 
+/**
+ * __qdf_nbuf_get_timestamp() - get the timestamp stored on the frame
+ *
+ * @skb: sk buff
+ *
+ * Return: skb ktime converted to milliseconds
+ */
+static inline uint64_t
+__qdf_nbuf_get_timestamp(struct sk_buff *skb)
+{
+	return ktime_to_ms(skb_get_ktime(skb));
+}
+
 /**
  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
  *