qcacmn: add support for transmit latency stats

Add support for per-link transmit latency statistics

Change-Id: Iadb87deb6e19cd4d9f86565fe687c7d9a1f05679
CRs-Fixed: 3597028
This commit is contained in:
Yu Wang
2023-09-04 19:13:08 -07:00
Committed by Rahul Choudhary
Parent 48f4d53901
Commit 3f6e59aef9
11 changed files with 1207 additions and 18 deletions


@@ -3269,4 +3269,83 @@ enum cdp_umac_reset_state {
CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW,
CDP_UMAC_RESET_INVALID_STATE
};
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/* number of distribution levels for tx latency stats */
#define CDP_TX_LATENCY_DISTR_LV_MAX 4
/**
* enum cdp_tx_latency_type - transmit latency types
* @CDP_TX_LATENCY_TYPE_DRIVER: per-MSDU latency
* from: an MSDU is presented to the driver
* to: the MSDU is queued into TCL SRNG
* @CDP_TX_LATENCY_TYPE_RING_BUF: per-MSDU latency
* from: the MSDU is queued into TCL SRNG
* to: the MSDU is released by the driver
* @CDP_TX_LATENCY_TYPE_HW: per-MSDU latency
* from: the MSDU is presented to the hardware
* to: the MSDU is released by the hardware
* @CDP_TX_LATENCY_TYPE_CCA: per-PPDU latency
* the time spent on Clear Channel Assessment; the maximum value is 50000 (us)
* from: a PPDU is presented to the hardware LMAC
* to: over-the-air transmission is started for the PPDU
* @CDP_TX_LATENCY_TYPE_MAX: maximum number of types
*/
enum cdp_tx_latency_type {
CDP_TX_LATENCY_TYPE_DRIVER,
CDP_TX_LATENCY_TYPE_RING_BUF,
CDP_TX_LATENCY_TYPE_HW,
CDP_TX_LATENCY_TYPE_CCA,
/* keep last */
CDP_TX_LATENCY_TYPE_MAX,
};
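/*
 * Illustrative timeline (an editorial sketch, not part of this change)
 * relating the per-MSDU types above; stage boundaries follow the
 * descriptions, and the RING_BUF and HW windows overlap in time:
 *
 *  driver ingress      queued to TCL SRNG            driver release
 *       |------ DRIVER ------|--------- RING_BUF ---------|
 *                                HW ingress    HW release
 *                                    |----- HW -----|
 *
 * CCA is accounted per PPDU within the HW stage, from LMAC ingress until
 * over-the-air transmission starts.
 */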
/**
* struct cdp_tx_latency_config - configuration for per-link transmit latency
* statistics
* @enable: enable/disable the feature
* @report: enable/disable async report
* @period: statistical period (in ms)
* @granularity: granularity (in microseconds) of the distribution for the types
*/
struct cdp_tx_latency_config {
bool enable;
bool report;
uint32_t period;
uint32_t granularity[CDP_TX_LATENCY_TYPE_MAX];
};
/**
* struct cdp_tx_latency_stats - per-type transmit latency statistics
* @average: average of the latency (in us) for the type within a cycle
* @granularity: granularity (in us) of the distribution for the type
* @distribution: latency distribution for the type
*/
struct cdp_tx_latency_stats {
uint32_t average;
uint32_t granularity;
uint32_t distribution[CDP_TX_LATENCY_DISTR_LV_MAX];
};
/**
* struct cdp_tx_latency - per-link transmit latency statistics
* @node: list node for membership in the stats list
* @mac_remote: link mac address of remote peer
* @stats: transmit latency statistics for types
*/
struct cdp_tx_latency {
qdf_list_node_t node;
struct qdf_mac_addr mac_remote;
struct cdp_tx_latency_stats stats[CDP_TX_LATENCY_TYPE_MAX];
};
/**
* typedef cdp_tx_latency_cb() - callback for transmit latency
* @vdev_id: vdev id
* @stats_list: list of per-link transmit latency statistics
*/
typedef QDF_STATUS(*cdp_tx_latency_cb)(uint8_t vdev_id,
qdf_list_t *stats_list);
#endif
#endif
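A minimal consumer sketch (hypothetical, not part of this change): a cdp_tx_latency_cb implementation that walks the delivered list and logs the per-type averages. The list nodes remain owned by the DP layer (see dp_tx_latency_stats_rpt_per_vdev below), so the callback must not free them.

static QDF_STATUS example_tx_latency_cb(uint8_t vdev_id,
					qdf_list_t *stats_list)
{
	struct cdp_tx_latency *entry;
	int type;

	/* walk the list without removing nodes */
	qdf_list_for_each(stats_list, entry, node) {
		for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
			qdf_info("vdev %u peer " QDF_MAC_ADDR_FMT
				 " type %d avg %u us",
				 vdev_id,
				 QDF_MAC_ADDR_REF(entry->mac_remote.bytes),
				 type, entry->stats[type].average);
	}

	return QDF_STATUS_SUCCESS;
}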


@@ -1303,4 +1303,88 @@ cdp_host_get_interface_stats(ol_txrx_soc_handle soc,
buf,
true);
}
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
* cdp_host_tx_latency_stats_config() - config transmit latency statistics for
* specified vdev
* @soc: Handle to struct dp_soc
* @vdev_id: vdev id
* @config: configuration for transmit latency statistics
*
* Return: QDF_STATUS
*/
static inline QDF_STATUS
cdp_host_tx_latency_stats_config(ol_txrx_soc_handle soc,
uint8_t vdev_id,
struct cdp_tx_latency_config *config)
{
if (!soc || !soc->ops) {
QDF_BUG(0);
return QDF_STATUS_E_FAILURE;
}
if (!soc->ops->host_stats_ops ||
!soc->ops->host_stats_ops->tx_latency_stats_config)
return QDF_STATUS_E_FAILURE;
return soc->ops->host_stats_ops->tx_latency_stats_config(soc,
vdev_id,
config);
}
/**
* cdp_host_tx_latency_stats_fetch() - fetch transmit latency statistics for
* specified link mac address
* @soc: Handle to struct dp_soc
* @vdev_id: vdev id
* @mac: link mac address of remote peer
* @latency: buffer to hold per-link transmit latency statistics
*
* Return: QDF_STATUS
*/
static inline QDF_STATUS
cdp_host_tx_latency_stats_fetch(ol_txrx_soc_handle soc,
uint8_t vdev_id, uint8_t *mac,
struct cdp_tx_latency *latency)
{
if (!soc || !soc->ops) {
QDF_BUG(0);
return QDF_STATUS_E_FAILURE;
}
if (!soc->ops->host_stats_ops ||
!soc->ops->host_stats_ops->tx_latency_stats_fetch)
return QDF_STATUS_E_FAILURE;
return soc->ops->host_stats_ops->tx_latency_stats_fetch(soc,
vdev_id,
mac,
latency);
}
/**
* cdp_host_tx_latency_stats_register_cb() - register transmit latency
* statistics callback
* @soc: Handle to struct dp_soc
* @cb: callback function for transmit latency statistics
*
* Return: QDF_STATUS
*/
static inline QDF_STATUS
cdp_host_tx_latency_stats_register_cb(ol_txrx_soc_handle soc,
cdp_tx_latency_cb cb)
{
if (!soc || !soc->ops) {
QDF_BUG(0);
return QDF_STATUS_E_FAILURE;
}
if (!soc->ops->host_stats_ops ||
!soc->ops->host_stats_ops->tx_latency_stats_register_cb)
return QDF_STATUS_E_FAILURE;
return soc->ops->host_stats_ops->tx_latency_stats_register_cb(soc, cb);
}
#endif
#endif /* _CDP_TXRX_HOST_STATS_H_ */
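A hedged usage sketch (illustrative values, assuming the hypothetical example_tx_latency_cb above): register the callback, then enable the feature on a vdev with a 500 ms statistical period and per-type bucket widths. The fetch path is analogous: cdp_host_tx_latency_stats_fetch() fills a caller-provided struct cdp_tx_latency for one link mac address.

static QDF_STATUS example_enable_tx_latency(ol_txrx_soc_handle soc,
					    uint8_t vdev_id)
{
	struct cdp_tx_latency_config config = {0};
	QDF_STATUS status;

	status = cdp_host_tx_latency_stats_register_cb(soc,
						       example_tx_latency_cb);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	config.enable = true;
	config.report = true;	/* async delivery through the callback */
	config.period = 500;	/* statistical period, in ms */
	/* per-type bucket widths of the distribution, in microseconds */
	config.granularity[CDP_TX_LATENCY_TYPE_DRIVER] = 500;
	config.granularity[CDP_TX_LATENCY_TYPE_RING_BUF] = 500;
	config.granularity[CDP_TX_LATENCY_TYPE_HW] = 2000;
	config.granularity[CDP_TX_LATENCY_TYPE_CCA] = 2000;

	return cdp_host_tx_latency_stats_config(soc, vdev_id, &config);
}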


@@ -1195,6 +1195,10 @@ struct cdp_mon_ops {
* @get_pdev_obss_stats:
* @clear_pdev_obss_pd_stats:
* @txrx_get_interface_stats:
* @tx_latency_stats_config: config tx latency stats for specified vdev
* @tx_latency_stats_fetch: fetch tx latency stats for specified link
* mac address
* @tx_latency_stats_register_cb: register tx latency stats callback
*/
struct cdp_host_stats_ops {
int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
@@ -1404,6 +1408,18 @@ struct cdp_host_stats_ops {
uint8_t vdev_id,
void *buf,
bool is_aggregate);
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
QDF_STATUS
(*tx_latency_stats_config)(struct cdp_soc_t *soc,
uint8_t vdev_id,
struct cdp_tx_latency_config *config);
QDF_STATUS (*tx_latency_stats_fetch)(struct cdp_soc_t *soc,
uint8_t vdev_id,
uint8_t *mac,
struct cdp_tx_latency *latency);
QDF_STATUS (*tx_latency_stats_register_cb)(struct cdp_soc_t *soc,
cdp_tx_latency_cb cb);
#endif
};
/**


@@ -3757,6 +3757,223 @@ static inline void dp_htt_rx_nbuf_free(qdf_nbuf_t nbuf)
}
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
#define TX_LATENCY_STATS_PERIOD_MAX_MS \
(HTT_H2T_TX_LATENCY_STATS_CFG_PERIODIC_INTERVAL_M >> \
HTT_H2T_TX_LATENCY_STATS_CFG_PERIODIC_INTERVAL_S)
#define TX_LATENCY_STATS_GRANULARITY_MAX_MS \
(HTT_H2T_TX_LATENCY_STATS_CFG_GRANULARITY_M >> \
HTT_H2T_TX_LATENCY_STATS_CFG_GRANULARITY_S)
/**
* dp_h2t_tx_latency_stats_cfg_msg_send(): send HTT message for tx latency
* stats config to FW
* @dp_soc: DP SOC handle
* @vdev_id: vdev id
* @enable: indicates whether the feature is enabled
* @period: statistical period for transmit latency, in ms
* @granularity: granularity for the tx latency distribution, in ms
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_h2t_tx_latency_stats_cfg_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id,
bool enable, uint32_t period,
uint32_t granularity)
{
struct htt_soc *soc = dp_soc->htt_handle;
struct dp_htt_htc_pkt *pkt;
uint8_t *htt_logger_bufp;
qdf_nbuf_t msg;
uint32_t *msg_word;
QDF_STATUS status;
qdf_size_t size;
if (period > TX_LATENCY_STATS_PERIOD_MAX_MS ||
granularity > TX_LATENCY_STATS_GRANULARITY_MAX_MS)
return QDF_STATUS_E_INVAL;
size = sizeof(struct htt_h2t_tx_latency_stats_cfg);
msg = qdf_nbuf_alloc(soc->osdev, HTT_MSG_BUF_SIZE(size),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4, TRUE);
if (!msg)
return QDF_STATUS_E_NOMEM;
/*
* Set the length of the message.
* The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
* separately during the below call to qdf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
if (!qdf_nbuf_put_tail(msg, size)) {
dp_htt_err("Failed to expand head");
qdf_nbuf_free(msg);
return QDF_STATUS_E_FAILURE;
}
msg_word = (uint32_t *)qdf_nbuf_data(msg);
memset(msg_word, 0, size);
qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
htt_logger_bufp = (uint8_t *)msg_word;
HTT_H2T_MSG_TYPE_SET(*msg_word,
HTT_H2T_MSG_TYPE_TX_LATENCY_STATS_CFG);
HTT_H2T_TX_LATENCY_STATS_CFG_VDEV_ID_SET(*msg_word, vdev_id);
HTT_H2T_TX_LATENCY_STATS_CFG_ENABLE_SET(*msg_word, enable);
HTT_H2T_TX_LATENCY_STATS_CFG_PERIODIC_INTERVAL_SET(*msg_word, period);
HTT_H2T_TX_LATENCY_STATS_CFG_GRANULARITY_SET(*msg_word, granularity);
pkt = htt_htc_pkt_alloc(soc);
if (!pkt) {
dp_htt_err("Fail to allocate dp_htt_htc_pkt buffer");
qdf_nbuf_free(msg);
return QDF_STATUS_E_NOMEM;
}
pkt->soc_ctxt = NULL;
/* macro to set packet parameters for TX */
SET_HTC_PACKET_INFO_TX(
&pkt->htc_pkt,
dp_htt_h2t_send_complete_free_netbuf,
qdf_nbuf_data(msg),
qdf_nbuf_len(msg),
soc->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
status = DP_HTT_SEND_HTC_PKT(
soc, pkt,
HTT_H2T_MSG_TYPE_TX_LATENCY_STATS_CFG,
htt_logger_bufp);
if (QDF_IS_STATUS_ERROR(status)) {
qdf_nbuf_free(msg);
htt_htc_pkt_free(soc, pkt);
}
dp_htt_debug("vdev id %u enable %u period %u granularity %u status %d",
vdev_id, enable, period, granularity, status);
return status;
}
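/*
 * Editorial sketch with hypothetical mask/shift values: the *_MAX_MS
 * bounds above fall straight out of the HTT bit-field definitions, since
 * the widest value a field can carry is its mask shifted down. Requests
 * beyond these bounds are rejected before the message is built.
 */
#define EXAMPLE_FIELD_M   0x00ffff00	/* hypothetical 16-bit field */
#define EXAMPLE_FIELD_S   8
#define EXAMPLE_FIELD_MAX (EXAMPLE_FIELD_M >> EXAMPLE_FIELD_S)	/* 0xffff */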
/**
* dp_htt_tx_latency_get_stats_elem(): get tx latency stats from HTT message
* @msg_buf: pointer to stats in HTT message
* @elem_size_msg: size of per peer stats which is reported in HTT message
* @local_buf: additional buffer to hold the stats
* @elem_size_local: size of per peer stats according to current host side
* htt definition
*
* This function handles an HTT version mismatch between host and target.
* It compares elem_size_msg with elem_size_local: when elem_size_msg is
* greater than or equal to elem_size_local, it returns the pointer to the
* stats in the HTT message; otherwise, it copies the stats (elem_size_msg
* bytes) from the HTT message into the local buffer, leaves the remainder
* zeroed, and returns a pointer to this local buffer.
*
* Return: pointer to tx latency stats
*/
static inline htt_t2h_peer_tx_latency_stats *
dp_htt_tx_latency_get_stats_elem(uint8_t *msg_buf, uint32_t elem_size_msg,
htt_t2h_peer_tx_latency_stats *local_buf,
uint32_t elem_size_local)
{
if (elem_size_msg >= elem_size_local)
return (htt_t2h_peer_tx_latency_stats *)msg_buf;
qdf_mem_zero(local_buf, sizeof(*local_buf));
qdf_mem_copy(local_buf, msg_buf, elem_size_msg);
return local_buf;
}
#define TX_LATENCY_STATS_GET_PAYLOAD_ELEM_SIZE \
HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_PAYLOAD_ELEM_SIZE_GET
#define TX_LATENCY_STATS_GET_GRANULARITY \
HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_GRANULARITY_GET
/**
* dp_htt_tx_latency_stats_handler - Handle tx latency stats received from FW
* @soc: htt soc handle
* @htt_t2h_msg: HTT message nbuf
*
* Return: void
*/
static void
dp_htt_tx_latency_stats_handler(struct htt_soc *soc,
qdf_nbuf_t htt_t2h_msg)
{
struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
uint8_t pdev_id;
uint8_t target_pdev_id;
struct dp_pdev *pdev;
htt_t2h_peer_tx_latency_stats stats, *pstats;
uint32_t elem_size_msg, elem_size_local, granularity;
uint32_t *msg_word;
int32_t buf_len;
uint8_t *pbuf;
buf_len = qdf_nbuf_len(htt_t2h_msg);
if (buf_len <= HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_HDR_SIZE)
return;
pbuf = qdf_nbuf_data(htt_t2h_msg);
msg_word = (uint32_t *)pbuf;
target_pdev_id =
HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_PDEV_ID_GET(*msg_word);
pdev_id = dp_get_host_pdev_id_for_target_pdev_id(dpsoc,
target_pdev_id);
if (pdev_id >= MAX_PDEV_CNT)
return;
pdev = dpsoc->pdev_list[pdev_id];
if (!pdev) {
dp_err("PDEV is NULL for pdev_id:%d", pdev_id);
return;
}
qdf_trace_hex_dump(QDF_MODULE_ID_DP_HTT, QDF_TRACE_LEVEL_INFO,
(void *)pbuf, buf_len);
elem_size_msg = TX_LATENCY_STATS_GET_PAYLOAD_ELEM_SIZE(*msg_word);
elem_size_local = sizeof(stats);
granularity = TX_LATENCY_STATS_GET_GRANULARITY(*msg_word);
/* Adjust pbuf to point to the first stat in buffer */
pbuf += HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_HDR_SIZE;
buf_len -= HTT_T2H_TX_LATENCY_STATS_PERIODIC_IND_HDR_SIZE;
/* Parse the received buffer till payload size reaches 0 */
while (buf_len > 0) {
if (buf_len < elem_size_msg) {
dp_err_rl("invalid payload size left %d - %d",
buf_len, elem_size_msg);
break;
}
pstats = dp_htt_tx_latency_get_stats_elem(pbuf, elem_size_msg,
&stats,
elem_size_local);
dp_tx_latency_stats_update_cca(dpsoc, pstats->peer_id,
granularity,
pstats->peer_tx_latency,
pstats->avg_latency);
pbuf += elem_size_msg;
buf_len -= elem_size_msg;
}
dp_tx_latency_stats_report(dpsoc, pdev);
}
#else
static inline void
dp_htt_tx_latency_stats_handler(struct htt_soc *soc,
qdf_nbuf_t htt_t2h_msg)
{
}
#endif
void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
struct htt_soc *soc = (struct htt_soc *) context;
@@ -4216,6 +4433,11 @@ void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
dp_htt_peer_ext_evt(soc, msg_word);
break;
}
case HTT_T2H_MSG_TYPE_TX_LATENCY_STATS_PERIODIC_IND:
{
dp_htt_tx_latency_stats_handler(soc, htt_t2h_msg);
break;
}
default:
break;
};


@@ -5505,4 +5505,47 @@ dp_get_ring_stats_from_hal(struct dp_soc *soc, struct dp_srng *srng,
}
}
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
* dp_h2t_tx_latency_stats_cfg_msg_send(): send HTT message for tx latency
* stats config to FW
* @dp_soc: DP SOC handle
* @vdev_id: vdev id
* @enable: indicates whether the feature is enabled
* @period: statistical period for transmit latency, in ms
* @granularity: granularity for the tx latency distribution, in ms
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_h2t_tx_latency_stats_cfg_msg_send(struct dp_soc *dp_soc, uint16_t vdev_id,
bool enable, uint32_t period,
uint32_t granularity);
/**
* dp_tx_latency_stats_update_cca() - update transmit latency statistics for
* CCA
* @soc: dp soc handle
* @peer_id: peer id
* @granularity: granularity of distribution
* @distribution: distribution of transmit latency statistics
* @avg: average of CCA latency (in microseconds) within a cycle
*
* Return: None
*/
void
dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id,
uint32_t granularity, uint32_t *distribution,
uint32_t avg);
/**
* dp_tx_latency_stats_report() - report transmit latency statistics for each
* vdev of specified pdev
* @soc: dp soc handle
* @pdev: dp pdev Handle
*
* Return: None
*/
void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev);
#endif
#endif /* #ifndef _DP_INTERNAL_H_ */


@@ -7420,24 +7420,6 @@ void dp_get_peer_extd_stats(struct dp_peer *peer,
#endif
#else
#if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
/**
* dp_get_peer_link_id() - Get Link peer Link ID
* @peer: Datapath peer
*
* Return: Link peer Link ID
*/
static inline
uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
uint8_t link_id;
link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
link_id = 0;
return link_id;
}
static inline
void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats)
@@ -12435,6 +12417,11 @@ static struct cdp_host_stats_ops dp_ops_host_stats = {
.get_pdev_obss_stats = dp_get_obss_stats,
.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
.txrx_get_interface_stats = dp_txrx_get_interface_stats,
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
.tx_latency_stats_fetch = dp_tx_latency_stats_fetch,
.tx_latency_stats_config = dp_tx_latency_stats_config,
.tx_latency_stats_register_cb = dp_tx_latency_stats_register_cb,
#endif
/* TODO */
};


@@ -535,6 +535,17 @@ void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
dp_err("unknown peer type %d", peer->peer_type);
}
}
uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
uint8_t link_id;
link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
link_id = 0;
return link_id;
}
#else
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{


@@ -2192,6 +2192,13 @@ dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
*/
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
/**
* dp_get_peer_link_id() - Get Link peer Link ID
* @peer: Datapath peer
*
* Return: Link peer Link ID
*/
uint8_t dp_get_peer_link_id(struct dp_peer *peer);
#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
@@ -2344,6 +2351,11 @@ uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
static inline uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
return 0;
}
#endif /* WLAN_FEATURE_11BE_MLO */
static inline


@@ -2370,6 +2370,631 @@ static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
}
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
* dp_tx_latency_stats_enabled() - check enablement of transmit latency
* statistics
* @vdev: DP vdev handle
*
* Return: true if transmit latency statistics are enabled, false otherwise.
*/
static inline bool dp_tx_latency_stats_enabled(struct dp_vdev *vdev)
{
return qdf_atomic_read(&vdev->tx_latency_cfg.enabled);
}
/**
* dp_tx_latency_stats_report_enabled() - check enablement of async report
* for transmit latency statistics
* @vdev: DP vdev handle
*
* Return: true if the async report for transmit latency statistics is
* enabled, false otherwise.
*/
static inline bool dp_tx_latency_stats_report_enabled(struct dp_vdev *vdev)
{
return qdf_atomic_read(&vdev->tx_latency_cfg.report);
}
/**
* dp_tx_get_driver_ingress_ts() - get driver ingress timestamp from nbuf
* @vdev: DP vdev handle
* @msdu_info: pointer to MSDU Descriptor
* @nbuf: original buffer from network stack
*
* Return: None
*/
static inline void
dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info,
qdf_nbuf_t nbuf)
{
if (!dp_tx_latency_stats_enabled(vdev))
return;
msdu_info->driver_ingress_ts = qdf_nbuf_get_tx_ts(nbuf, true);
}
/**
* dp_tx_update_ts_on_enqueued() - set driver ingress/egress timestamp in
* tx descriptor
* @vdev: DP vdev handle
* @msdu_info: pointer to MSDU Descriptor
* @tx_desc: pointer to tx descriptor
*
* Return: None
*/
static inline void
dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info,
struct dp_tx_desc_s *tx_desc)
{
if (!dp_tx_latency_stats_enabled(vdev))
return;
tx_desc->driver_ingress_ts = msdu_info->driver_ingress_ts;
tx_desc->driver_egress_ts = qdf_ktime_real_get();
}
/**
* dp_tx_latency_stats_update_bucket() - update transmit latency statistics
* for specified type
* @vdev: DP vdev handle
* @tx_latency: pointer to transmit latency stats
* @idx: index of the statistics
* @type: transmit latency type
* @value: latency to be recorded
*
* Return: None
*/
static inline void
dp_tx_latency_stats_update_bucket(struct dp_vdev *vdev,
struct dp_tx_latency *tx_latency,
int idx, enum cdp_tx_latency_type type,
uint32_t value)
{
int32_t granularity;
int lvl;
granularity =
qdf_atomic_read(&vdev->tx_latency_cfg.granularity[type]);
if (qdf_unlikely(!granularity))
return;
lvl = value / granularity;
if (lvl >= CDP_TX_LATENCY_DISTR_LV_MAX)
lvl = CDP_TX_LATENCY_DISTR_LV_MAX - 1;
qdf_atomic_inc(&tx_latency->stats[idx][type].msdus_accum);
qdf_atomic_add(value, &tx_latency->stats[idx][type].latency_accum);
qdf_atomic_inc(&tx_latency->stats[idx][type].distribution[lvl]);
}
/**
* dp_tx_latency_stats_update() - update transmit latency statistics on
* MSDU transmit completion
* @soc: dp soc handle
* @txrx_peer: txrx peer handle
* @tx_desc: pointer to tx descriptor
* @ts: tx completion status
* @link_id: link id
*
* Return: None
*/
static inline void
dp_tx_latency_stats_update(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc,
struct hal_tx_completion_status *ts,
uint8_t link_id)
{
uint32_t driver_latency, ring_buf_latency, hw_latency;
QDF_STATUS status = QDF_STATUS_E_INVAL;
int64_t current_ts, ingress, egress;
struct dp_vdev *vdev = txrx_peer->vdev;
struct dp_tx_latency *tx_latency;
uint8_t idx;
if (!dp_tx_latency_stats_enabled(vdev))
return;
if (!tx_desc->driver_ingress_ts || !tx_desc->driver_egress_ts)
return;
status = dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, &hw_latency);
if (QDF_IS_STATUS_ERROR(status))
return;
ingress = qdf_ktime_to_us(tx_desc->driver_ingress_ts);
egress = qdf_ktime_to_us(tx_desc->driver_egress_ts);
driver_latency = (uint32_t)(egress - ingress);
current_ts = qdf_ktime_to_us(qdf_ktime_real_get());
ring_buf_latency = (uint32_t)(current_ts - egress);
tx_latency = &txrx_peer->stats[link_id].tx_latency;
idx = tx_latency->cur_idx;
dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
CDP_TX_LATENCY_TYPE_DRIVER,
driver_latency);
dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
CDP_TX_LATENCY_TYPE_RING_BUF,
ring_buf_latency);
dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
CDP_TX_LATENCY_TYPE_HW, hw_latency);
}
/**
* dp_tx_latency_stats_clear_bucket() - clear transmit latency statistics
* for the specified type
* @tx_latency: pointer to transmit latency stats
* @idx: index of the statistics
* @type: transmit latency type
*
* Return: None
*/
static inline void
dp_tx_latency_stats_clear_bucket(struct dp_tx_latency *tx_latency,
int idx, enum cdp_tx_latency_type type)
{
int lvl;
struct dp_tx_latency_stats *stats;
stats = &tx_latency->stats[idx][type];
qdf_atomic_init(&stats->msdus_accum);
qdf_atomic_init(&stats->latency_accum);
for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
qdf_atomic_init(&stats->distribution[lvl]);
}
/**
* dp_tx_latency_stats_clear_buckets() - clear transmit latency statistics
* for all types at the specified index
* @tx_latency: pointer to transmit latency stats
* @idx: index of the statistics
*
* Return: None
*/
static void
dp_tx_latency_stats_clear_buckets(struct dp_tx_latency *tx_latency,
int idx)
{
int type;
for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
dp_tx_latency_stats_clear_bucket(tx_latency, idx, type);
}
/**
* dp_tx_latency_stats_update_cca() - update transmit latency statistics for
* CCA
* @soc: dp soc handle
* @peer_id: peer id
* @granularity: granularity of distribution
* @distribution: distribution of transmit latency statistics
* @avg: average of CCA latency (in microseconds) within a cycle
*
* Return: None
*/
void
dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id,
uint32_t granularity, uint32_t *distribution,
uint32_t avg)
{
int lvl, idx;
uint8_t link_id;
struct dp_tx_latency *tx_latency;
struct dp_tx_latency_stats *stats;
int32_t cur_granularity;
struct dp_vdev *vdev;
struct dp_tx_latency_config *cfg;
struct dp_txrx_peer *txrx_peer;
struct dp_peer *peer;
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
if (!peer) {
dp_err_rl("Peer not found peer id %d", peer_id);
return;
}
if (IS_MLO_DP_MLD_PEER(peer))
goto out;
vdev = peer->vdev;
if (!dp_tx_latency_stats_enabled(vdev))
goto out;
cfg = &vdev->tx_latency_cfg;
cur_granularity =
qdf_atomic_read(&cfg->granularity[CDP_TX_LATENCY_TYPE_CCA]);
/* convert the granularity from us to ms */
cur_granularity /= 1000;
if (cur_granularity != granularity) {
dp_info_rl("invalid granularity, cur %d report %d",
cur_granularity, granularity);
goto out;
}
txrx_peer = dp_get_txrx_peer(peer);
if (qdf_unlikely(!txrx_peer)) {
dp_err_rl("txrx_peer NULL for MAC: " QDF_MAC_ADDR_FMT,
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
goto out;
}
link_id = dp_get_peer_link_id(peer);
if (link_id >= txrx_peer->stats_arr_size)
goto out;
tx_latency = &txrx_peer->stats[link_id].tx_latency;
idx = tx_latency->cur_idx;
stats = &tx_latency->stats[idx][CDP_TX_LATENCY_TYPE_CCA];
qdf_atomic_set(&stats->latency_accum, avg);
qdf_atomic_set(&stats->msdus_accum, (avg ? 1 : 0));
for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
qdf_atomic_set(&stats->distribution[lvl],
distribution[lvl]);
/* prepare for the next cycle */
tx_latency->cur_idx = 1 - idx;
dp_tx_latency_stats_clear_buckets(tx_latency, tx_latency->cur_idx);
out:
dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
}
/**
* dp_tx_latency_stats_get_per_peer() - get transmit latency statistics for a
* peer
* @soc: dp soc handle
* @peer: dp peer Handle
* @latency: buffer to hold transmit latency statistics
*
* Return: QDF_STATUS
*/
static QDF_STATUS
dp_tx_latency_stats_get_per_peer(struct dp_soc *soc, struct dp_peer *peer,
struct cdp_tx_latency *latency)
{
int lvl, type, link_id;
int32_t latency_accum, msdus_accum;
struct dp_vdev *vdev;
struct dp_txrx_peer *txrx_peer;
struct dp_tx_latency *tx_latency;
struct dp_tx_latency_config *cfg;
struct dp_tx_latency_stats *stats;
uint8_t last_idx;
if (unlikely(!latency))
return QDF_STATUS_E_INVAL;
/* Authenticated link/legacy peer only */
if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
return QDF_STATUS_E_INVAL;
vdev = peer->vdev;
if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
return QDF_STATUS_E_INVAL;
txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer)
return QDF_STATUS_E_INVAL;
link_id = dp_get_peer_link_id(peer);
if (link_id >= txrx_peer->stats_arr_size)
return QDF_STATUS_E_INVAL;
tx_latency = &txrx_peer->stats[link_id].tx_latency;
qdf_mem_zero(latency, sizeof(*latency));
qdf_mem_copy(latency->mac_remote.bytes,
peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
last_idx = 1 - tx_latency->cur_idx;
cfg = &vdev->tx_latency_cfg;
for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++) {
latency->stats[type].granularity =
qdf_atomic_read(&cfg->granularity[type]);
stats = &tx_latency->stats[last_idx][type];
msdus_accum = qdf_atomic_read(&stats->msdus_accum);
if (!msdus_accum)
continue;
latency_accum = qdf_atomic_read(&stats->latency_accum);
latency->stats[type].average = latency_accum / msdus_accum;
for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++) {
latency->stats[type].distribution[lvl] =
qdf_atomic_read(&stats->distribution[lvl]);
}
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_tx_latency_stats_get_peer_iter() - iterator to get transmit latency
* statistics for specified peer
* @soc: dp soc handle
* @peer: dp peer Handle
* @arg: list to hold transmit latency statistics for peers
*
* Return: None
*/
static void
dp_tx_latency_stats_get_peer_iter(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
struct dp_vdev *vdev;
struct dp_txrx_peer *txrx_peer;
struct cdp_tx_latency *latency;
QDF_STATUS status;
qdf_list_t *stats_list = (qdf_list_t *)arg;
/* Authenticated link/legacy peer only */
if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
return;
txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer)
return;
vdev = peer->vdev;
latency = qdf_mem_malloc(sizeof(*latency));
if (!latency)
return;
status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
if (QDF_IS_STATUS_ERROR(status))
goto out;
status = qdf_list_insert_back(stats_list, &latency->node);
if (QDF_IS_STATUS_ERROR(status))
goto out;
return;
out:
qdf_mem_free(latency);
}
/**
* dp_tx_latency_stats_rpt_per_vdev() - report transmit latency statistics for
* specified vdev
* @soc: dp soc handle
* @vdev: dp vdev Handle
*
* Return: None
*/
static void
dp_tx_latency_stats_rpt_per_vdev(struct dp_soc *soc, struct dp_vdev *vdev)
{
qdf_list_t stats_list;
struct cdp_tx_latency *entry, *next;
if (!soc->tx_latency_cb || !dp_tx_latency_stats_report_enabled(vdev))
return;
qdf_list_create(&stats_list, 0);
dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_get_peer_iter,
&stats_list, DP_MOD_ID_CDP);
if (qdf_list_empty(&stats_list))
goto out;
soc->tx_latency_cb(vdev->vdev_id, &stats_list);
qdf_list_for_each_del(&stats_list, entry, next, node) {
qdf_list_remove_node(&stats_list, &entry->node);
qdf_mem_free(entry);
}
out:
qdf_list_destroy(&stats_list);
}
/**
* dp_tx_latency_stats_report() - report transmit latency statistics for each
* vdev of specified pdev
* @soc: dp soc handle
* @pdev: dp pdev Handle
*
* Return: None
*/
void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev)
{
struct dp_vdev *vdev;
if (!soc->tx_latency_cb)
return;
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
dp_tx_latency_stats_rpt_per_vdev(soc, vdev);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
/**
* dp_tx_latency_stats_clear_per_peer() - iterator to clear transmit latency
* statistics for specified peer
* @soc: dp soc handle
* @peer: dp peer handle
* @arg: argument from iterator
*
* Return: None
*/
static void
dp_tx_latency_stats_clear_per_peer(struct dp_soc *soc, struct dp_peer *peer,
void *arg)
{
int link_id;
struct dp_tx_latency *tx_latency;
struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer) {
dp_err("no txrx peer, skip");
return;
}
for (link_id = 0; link_id < txrx_peer->stats_arr_size; link_id++) {
tx_latency = &txrx_peer->stats[link_id].tx_latency;
dp_tx_latency_stats_clear_buckets(tx_latency, 0);
dp_tx_latency_stats_clear_buckets(tx_latency, 1);
}
}
/**
* dp_tx_latency_stats_clear_per_vdev() - clear transmit latency statistics
* for specified vdev
* @vdev: dp vdev handle
*
* Return: None
*/
static inline void dp_tx_latency_stats_clear_per_vdev(struct dp_vdev *vdev)
{
dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_clear_per_peer,
NULL, DP_MOD_ID_CDP);
}
/**
* dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
* specified link mac address
* @soc_hdl: Handle to struct dp_soc
* @vdev_id: vdev id
* @mac: link mac address of remote peer
* @latency: buffer to hold per-link transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl,
uint8_t vdev_id, uint8_t *mac,
struct cdp_tx_latency *latency)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct cdp_peer_info peer_info = {0};
struct dp_peer *peer;
QDF_STATUS status;
/* MAC addr of link peer may be the same as MLD peer,
* so specify the type as CDP_LINK_PEER_TYPE here to
* get link peer explicitly.
*/
DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac, false,
CDP_LINK_PEER_TYPE);
peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
if (!peer) {
dp_err_rl("peer(vdev id %d mac " QDF_MAC_ADDR_FMT ") not found",
vdev_id, QDF_MAC_ADDR_REF(mac));
return QDF_STATUS_E_INVAL;
}
status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return status;
}
/**
* dp_tx_latency_stats_config() - config transmit latency statistics for
* specified vdev
* @soc_hdl: Handle to struct dp_soc
* @vdev_id: vdev id
* @cfg: configuration for transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct cdp_tx_latency_config *cfg)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev;
QDF_STATUS status = QDF_STATUS_E_INVAL;
uint32_t cca_granularity;
int type;
vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
if (!vdev) {
dp_err_rl("vdev %d does not exist", vdev_id);
return QDF_STATUS_E_FAILURE;
}
/* disable to ignore upcoming updates */
qdf_atomic_set(&vdev->tx_latency_cfg.enabled, 0);
dp_tx_latency_stats_clear_per_vdev(vdev);
if (!cfg->enable)
goto send_htt;
qdf_atomic_set(&vdev->tx_latency_cfg.report, (cfg->report ? 1 : 0));
for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
qdf_atomic_set(&vdev->tx_latency_cfg.granularity[type],
cfg->granularity[type]);
send_htt:
/* convert the CCA granularity from us to ms */
cca_granularity = cfg->granularity[CDP_TX_LATENCY_TYPE_CCA] / 1000;
status = dp_h2t_tx_latency_stats_cfg_msg_send(soc, vdev_id,
cfg->enable, cfg->period,
cca_granularity);
if (QDF_IS_STATUS_ERROR(status)) {
dp_err_rl("failed to send htt msg: %d", status);
goto out;
}
qdf_atomic_set(&vdev->tx_latency_cfg.enabled, (cfg->enable ? 1 : 0));
out:
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
return status;
}
/**
* dp_tx_latency_stats_register_cb() - register transmit latency statistics
* callback
* @handle: Handle to struct dp_soc
* @cb: callback function for transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle, cdp_tx_latency_cb cb)
{
struct dp_soc *soc = (struct dp_soc *)handle;
if (!soc || !cb) {
dp_err("soc or cb is NULL");
return QDF_STATUS_E_INVAL;
}
soc->tx_latency_cb = cb;
return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info,
qdf_nbuf_t nbuf)
{
}
static inline void
dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info,
struct dp_tx_desc_s *tx_desc)
{
}
static inline void
dp_tx_latency_stats_update(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc,
struct hal_tx_completion_status *ts,
uint8_t link_id)
{
}
#endif
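/*
 * Worked sketch (editorial, hypothetical helper) of the bucket mapping in
 * dp_tx_latency_stats_update_bucket() above: with a 500 us granularity
 * and CDP_TX_LATENCY_DISTR_LV_MAX == 4, samples land in [0, 500),
 * [500, 1000), [1000, 1500) and [1500, inf) microseconds. Callers ensure
 * a non-zero granularity, as the real code checks.
 */
static inline int example_latency_to_level(uint32_t value_us,
					   uint32_t granularity_us)
{
	int lvl = value_us / granularity_us;

	return (lvl >= CDP_TX_LATENCY_DISTR_LV_MAX) ?
		CDP_TX_LATENCY_DISTR_LV_MAX - 1 : lvl;
}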
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
@@ -2450,6 +3075,8 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
goto release_desc;
}
dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
return NULL;
@@ -2811,6 +3438,8 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
goto done;
}
dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
/*
* TODO
* if tso_info structure can be modified to have curr_seg
@@ -3655,6 +4284,8 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (qdf_unlikely(!vdev))
return nbuf;
dp_tx_get_driver_ingress_ts(vdev, &msdu_info, nbuf);
dp_vdev_tx_mark_to_fw(nbuf, vdev);
/*
@@ -5419,6 +6050,7 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
ts, ts->tid);
dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
dp_tx_latency_stats_update(soc, txrx_peer, tx_desc, ts, link_id);
#ifdef QCA_SUPPORT_RDK_STATS
if (soc->peerstats_enabled)


@@ -210,6 +210,7 @@ struct dp_tx_queue {
* @skip_hp_update : Skip HP update for TSO segments and update in last segment
* @buf_len:
* @payload_addr:
* @driver_ingress_ts: driver ingress timestamp
*
* This structure holds the complete MSDU information needed to program the
* Hardware TCL and MSDU extension descriptors for different frame types
@@ -241,6 +242,9 @@ struct dp_tx_msdu_info_s {
uint16_t buf_len;
uint8_t *payload_addr;
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
qdf_ktime_t driver_ingress_ts;
#endif
};
#ifndef QCA_HOST_MODE_WIFI_DISABLED
@@ -2150,4 +2154,44 @@ dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
{
}
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
* dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
* specified link mac address
* @soc_hdl: Handle to struct dp_soc
* @vdev_id: vdev id
* @mac: link mac address of remote peer
* @latency: buffer to hold per-link transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
uint8_t *mac, struct cdp_tx_latency *latency);
/**
* dp_tx_latency_stats_config() - config transmit latency statistics for
* specified vdev
* @soc_hdl: Handle to struct dp_soc
* @vdev_id: vdev id
* @cfg: configuration for transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS
dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct cdp_tx_latency_config *cfg);
/**
* dp_tx_latency_stats_register_cb() - register transmit latency statistics
* callback
* @handle: Handle to struct dp_soc
* @cb: callback function for transmit latency statistics
*
* Return: QDF_STATUS
*/
QDF_STATUS dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle,
cdp_tx_latency_cb cb);
#endif
#endif


@@ -659,6 +659,8 @@ struct dp_tx_ext_desc_pool_s {
* @pool_id: Pool ID - used when releasing the descriptor
* @msdu_ext_desc: MSDU extension descriptor
* @timestamp:
* @driver_egress_ts: driver egress timestamp
* @driver_ingress_ts: driver ingress timestamp
* @comp:
* @tcl_cmd_vaddr: VADDR of the TCL descriptor, valid for soft-umac arch
* @tcl_cmd_paddr: PADDR of the TCL descriptor, valid for soft-umac arch
@@ -686,6 +688,10 @@ struct dp_tx_desc_s {
uint8_t pool_id;
struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
qdf_ktime_t timestamp;
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
qdf_ktime_t driver_egress_ts;
qdf_ktime_t driver_ingress_ts;
#endif
struct hal_tx_desc_comp_s comp;
#ifdef WLAN_SOFTUMAC_SUPPORT
void *tcl_cmd_vaddr;
@@ -3178,6 +3184,11 @@ struct dp_soc {
uint64_t alloc_addr_list_idx;
uint64_t shared_qaddr_del_idx;
uint64_t write_paddr_list_idx;
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/* callback function for tx latency stats */
cdp_tx_latency_cb tx_latency_cb;
#endif
};
#ifdef IPA_OFFLOAD
@@ -3814,6 +3825,45 @@ struct dp_peer;
#define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
* struct dp_tx_latency_config - configuration for per-link transmit latency
* statistics
* @enabled: the feature is enabled or not
* @report: async report is enabled or not
* @granularity: granularity (in microseconds) of the distribution for the types
*/
struct dp_tx_latency_config {
qdf_atomic_t enabled;
qdf_atomic_t report;
qdf_atomic_t granularity[CDP_TX_LATENCY_TYPE_MAX];
};
/**
* struct dp_tx_latency_stats - transmit latency distribution for a type
* @latency_accum: accumulated latencies
* @msdus_accum: accumulated number of msdus
* @distribution: distribution of latencies
*/
struct dp_tx_latency_stats {
qdf_atomic_t latency_accum;
qdf_atomic_t msdus_accum;
qdf_atomic_t distribution[CDP_TX_LATENCY_DISTR_LV_MAX];
};
/**
* struct dp_tx_latency - transmit latency statistics for remote link peer
* @cur_idx: current row index of the 2D stats array
* @stats: two-dimensional array storing the transmit latency statistics;
* one row holds the stats of the current cycle (indicated by cur_idx),
* the other holds those of the last cycle.
*/
struct dp_tx_latency {
uint8_t cur_idx;
struct dp_tx_latency_stats stats[2][CDP_TX_LATENCY_TYPE_MAX];
};
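/*
 * Editorial reader-side sketch (hypothetical helper): completed-cycle
 * stats live in the row not currently being written, i.e. index
 * (1 - cur_idx); dp_tx_latency_stats_get_per_peer() computes last_idx
 * the same way before aggregating.
 */
static inline struct dp_tx_latency_stats *
example_last_cycle_stats(struct dp_tx_latency *tx_latency,
			 enum cdp_tx_latency_type type)
{
	return &tx_latency->stats[1 - tx_latency->cur_idx][type];
}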
#endif
/* VDEV structure for data path state */
struct dp_vdev {
/* OS device abstraction */
@@ -4141,6 +4191,11 @@ struct dp_vdev {
/* Return buffer manager ID */
uint8_t rbm_id;
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/* configuration for tx latency stats */
struct dp_tx_latency_config tx_latency_cfg;
#endif
};
enum {
@@ -4766,12 +4821,16 @@ struct dp_peer_extd_stats {
* struct dp_peer_stats - Peer stats
* @per_pkt_stats: Per packet path stats
* @extd_stats: Extended path stats
* @tx_latency: transmit latency stats
*/
struct dp_peer_stats {
struct dp_peer_per_pkt_stats per_pkt_stats;
#ifndef QCA_ENHANCED_STATS_SUPPORT
struct dp_peer_extd_stats extd_stats;
#endif
#ifdef WLAN_FEATURE_TX_LATENCY_STATS
struct dp_tx_latency tx_latency;
#endif
};
/**