qcacmn: Changes for delay and jitter stats
Delay and jitter stats are now placed in struct dp_txrx_peer. Update the
allocation, fetching and clearing operations of these stats accordingly.

Change-Id: Ic9b9e48467741072806000f4d1b3aa8e5c65d508
CRs-Fixed: 3092123
committed by Madan Koyyalamudi
parent eee813ad02
commit 27e9e64532
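For context before the hunks: the change assumes struct dp_txrx_peer now owns both stats contexts. Below is a minimal sketch of the relevant fields, inferred only from the accessors used in this diff; the real definition (in dp_types.h) carries many more members, and the field order shown here is an assumption.

/* Sketch only: fields of struct dp_txrx_peer that this change touches.
 * Member names come from the diff below; everything else is illustrative.
 */
#include <stdint.h>

struct dp_txrx_peer {
        struct dp_vdev *vdev;          /* used to reach the owning pdev */
        uint16_t peer_id;              /* set to HTT_INVALID_PEER at attach */

        /* Per-peer delay histograms, allocated by
         * dp_peer_delay_stats_ctx_alloc() under QCA_PEER_EXT_STATS and
         * accumulated per TID via its delay_tid_stats member.
         */
        struct dp_peer_delay_stats *delay_stats;

        /* Array of DP_MAX_TIDS cdp_peer_tid_stats entries, allocated by
         * dp_peer_jitter_stats_ctx_alloc() under WLAN_PEER_JITTER only
         * when NSS offload is enabled on the pdev.
         */
        struct cdp_peer_tid_stats *jitter_stats;
};

With both contexts hanging off dp_txrx_peer instead of dp_peer, the attach/detach, clear and fetch paths below operate on peer->txrx_peer (and, for jitter, on the pdev) rather than on the dp_peer handle.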
@@ -6813,11 +6813,13 @@ void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
 {
         struct dp_txrx_peer *txrx_peer;
+        struct dp_pdev *pdev;
 
         /* dp_txrx_peer exists for mld peer and legacy peer */
         if (peer->txrx_peer) {
                 txrx_peer = peer->txrx_peer;
                 peer->txrx_peer = NULL;
+                pdev = txrx_peer->vdev->pdev;
 
                 dp_peer_defrag_rx_tids_deinit(txrx_peer);
                 /*
@@ -6825,6 +6827,7 @@ static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
                  */
                 dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
                 dp_peer_rx_bufq_resources_deinit(txrx_peer);
+                dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
 
                 qdf_mem_free(txrx_peer);
         }
@@ -6835,6 +6838,7 @@ static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
 {
         struct dp_txrx_peer *txrx_peer;
+        struct dp_pdev *pdev;
 
         txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
 
@@ -6844,6 +6848,7 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
         txrx_peer->peer_id = HTT_INVALID_PEER;
         /* initialize the peer_id */
         txrx_peer->vdev = peer->vdev;
+        pdev = peer->vdev->pdev;
 
         DP_STATS_INIT(txrx_peer);
 
@@ -6857,7 +6862,16 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
          */
         if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
                                         QDF_STATUS_SUCCESS)
-                dp_warn("peer ext_stats ctx alloc failed");
+                dp_warn("peer delay_stats ctx alloc failed");
+
+        /*
+         * Allocate memory for jitter stats. Fall through in
+         * case of failure as it's not an implicit requirement to have
+         * this object for regular statistics updates.
+         */
+        if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
+                                        QDF_STATUS_SUCCESS)
+                dp_warn("peer jitter_stats ctx alloc failed");
 
         dp_set_peer_isolation(txrx_peer, false);
 
@@ -6880,6 +6894,8 @@ void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
         txrx_peer->to_stack.bytes = 0;
 
         DP_STATS_CLR(txrx_peer);
+        dp_peer_delay_stats_ctx_clr(txrx_peer);
+        dp_peer_jitter_stats_ctx_clr(txrx_peer);
 }
 
 /*
@@ -8815,23 +8831,6 @@ void dp_print_napi_stats(struct dp_soc *soc)
         hif_print_napi_stats(soc->hif_handle);
 }
 
-#ifdef QCA_PEER_EXT_STATS
-/**
- * dp_txrx_host_peer_delay_stats_clr: Reinitialize the txrx peer delay stats
- *
- */
-static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
-{
-        if (peer->txrx_peer->delay_stats)
-                qdf_mem_zero(peer->txrx_peer->delay_stats,
-                             sizeof(struct dp_peer_delay_stats));
-}
-#else
-static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
-{
-}
-#endif
-
 /**
  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
  * @soc: Datapath soc
@@ -4716,6 +4716,95 @@ void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
         qdf_mem_free(txrx_peer->delay_stats);
         txrx_peer->delay_stats = NULL;
 }
+
+/**
+ * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
+ *
+ * @txrx_peer: dp_txrx_peer handle
+ *
+ * Return: void
+ */
+void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
+{
+        if (txrx_peer->delay_stats)
+                qdf_mem_zero(txrx_peer->delay_stats,
+                             sizeof(struct dp_peer_delay_stats));
+}
 #endif
 
+#ifdef WLAN_PEER_JITTER
+/**
+ * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
+ *
+ * @pdev: Datapath pdev handle
+ * @txrx_peer: dp_txrx_peer handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
+                                          struct dp_txrx_peer *txrx_peer)
+{
+        if (!pdev || !txrx_peer) {
+                dp_warn("Null pdev or peer");
+                return QDF_STATUS_E_INVAL;
+        }
+
+        /*
+         * Allocate memory for jitter stats only when
+         * operating in offload enabled mode.
+         */
+        if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
+                return QDF_STATUS_SUCCESS;
+
+        txrx_peer->jitter_stats =
+                qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS);
+        if (!txrx_peer->jitter_stats) {
+                dp_warn("Jitter stats obj alloc failed!!");
+                return QDF_STATUS_E_NOMEM;
+        }
+
+        return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
+ *
+ * @pdev: Datapath pdev handle
+ * @txrx_peer: dp_txrx_peer handle
+ *
+ * Return: void
+ */
+void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
+                                      struct dp_txrx_peer *txrx_peer)
+{
+        if (!pdev || !txrx_peer) {
+                dp_warn("Null pdev or peer");
+                return;
+        }
+
+        /* Check for offload mode */
+        if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
+                return;
+
+        if (txrx_peer->jitter_stats) {
+                qdf_mem_free(txrx_peer->jitter_stats);
+                txrx_peer->jitter_stats = NULL;
+        }
+}
+
+/**
+ * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
+ *
+ * @txrx_peer: dp_txrx_peer handle
+ *
+ * Return: void
+ */
+void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
+{
+        if (txrx_peer->jitter_stats)
+                qdf_mem_zero(txrx_peer->jitter_stats,
+                             sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS);
+}
+#endif
+
 QDF_STATUS
@@ -890,6 +890,7 @@ QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
                                          struct dp_txrx_peer *txrx_peer);
 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
                                      struct dp_txrx_peer *txrx_peer);
+void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
 #else
 static inline
 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
@@ -903,6 +904,39 @@ void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
                                      struct dp_txrx_peer *txrx_peer)
 {
 }
+
+static inline
+void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
+{
+}
 #endif
 
+#ifdef WLAN_PEER_JITTER
+QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
+                                          struct dp_txrx_peer *txrx_peer);
+
+void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
+                                      struct dp_txrx_peer *txrx_peer);
+
+void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
+#else
+static inline
+QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
+                                          struct dp_txrx_peer *txrx_peer)
+{
+        return QDF_STATUS_SUCCESS;
+}
+
+static inline
+void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
+                                      struct dp_txrx_peer *txrx_peer)
+{
+}
+
+static inline
+void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
+{
+}
+#endif
+
 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
@@ -5629,12 +5629,16 @@ static void dp_print_jitter_stats(struct dp_peer *peer, struct dp_pdev *pdev)
 {
         uint8_t tid = 0;
 
-        if (pdev && !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
+        if (!pdev || !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
                 return;
 
+        if (!peer->txrx_peer || !peer->txrx_peer->jitter_stats)
+                return;
+
         DP_PRINT_STATS("Per TID Tx HW Enqueue-Comp Jitter Stats:\n");
         for (tid = 0; tid < qdf_min(CDP_DATA_TID_MAX, DP_MAX_TIDS); tid++) {
-                struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+                struct cdp_peer_tid_stats *rx_tid =
+                                        &peer->txrx_peer->jitter_stats[tid];
 
                 DP_PRINT_STATS("Node tid = %d\n"
                                "Average Jiiter : %u (us)\n"
@@ -5642,12 +5646,12 @@ static void dp_print_jitter_stats(struct dp_peer *peer, struct dp_pdev *pdev)
                                "Total Average error count : %llu\n"
                                "Total Success Count : %llu\n"
                                "Total Drop : %llu\n",
-                               rx_tid->tid,
-                               rx_tid->stats.tx_avg_jitter,
-                               rx_tid->stats.tx_avg_delay,
-                               rx_tid->stats.tx_avg_err,
-                               rx_tid->stats.tx_total_success,
-                               rx_tid->stats.tx_drop);
+                               tid,
+                               rx_tid->tx_avg_jitter,
+                               rx_tid->tx_avg_delay,
+                               rx_tid->tx_avg_err,
+                               rx_tid->tx_total_success,
+                               rx_tid->tx_drop);
         }
 }
 #else
@@ -5774,6 +5778,9 @@ void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
         struct cdp_hist_stats hist_stats;
         uint8_t tid;
 
+        if (!peer || !peer->txrx_peer)
+                return;
+
         if (!pdev || !pdev->soc)
                 return;
 
@@ -5818,6 +5825,9 @@ void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev,
         struct cdp_hist_stats hist_stats;
         uint8_t tid;
 
+        if (!peer || !peer->txrx_peer)
+                return;
+
         if (!pdev || !pdev->soc)
                 return;
 
@@ -7760,7 +7770,7 @@ dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
         struct dp_soc *soc = (struct dp_soc *)soc_hdl;
         struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
                                                       DP_MOD_ID_CDP);
-        struct cdp_peer_ext_stats *pext_stats;
+        struct dp_peer_delay_stats *pext_stats;
         struct cdp_delay_rx_stats *rx_delay;
         struct cdp_delay_tx_stats *tx_delay;
         uint8_t tid;
@@ -7773,7 +7783,12 @@ dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                 return QDF_STATUS_E_FAILURE;
         }
 
-        pext_stats = peer->pext_stats;
+        if (!peer->txrx_peer) {
+                dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
+                return QDF_STATUS_E_FAILURE;
+        }
+
+        pext_stats = peer->txrx_peer->delay_stats;
         if (!pext_stats) {
                 dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
                 return QDF_STATUS_E_FAILURE;
@@ -7781,14 +7796,14 @@ dp_txrx_get_peer_delay_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 
         for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
                 rx_delay = &delay_stats[tid].rx_delay;
-                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
+                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_tid_stats,
                                               &rx_delay->to_stack_delay, tid,
                                               CDP_HIST_TYPE_REAP_STACK);
                 tx_delay = &delay_stats[tid].tx_delay;
-                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
+                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_tid_stats,
                                               &tx_delay->tx_swq_delay, tid,
                                               CDP_HIST_TYPE_SW_ENQEUE_DELAY);
-                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
+                dp_accumulate_delay_tid_stats(soc, pext_stats->delay_tid_stats,
                                               &tx_delay->hwtx_delay, tid,
                                               CDP_HIST_TYPE_HW_COMP_DELAY);
         }
@@ -7827,15 +7842,20 @@ dp_txrx_get_peer_jitter_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
         if (!peer)
                 return QDF_STATUS_E_FAILURE;
 
-        for (tid = 0; tid < qdf_min(CDP_DATA_TID_MAX, DP_MAX_TIDS); tid++) {
-                struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+        if (!peer->txrx_peer || !peer->txrx_peer->jitter_stats) {
+                dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
+                return QDF_STATUS_E_FAILURE;
+        }
 
-                tid_stats[tid].tx_avg_jitter = rx_tid->stats.tx_avg_jitter;
-                tid_stats[tid].tx_avg_delay = rx_tid->stats.tx_avg_delay;
-                tid_stats[tid].tx_avg_err = rx_tid->stats.tx_avg_err;
-                tid_stats[tid].tx_total_success =
-                                        rx_tid->stats.tx_total_success;
-                tid_stats[tid].tx_drop = rx_tid->stats.tx_drop;
+        for (tid = 0; tid < qdf_min(CDP_DATA_TID_MAX, DP_MAX_TIDS); tid++) {
+                struct cdp_peer_tid_stats *rx_tid =
+                                        &peer->txrx_peer->jitter_stats[tid];
+
+                tid_stats[tid].tx_avg_jitter = rx_tid->tx_avg_jitter;
+                tid_stats[tid].tx_avg_delay = rx_tid->tx_avg_delay;
+                tid_stats[tid].tx_avg_err = rx_tid->tx_avg_err;
+                tid_stats[tid].tx_total_success = rx_tid->tx_total_success;
+                tid_stats[tid].tx_drop = rx_tid->tx_drop;
         }
         dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 