qca-wifi: use the generic peer iterate API

Use the peer iterate APIs to iterate through the peers
in the vdev peer lists at the vdev, pdev and soc levels

Change-Id: I983d825a780d76370ecb652e5151029df0334a26
This commit is contained in:
Chaithanya Garrepalli
2020-08-11 12:06:12 +05:30
committed by Gerrit - the friendly Code Review server
parent 1052e4d52b
commit fe5cbcd4ab
3 changed files with 104 additions and 129 deletions

View File

@@ -241,6 +241,46 @@ void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
pdev->tx_capture.htt_frame_type[htt_frame_type]++;
}
/**
 * dp_peer_print_tid_qlen() - accumulate or print per-TID tx-capture queue
 *			      depths for one peer
 * @soc: DP soc handle (unused; required by the peer-iterate callback shape)
 * @peer: peer whose tx_capture TID queues are inspected
 * @arg: optional accumulator. When non-NULL it points to a struct tid_q_len
 *	 into which this peer's queue lengths are summed (aggregated mode,
 *	 nothing is printed). When NULL, every TID with a non-empty queue is
 *	 printed, followed by the peer's capture stats.
 *
 * Return: none
 */
static void
dp_peer_print_tid_qlen(struct dp_soc *soc,
		       struct dp_peer *peer,
		       void *arg)
{
	int tid;
	struct dp_tx_tid *tx_tid;
	uint32_t msdu_len;
	uint32_t tasklet_msdu_len;
	uint32_t ppdu_len;
	struct tid_q_len *c_tid_q_len = (struct tid_q_len *)arg;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		tx_tid = &peer->tx_capture.tx_tid[tid];
		msdu_len = qdf_nbuf_queue_len(&tx_tid->defer_msdu_q);
		tasklet_msdu_len = qdf_nbuf_queue_len(&tx_tid->msdu_comp_q);
		ppdu_len = qdf_nbuf_queue_len(&tx_tid->pending_ppdu_q);

		/*
		 * Non-NULL arg means aggregated stats were requested: add
		 * this TID's lengths to the accumulator and do not print
		 * individual peer stats.
		 */
		if (c_tid_q_len) {
			c_tid_q_len->defer_msdu_len += msdu_len;
			c_tid_q_len->tasklet_msdu_len += tasklet_msdu_len;
			c_tid_q_len->pending_q_len += ppdu_len;
			continue;
		}

		/* Skip TIDs whose queues are all empty to keep output short */
		if (!msdu_len && !ppdu_len && !tasklet_msdu_len)
			continue;

		DP_PRINT_STATS(" peer_id[%d] tid[%d] msdu_comp_q[%d] defer_msdu_q[%d] pending_ppdu_q[%d]",
			       peer->peer_id, tid,
			       tasklet_msdu_len,
			       msdu_len, ppdu_len);
	}

	/*
	 * Per-peer capture stats belong to the individual-print path only;
	 * the pre-refactor code guarded this with "if (!consolidated)", so
	 * skip it when the caller asked for aggregated stats.
	 */
	if (!c_tid_q_len)
		dp_tx_capture_print_stats(peer);
}
/*
 * dp_print_tid_qlen_per_peer() - API to print peer tid msdu queue
 * @pdev_hdl: DP_PDEV handle
@@ -250,59 +290,21 @@ void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
/*
 * NOTE(review): this span is diff-render residue — the commit viewer has
 * interleaved the REMOVED open-coded vdev/peer iteration (locks plus
 * DP_PDEV_ITERATE_VDEV_LIST / DP_VDEV_ITERATE_PEER_LIST) with the ADDED
 * dp_pdev_iterate_peer() calls, so the text below is not compilable as-is.
 * Consult the actual pre/post images of the commit before editing.
 */
void dp_print_tid_qlen_per_peer(void *pdev_hdl, uint8_t consolidated)
{
struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
/* old (removed) locals used only by the open-coded iteration below */
struct dp_vdev *vdev = NULL;
struct dp_peer *peer = NULL;
uint64_t c_defer_msdu_len = 0;
uint64_t c_tasklet_msdu_len = 0;
uint64_t c_pending_q_len = 0;
DP_PRINT_STATS("pending peer msdu and ppdu:");
qdf_spin_lock_bh(&pdev->vdev_list_lock);
if (consolidated) {
/* new (added): aggregate into a tid_q_len via the iterate callback */
struct tid_q_len c_tid_q_len = {0};
/* old (removed): manual two-level iteration with per-list locks */
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
int tid;
struct dp_tx_tid *tx_tid;
uint32_t msdu_len;
uint32_t tasklet_msdu_len;
uint32_t ppdu_len;
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
tx_tid = &peer->tx_capture.tx_tid[tid];
msdu_len =
qdf_nbuf_queue_len(&tx_tid->defer_msdu_q);
tasklet_msdu_len =
qdf_nbuf_queue_len(&tx_tid->msdu_comp_q);
ppdu_len =
qdf_nbuf_queue_len(&tx_tid->pending_ppdu_q);
c_defer_msdu_len += msdu_len;
c_tasklet_msdu_len += tasklet_msdu_len;
c_pending_q_len += ppdu_len;
if (consolidated)
continue;
if (!msdu_len && !ppdu_len && !tasklet_msdu_len)
continue;
DP_PRINT_STATS(" peer_id[%d] tid[%d] msdu_comp_q[%d] defer_msdu_q[%d] pending_ppdu_q[%d]",
peer->peer_id, tid,
tasklet_msdu_len,
msdu_len, ppdu_len);
}
if (!consolidated)
dp_tx_capture_print_stats(peer);
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
/* new (added): single call replaces the manual iteration above */
dp_pdev_iterate_peer(pdev, dp_peer_print_tid_qlen, &c_tid_q_len,
DP_MOD_ID_TX_CAPTURE);
DP_PRINT_STATS("consolidated: msdu_comp_q[%d] defer_msdu_q[%d] pending_ppdu_q[%d]",
c_tasklet_msdu_len, c_defer_msdu_len,
c_pending_q_len);
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
/* new (added): remaining argument lines of the new consolidated print */
c_tid_q_len.tasklet_msdu_len, c_tid_q_len.defer_msdu_len,
c_tid_q_len.pending_q_len);
}
/* new (added): non-consolidated path prints per peer via the callback */
dp_pdev_iterate_peer(pdev, dp_peer_print_tid_qlen, NULL,
DP_MOD_ID_TX_CAPTURE);
}
static void
dp_ppdu_queue_free(qdf_nbuf_t ppdu_nbuf, uint8_t usr_idx)
@@ -613,6 +615,8 @@ void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
tx_tid->max_ppdu_id = 0;
}
peer->tx_capture.is_tid_initialized = 0;
}
/*
@@ -1509,33 +1513,15 @@ static void dp_soc_set_txrx_ring_map_single(struct dp_soc *soc)
}
}
/*
 * dp_iterate_free_peer_msdu_q()- API to free msdu queue
 * @pdev_handle: DP_PDEV handle
 *
 * Return: void
 */
/*
 * NOTE(review): diff-render residue — the REMOVED pdev-level helper
 * signature (dp_iterate_free_peer_msdu_q) and the ADDED per-peer iterate
 * callback signature (dp_peer_free_msdu_q) appear fused here, followed by
 * the removed body. Not compilable as-is; consult the actual pre/post
 * images of the commit before editing.
 */
static void dp_iterate_free_peer_msdu_q(void *pdev_hdl)
static void dp_peer_free_msdu_q(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
/* old (removed) body: manual vdev/peer iteration under list locks */
struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
struct dp_vdev *vdev = NULL;
struct dp_peer *peer = NULL;
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
/* set peer tx cap enabled to 0, when feature disable */
peer->tx_cap_enabled = 0;
dp_peer_tid_queue_cleanup(peer);
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
/*
 * dp_soc_is_tx_capture_set_in_pdev() - API to get tx capture set in any pdev
* @soc_handle: DP_SOC handle
*
* return: true
@@ -1578,7 +1564,9 @@ dp_enh_tx_capture_disable(struct dp_pdev *pdev)
DP_PPDU_STATS_CFG_ENH_STATS,
pdev->pdev_id);
dp_iterate_free_peer_msdu_q(pdev);
dp_pdev_iterate_peer(pdev, dp_peer_free_msdu_q, NULL,
DP_MOD_ID_TX_CAPTURE);
for (i = 0; i < TXCAP_MAX_TYPE; i++) {
for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
qdf_nbuf_queue_t *retries_q;

View File

@@ -241,6 +241,11 @@ struct dbg_tx_comp_ppdu {
struct dbg_tx_comp_ppdu_user user[];
};
/*
 * struct tid_q_len - per-TID tx-capture queue depths, summed across peers
 * @defer_msdu_len: total depth of the deferred MSDU queues (defer_msdu_q)
 * @tasklet_msdu_len: total depth of the MSDU completion queues (msdu_comp_q)
 * @pending_q_len: total depth of the pending PPDU queues (pending_ppdu_q)
 *
 * Passed as the iterate-callback argument to accumulate consolidated
 * queue-length stats instead of printing them per peer.
 */
struct tid_q_len {
uint64_t defer_msdu_len;
uint64_t tasklet_msdu_len;
uint64_t pending_q_len;
};
/*
* dp_peer_tid_peer_id_update() update peer_id to tid structure
* @peer: Datapath peer

View File

@@ -40,14 +40,46 @@
#define DP_PEER_AST3_FLOW_MASK 0x2
#define DP_MAX_AST_INDEX_PER_PEER 4
/**
 * dp_peer_age_ast_entries() - age out one peer's dynamic AST entries
 * @soc: DP soc handle
 * @peer: peer whose AST list is walked
 * @arg: pointer to bool; when true, WDS/DA entries are aged as well as MEC
 *
 * Static AST entries and HM WDS entries are never expired. An active
 * ageable entry is first demoted to inactive (MEC always, others only when
 * WDS aging is requested) so it is deleted on a later pass; entries already
 * inactive are deleted and counted as aged out. MEC entries must expire
 * periodically so that, if a STA backbone becomes an AP backbone, the entry
 * can be re-added as a WDS entry.
 */
static void
dp_peer_age_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *entry, *next_entry;
	bool wds_aging = *(bool *)arg;

	DP_PEER_ITERATE_ASE_LIST(peer, entry, next_entry) {
		/* Only WDS, MEC and DA entries are subject to aging. */
		if (!(entry->type == CDP_TXRX_AST_TYPE_WDS ||
		      entry->type == CDP_TXRX_AST_TYPE_MEC ||
		      entry->type == CDP_TXRX_AST_TYPE_DA))
			continue;

		/* First pass for an active entry: demote, delete later. */
		if (entry->is_active &&
		    (entry->type == CDP_TXRX_AST_TYPE_MEC || wds_aging)) {
			entry->is_active = FALSE;
			continue;
		}

		/* Inactive MEC entries always expire; others on request. */
		if (entry->type == CDP_TXRX_AST_TYPE_MEC || wds_aging) {
			DP_STATS_INC(soc, ast.aged_out, 1);
			dp_peer_del_ast(soc, entry);
		}
	}
}
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
struct dp_pdev *pdev;
struct dp_vdev *vdev;
struct dp_peer *peer;
struct dp_ast_entry *ase, *temp_ase;
int i;
bool check_wds_ase = false;
if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
@@ -58,58 +90,8 @@ static void dp_ast_aging_timer_fn(void *soc_hdl)
/* AST list access lock */
qdf_spin_lock_bh(&soc->ast_lock);
for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
/*
* Do not expire static ast entries
* and HM WDS entries
*/
if (ase->type !=
CDP_TXRX_AST_TYPE_WDS &&
ase->type !=
CDP_TXRX_AST_TYPE_MEC &&
ase->type !=
CDP_TXRX_AST_TYPE_DA)
continue;
/* Expire MEC entry every n sec.
* This needs to be expired in
* case if STA backbone is made as
* AP backbone, In this case it needs
* to be re-added as a WDS entry.
*/
if (ase->is_active && ase->type ==
CDP_TXRX_AST_TYPE_MEC) {
ase->is_active = FALSE;
continue;
} else if (ase->is_active &&
check_wds_ase) {
ase->is_active = FALSE;
continue;
}
if (ase->type ==
CDP_TXRX_AST_TYPE_MEC) {
DP_STATS_INC(soc,
ast.aged_out, 1);
dp_peer_del_ast(soc, ase);
} else if (check_wds_ase) {
DP_STATS_INC(soc,
ast.aged_out, 1);
dp_peer_del_ast(soc, ase);
}
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
dp_soc_iterate_peer(soc, dp_peer_age_ast_entries, (void *)&check_wds_ase,
DP_MOD_ID_AST);
qdf_spin_unlock_bh(&soc->ast_lock);
if (qdf_atomic_read(&soc->cmn_init_done))