qcacmn: add APIs to iterate the peers at vdev level
Add new APIs to iterate through the vdev peer list while taking a
reference on each peer. A callback has to be passed to these APIs and
is invoked for every peer in the list:

dp_vdev_iterate_peer()  - iterates through the peer_list of a vdev
dp_pdev_iterate_peer()  - iterates through the peer_lists of all vdevs in a pdev
dp_soc_iterate_peer()   - iterates through the peer_lists of all vdevs in the soc

Additional APIs are added for iterations where the callback is called
outside vdev->peer_list_lock. As these APIs make extra memory
allocations, they are suggested only when the iteration needs to happen
outside the lock:

dp_vdev_iterate_peer_lock_safe()
dp_pdev_iterate_peer_lock_safe()
dp_soc_iterate_peer_lock_safe()

Change-Id: I24632fe611355cc3e93b7f16d90913d4b8686ca9
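As an illustration, the sketch below shows how a caller might use the new
iterator. The callback signature follows dp_peer_iter_func from this change;
dp_peer_count_ctx, dp_peer_count_cb and dp_vdev_count_valid_peers are
hypothetical names added only for the example and are not part of the commit.

/*
 * Example (illustrative only): count valid peers on a vdev using the
 * new iterator. The context struct and the peer->valid check are
 * example details, not part of this change.
 */
struct dp_peer_count_ctx {
	uint32_t num_valid_peers;
};

static void dp_peer_count_cb(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	struct dp_peer_count_ctx *ctx = (struct dp_peer_count_ctx *)arg;

	/* Invoked once per peer while the iterator holds a peer reference */
	if (peer->valid)
		ctx->num_valid_peers++;
}

static uint32_t dp_vdev_count_valid_peers(struct dp_vdev *vdev)
{
	struct dp_peer_count_ctx ctx = {0};

	/* The callback runs under vdev->peer_list_lock; use
	 * dp_vdev_iterate_peer_lock_safe() instead if the callback must
	 * run outside that lock.
	 */
	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &ctx, DP_MOD_ID_CDP);

	return ctx.num_valid_peers;
}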
Committed by: snandini
Parent: 1322dc7949
Commit: e2b00339f3
@@ -703,9 +703,11 @@ static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
}
static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj,
struct dp_peer *srcobj)
static inline void dp_update_vdev_stats(struct dp_soc *soc,
struct dp_peer *srcobj,
void *arg)
{
struct cdp_vdev_stats *tgtobj = (struct cdp_vdev_stats *)arg;
uint8_t i;
uint8_t pream_type;

@@ -975,7 +977,6 @@ extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer);
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_peer_mod_id id);
extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
@@ -697,6 +697,27 @@ static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
return status;
}

/*
* dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
* @soc_handle: Datapath SOC handle
* @peer: DP peer
* @arg: callback argument
*
* Return: None
*/
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
struct dp_ast_entry *ast_entry = NULL;
struct dp_ast_entry *tmp_ast_entry;

DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
(ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
dp_peer_del_ast(soc, ast_entry);
}
}

/*
* dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
* @soc_handle: Datapath SOC handle

@@ -712,7 +733,6 @@ static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
{
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
struct dp_ast_entry *ast_entry = NULL;
struct dp_ast_entry *tmp_ast_entry;
struct dp_peer *peer;
struct dp_pdev *pdev;
struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

@@ -731,11 +751,7 @@ static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
}

qdf_spin_lock_bh(&soc->ast_lock);
DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
(ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
dp_peer_del_ast(soc, ast_entry);
}
dp_peer_reset_ast_entries(soc, peer, NULL);
qdf_spin_unlock_bh(&soc->ast_lock);
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

@@ -759,6 +775,7 @@ static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
/*
* dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
* @soc: Datapath SOC handle
* @vdev_id: id of vdev object
*
* Return: QDF_STATUS
*/

@@ -767,38 +784,42 @@ dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
uint8_t vdev_id)
{
struct dp_soc *soc = (struct dp_soc *) soc_hdl;
struct dp_pdev *pdev;
struct dp_vdev *vdev;
struct dp_peer *peer;
struct dp_ast_entry *ase, *temp_ase;
int i;

qdf_spin_lock_bh(&soc->ast_lock);

for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
if ((ase->type ==
CDP_TXRX_AST_TYPE_WDS_HM) ||
(ase->type ==
CDP_TXRX_AST_TYPE_WDS_HM_SEC))
dp_peer_del_ast(soc, ase);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
DP_MOD_ID_CDP);
qdf_spin_unlock_bh(&soc->ast_lock);

return QDF_STATUS_SUCCESS;
}

/*
* dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
* @soc: Datapath SOC
* @peer: Datapath peer
* @arg: arg to callback
*
* Return: None
*/
static void
dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
struct dp_ast_entry *ase = NULL;
struct dp_ast_entry *temp_ase;

DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
if ((ase->type ==
CDP_TXRX_AST_TYPE_STATIC) ||
(ase->type ==
CDP_TXRX_AST_TYPE_SELF) ||
(ase->type ==
CDP_TXRX_AST_TYPE_STA_BSS))
continue;
dp_peer_del_ast(soc, ase);
}
}

/*
* dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
* @soc: Datapath SOC handle

@@ -808,35 +829,11 @@ dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
{
struct dp_soc *soc = (struct dp_soc *) soc_hdl;
struct dp_pdev *pdev;
struct dp_vdev *vdev;
struct dp_peer *peer;
struct dp_ast_entry *ase, *temp_ase;
int i;

qdf_spin_lock_bh(&soc->ast_lock);

for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
if ((ase->type ==
CDP_TXRX_AST_TYPE_STATIC) ||
(ase->type ==
CDP_TXRX_AST_TYPE_SELF) ||
(ase->type ==
CDP_TXRX_AST_TYPE_STA_BSS))
continue;
dp_peer_del_ast(soc, ase);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
DP_MOD_ID_CDP);

qdf_spin_unlock_bh(&soc->ast_lock);
}
@@ -1218,42 +1215,24 @@ static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
ring_params->flags |= HAL_SRNG_MSI_INTR;
}

#ifdef FEATURE_AST
/**
* dp_print_ast_stats() - Dump AST table contents
* dp_print_peer_ast_entries() - Dump AST entries of peer
* @soc: Datapath soc handle
* @peer: Datapath peer
* @arg: argument to iterate function
*
* return void
*/
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
static void
dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
uint8_t i;
uint8_t num_entries = 0;
struct dp_vdev *vdev;
struct dp_pdev *pdev;
struct dp_peer *peer;
struct dp_ast_entry *ase, *tmp_ase;
uint32_t num_entries = 0;
char type[CDP_TXRX_AST_TYPE_MAX][10] = {
"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
"DA", "HMWDS_SEC"};

DP_PRINT_STATS("AST Stats:");
DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err);
DP_PRINT_STATS(" Entries Mismatch ERR = %d",
soc->stats.ast.ast_mismatch);

DP_PRINT_STATS("AST Table:");

qdf_spin_lock_bh(&soc->ast_lock);
for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
DP_PRINT_STATS("%6d mac_addr = %pM"
" peer_mac_addr = %pM"

@@ -1277,13 +1256,33 @@ void dp_print_ast_stats(struct dp_soc *soc)
ase->ast_hash_value,
ase->delete_in_progress,
ase->pdev_id,
vdev->vdev_id);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
ase->vdev_id);
}
}

/**
* dp_print_ast_stats() - Dump AST table contents
* @soc: Datapath soc handle
*
* return void
*/
void dp_print_ast_stats(struct dp_soc *soc)
{
DP_PRINT_STATS("AST Stats:");
DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added);
DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted);
DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out);
DP_PRINT_STATS(" Entries MAP ERR = %d", soc->stats.ast.map_err);
DP_PRINT_STATS(" Entries Mismatch ERR = %d",
soc->stats.ast.ast_mismatch);

DP_PRINT_STATS("AST Table:");

qdf_spin_lock_bh(&soc->ast_lock);

dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
DP_MOD_ID_GENERIC_STATS);

qdf_spin_unlock_bh(&soc->ast_lock);
}
#else

@@ -1295,18 +1294,16 @@ void dp_print_ast_stats(struct dp_soc *soc)
#endif

/**
* dp_print_peer_table() - Dump all Peer stats
* @vdev: Datapath Vdev handle
* dp_print_peer_info() - Dump peer info
* @soc: Datapath soc handle
* @peer: Datapath peer handle
* @arg: argument to iter function
*
* return void
*/
static void dp_print_peer_table(struct dp_vdev *vdev)
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
struct dp_peer *peer = NULL;

DP_PRINT_STATS("Dumping Peer Table Stats:");
qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
DP_PRINT_STATS(" peer_mac_addr = %pM"
" nawds_enabled = %d"
" bss_peer = %d"

@@ -1321,8 +1318,19 @@ static void dp_print_peer_table(struct dp_vdev *vdev)
peer->tx_cap_enabled,
peer->rx_cap_enabled,
peer->peer_id);
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/**
* dp_print_peer_table() - Dump all Peer stats
* @vdev: Datapath Vdev handle
*
* return void
*/
static void dp_print_peer_table(struct dp_vdev *vdev)
{
DP_PRINT_STATS("Dumping Peer Table Stats:");
dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
DP_MOD_ID_GENERIC_STATS);
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
@@ -3796,25 +3804,29 @@ QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
}
#endif

#ifdef ATH_SUPPORT_EXT_STAT
/*dp_peer_cal_clients_stats_update - update peer stats on cal client timer
* @soc : Datapath SOC
* @peer : Datapath peer
* @arg : argument to iter function
*/
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
dp_cal_client_update_peer_stats(&peer->stats);
}

/*dp_iterate_update_peer_list - update peer stats on cal client timer
* @pdev_hdl: pdev handle
*/
#ifdef ATH_SUPPORT_EXT_STAT
void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
struct dp_vdev *vdev = NULL;
struct dp_peer *peer = NULL;

qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
dp_cal_client_update_peer_stats(&peer->stats);
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
DP_MOD_ID_CDP);
}
#else
void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
@@ -4982,6 +4994,7 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
qdf_atomic_init(&vdev->ref_cnt);
/* Take one reference for create*/
qdf_atomic_inc(&vdev->ref_cnt);
vdev->num_peers = 0;
#ifdef notyet
vdev->filters_num = 0;
#endif
@@ -5109,6 +5122,28 @@ static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
return QDF_STATUS_SUCCESS;
}

/**
* dp_peer_delete() - delete DP peer
*
* @soc: Datapath soc
* @peer: Datapath peer
* @arg: argument to iter function
*
* Return: void
*/
static void
dp_peer_delete(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
if (!peer->valid)
return;

dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
peer->vdev->vdev_id,
peer->mac_addr.raw, 0);
}

/**
* dp_vdev_flush_peers() - Forcibly flush peers of vdev
* @vdev: Datapath VDEV handle

@@ -5122,80 +5157,35 @@ static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
struct dp_peer *peer;
uint16_t *peer_ids;
struct dp_peer **peer_array = NULL;
uint8_t i = 0, j = 0;
uint8_t m = 0, n = 0;
uint32_t i = 0;

peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(*peer_ids));
if (!peer_ids) {
dp_err("DP alloc failure - unable to flush peers");
return;
}

if (!unmap_only) {
peer_array = qdf_mem_malloc(
soc->max_peers * sizeof(struct dp_peer *));
if (!peer_array) {
qdf_mem_free(peer_ids);
dp_err("DP alloc failure - unable to flush peers");
return;
}
}
if (!unmap_only)
dp_vdev_iterate_peer(vdev, dp_peer_delete, NULL,
DP_MOD_ID_CDP);

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
if (!unmap_only && n < soc->max_peers)
peer_array[n++] = peer;

if (peer->peer_id != HTT_INVALID_PEER)
if (j < soc->max_peers)
peer_ids[j++] = peer->peer_id;
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);

/*
* If peer id is invalid, need to flush the peer if
* peer valid flag is true, this is needed for NAN + SSR case.
*/
if (!unmap_only) {
for (m = 0; m < n ; m++) {
peer = peer_array[m];

dp_info("peer: %pM is getting deleted",
peer->mac_addr.raw);
/* only if peer valid is true */
if (peer->valid)
dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
vdev->vdev_id,
peer->mac_addr.raw, 0);
}
qdf_mem_free(peer_array);
}

for (i = 0; i < j ; i++) {
peer = __dp_peer_find_by_id(soc, peer_ids[i]);
for (i = 0; i < soc->max_peers ; i++) {
peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);

if (!peer)
continue;

dp_info("peer ref cnt %d", qdf_atomic_read(&peer->ref_cnt));
/*
* set ref count to one to force delete the peers
* with ref count leak
*/
SET_PEER_REF_CNT_ONE(peer);
if (peer->vdev != vdev) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
continue;
}

dp_info("peer: %pM is getting unmap",
peer->mac_addr.raw);

dp_rx_peer_unmap_handler(soc, peer_ids[i],
dp_rx_peer_unmap_handler(soc, i,
vdev->vdev_id,
peer->mac_addr.raw, 0,
DP_PEER_WDS_COUNT_INVALID);
SET_PEER_REF_CNT_ONE(peer);
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}

qdf_mem_free(peer_ids);
dp_info("Flushed peers for vdev object %pK ", vdev);
}

/*
@@ -6960,7 +6950,6 @@ void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
struct cdp_vdev_stats *vdev_stats)
{
struct dp_peer *peer = NULL;
struct dp_soc *soc = NULL;

if (!vdev || !vdev->pdev)

@@ -6970,10 +6959,8 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev,

qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
dp_update_vdev_stats(vdev_stats, peer);
qdf_spin_unlock_bh(&vdev->peer_list_lock);
dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
@@ -7205,6 +7192,35 @@ void dp_print_napi_stats(struct dp_soc *soc)
hif_print_napi_stats(soc->hif_handle);
}

/**
* dp_txrx_host_peer_stats_clr(): Reinitialize the txrx peer stats
* @soc: Datapath soc
* @peer: Datapath peer
* @arg: argument to iter function
*
* Return: QDF_STATUS
*/
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
struct dp_rx_tid *rx_tid;
uint8_t tid;

for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
DP_STATS_CLR(rx_tid);
}
DP_STATS_CLR(peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
&peer->stats, peer->peer_id,
UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}

/**
* dp_txrx_host_stats_clr(): Reinitialize the txrx stats
* @vdev: DP_VDEV handle

@@ -7215,8 +7231,6 @@ void dp_print_napi_stats(struct dp_soc *soc)
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
struct dp_peer *peer = NULL;

if (!vdev || !vdev->pdev)
return QDF_STATUS_E_FAILURE;

@@ -7237,24 +7251,8 @@ dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)

hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
struct dp_rx_tid *rx_tid;
uint8_t tid;

for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
DP_STATS_CLR(rx_tid);
}

DP_STATS_CLR(peer);
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
&peer->stats, peer->peer_id,
UPDATE_PEER_STATS, vdev->pdev->pdev_id);
#endif
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
@@ -9675,32 +9673,47 @@ static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
* dp_peer_flush_rate_stats_req(): Flush peer rate stats
* @soc: Datapath SOC handle
* @peer: Datapath peer
* @arg: argument to iter function
*
* Return: QDF_STATUS
*/
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
void *arg)
{
if (peer->bss_peer)
return;

dp_wdi_event_handler(
WDI_EVENT_FLUSH_RATE_STATS_REQ,
soc, peer->wlanstats_ctx,
peer->peer_id,
WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}

/**
* dp_flush_rate_stats_req(): Flush peer rate stats in pdev
* @soc_hdl: Datapath SOC handle
* @pdev_id: pdev_id
*
* Return: QDF_STATUS
*/
static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
uint8_t pdev_id)
{
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
struct dp_vdev *vdev = NULL;
struct dp_peer *peer = NULL;
struct dp_pdev *pdev =
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
pdev_id);
if (!pdev)
return QDF_STATUS_E_FAILURE;

qdf_spin_lock_bh(&pdev->vdev_list_lock);
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
if (peer && !peer->bss_peer)
dp_wdi_event_handler(
WDI_EVENT_FLUSH_RATE_STATS_REQ,
soc, peer->wlanstats_ctx,
peer->peer_id,
WDI_NO_VAL, pdev_id);
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
DP_MOD_ID_CDP);

return QDF_STATUS_SUCCESS;
}
@@ -292,6 +292,7 @@ void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
else
TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

vdev->num_peers++;
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

@@ -321,6 +322,7 @@ void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
TAILQ_REMOVE(&peer->vdev->peer_list, peer,
peer_list_elem);
dp_peer_unref_delete(peer, DP_MOD_ID_PEER_CONFIG);
vdev->num_peers--;
} else {
/*Ignoring the remove operation as peer not found*/
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,

@@ -2081,7 +2083,7 @@ dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
struct dp_peer *peer;
struct dp_vdev *vdev = NULL;

peer = __dp_peer_find_by_id(soc, peer_id);
peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);

/*
* Currently peer IDs are assigned for vdevs as well as peers.

@@ -2098,14 +2100,17 @@ dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
*/
if (is_wds) {
if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
mac_addr))
mac_addr)) {
dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
return;
}

dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
peer, peer->peer_id,
peer->mac_addr.raw, mac_addr, vdev_id,
is_wds);

dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
return;
} else {
dp_peer_clean_wds_entries(soc, peer, free_wds_count);

@@ -2137,6 +2142,7 @@ dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);

dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
/*
* Remove a reference to the peer.
* If there are no more references, delete the peer object.
@@ -26,6 +26,10 @@

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
void *arg);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_peer_mod_id id);

/**
* dp_peer_get_ref() - Returns peer object given the peer id
*

@@ -51,29 +55,38 @@ QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
}

/**
* __dp_peer_find_by_id() - Returns peer object given the peer id
* __dp_peer_get_ref_by_id() - Returns peer object given the peer id
*
* @soc : core DP soc context
* @peer_id : peer id from peer object can be retrieved
* @mod_id : module id
*
* Return: struct dp_peer*: Pointer to DP peer object
*/
static inline struct dp_peer *
__dp_peer_find_by_id(struct dp_soc *soc,
uint16_t peer_id)
__dp_peer_get_ref_by_id(struct dp_soc *soc,
uint16_t peer_id,
enum dp_peer_mod_id mod_id)
{
struct dp_peer *peer;

/* TODO: Hold lock */
qdf_spin_lock_bh(&soc->peer_map_lock);
peer = (peer_id >= soc->max_peers) ? NULL :
soc->peer_id_to_obj_map[peer_id];
if (!peer ||
(dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
qdf_spin_unlock_bh(&soc->peer_map_lock);
return NULL;
}

qdf_spin_unlock_bh(&soc->peer_map_lock);
return peer;
}

/**
* dp_peer_get_ref_by_id() - Returns peer object given the peer id
* if delete_in_progress is not set for peer
* if peer state is active
*
* @soc : core DP soc context
* @peer_id : peer id from peer object can be retrieved

@@ -89,7 +102,9 @@ struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
struct dp_peer *peer;

qdf_spin_lock_bh(&soc->peer_map_lock);
peer = __dp_peer_find_by_id(soc, peer_id);
peer = (peer_id >= soc->max_peers) ? NULL :
soc->peer_id_to_obj_map[peer_id];

if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
(dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
qdf_spin_unlock_bh(&soc->peer_map_lock);

@@ -126,6 +141,275 @@ dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
dp_rx_flush_rx_cached(peer, true);
}

/**
* dp_vdev_iterate_peer() - API to iterate through vdev peer list
*
* @vdev : DP vdev context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_peer *peer;
struct dp_peer *tmp_peer;
struct dp_soc *soc = NULL;

if (!vdev || !vdev->pdev || !vdev->pdev->soc)
return;

soc = vdev->pdev->soc;

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
peer_list_elem,
tmp_peer) {
if (dp_peer_get_ref(soc, peer, mod_id) ==
QDF_STATUS_SUCCESS) {
(*func)(soc, peer, arg);
dp_peer_unref_delete(peer, mod_id);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

/**
* dp_pdev_iterate_peer() - API to iterate through all peers of pdev
*
* @pdev : DP pdev context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_vdev *vdev;

if (!pdev)
return;

qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
dp_vdev_iterate_peer(vdev, func, arg, mod_id);
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
* dp_soc_iterate_peer() - API to iterate through all peers of soc
*
* @soc : DP soc context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_pdev *pdev;
int i;

if (!soc)
return;

for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
dp_pdev_iterate_peer(pdev, func, arg, mod_id);
}
}

/**
* dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
*
* This API will cache the peers in local allocated memory and calls
* iterate function outside the lock.
*
* As this API is allocating new memory it is suggested to use this
* only when lock cannot be held
*
* @vdev : DP vdev context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
dp_peer_iter_func *func,
void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_peer *peer;
struct dp_peer *tmp_peer;
struct dp_soc *soc = NULL;
struct dp_peer **peer_array = NULL;
int i = 0;
uint32_t num_peers = 0;

if (!vdev || !vdev->pdev || !vdev->pdev->soc)
return;

num_peers = vdev->num_peers;

soc = vdev->pdev->soc;

peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
if (!peer_array)
return;

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
peer_list_elem,
tmp_peer) {
if (i >= num_peers)
break;

if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
peer_array[i] = peer;
i = (i + 1);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);

for (i = 0; i < num_peers; i++) {
peer = peer_array[i];

if (!peer)
continue;

(*func)(soc, peer, arg);
dp_peer_unref_delete(peer, mod_id);
}

qdf_mem_free(peer_array);
}

/**
* dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
*
* This API will cache the peers in local allocated memory and calls
* iterate function outside the lock.
*
* As this API is allocating new memory it is suggested to use this
* only when lock cannot be held
*
* @pdev : DP pdev context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
dp_peer_iter_func *func,
void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_peer *peer;
struct dp_peer *tmp_peer;
struct dp_soc *soc = NULL;
struct dp_vdev *vdev = NULL;
struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
int i = 0;
int j = 0;
uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

if (!pdev || !pdev->soc)
return;

soc = pdev->soc;

qdf_spin_lock_bh(&pdev->vdev_list_lock);
DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
num_peers[i] = vdev->num_peers;
peer_array[i] = qdf_mem_malloc(num_peers[i] *
sizeof(struct dp_peer *));
if (!peer_array[i])
break;

qdf_spin_lock_bh(&vdev->peer_list_lock);
TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
peer_list_elem,
tmp_peer) {
if (j >= num_peers[i])
break;

if (dp_peer_get_ref(soc, peer, mod_id) ==
QDF_STATUS_SUCCESS) {
peer_array[i][j] = peer;

j = (j + 1);
}
}
qdf_spin_unlock_bh(&vdev->peer_list_lock);
i = (i + 1);
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);

for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
if (!peer_array[i])
break;

for (j = 0; j < num_peers[i]; j++) {
peer = peer_array[i][j];

if (!peer)
continue;

(*func)(soc, peer, arg);
dp_peer_unref_delete(peer, mod_id);
}

qdf_mem_free(peer_array[i]);
}
}

/**
* dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
*
* This API will cache the peers in local allocated memory and calls
* iterate function outside the lock.
*
* As this API is allocating new memory it is suggested to use this
* only when lock cannot be held
*
* @soc : DP soc context
* @func : function to be called for each peer
* @arg : argument need to be passed to func
* @mod_id : module_id
*
* Return: void
*/
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
dp_peer_iter_func *func,
void *arg,
enum dp_peer_mod_id mod_id)
{
struct dp_pdev *pdev;
int i;

if (!soc)
return;

for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
pdev = soc->pdev_list[i];
dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
}
}

/**
* dp_peer_update_state() - update dp peer state
*
@@ -6040,24 +6040,19 @@ void dp_txrx_path_stats(struct dp_soc *soc)
* dp_aggregate_pdev_ctrl_frames_stats()- function to aggregate peer stats
* Current scope is bar received count
*
* @pdev_handle: DP_PDEV handle
* @soc : Datapath SOC handle
* @peer: Datapath peer handle
* @arg : argument to iterate function
*
* Return: void
*/
static void
dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
dp_peer_ctrl_frames_stats_get(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
struct dp_vdev *vdev;
struct dp_peer *peer;
uint32_t waitcnt;

TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {

if (dp_peer_get_ref(pdev->soc, peer,
DP_MOD_ID_GENERIC_STATS) !=
QDF_STATUS_SUCCESS)
continue;
struct dp_pdev *pdev = peer->vdev->pdev;

waitcnt = 0;
dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev);

@@ -6068,9 +6063,6 @@ dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
waitcnt++;
}
qdf_atomic_set(&pdev->stats_cmd_complete, 0);
dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
}
}
}

void

@@ -6282,8 +6274,9 @@ dp_print_pdev_rx_stats(struct dp_pdev *pdev)
DP_PRINT_STATS(" Failed frag alloc = %u",
pdev->stats.replenish.frag_alloc_fail);

dp_pdev_iterate_peer_lock_safe(pdev, dp_peer_ctrl_frames_stats_get,
NULL, DP_MOD_ID_GENERIC_STATS);
/* Get bar_recv_cnt */
dp_aggregate_pdev_ctrl_frames_stats(pdev);
DP_PRINT_STATS("BAR Received Count: = %u",
pdev->stats.rx.bar_recv_cnt);
@@ -2721,11 +2721,6 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
QDF_TRACE_LEVEL_DEBUG,
FL("pkt send failed"));
qdf_nbuf_free(nbuf_copy);
} else {
if (peer_id != DP_INVALID_PEER)
DP_STATS_INC_PKT(peer,
tx.nawds_mcast,
1, qdf_nbuf_len(nbuf));
}
}
}
@@ -86,6 +86,13 @@
#define MAX_VDEV_CNT 51
#endif

/* Max no. of VDEVs, a PDEV can support */
#ifdef WLAN_PDEV_MAX_VDEVS
#define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
#else
#define DP_PDEV_MAX_VDEVS 17
#endif

#define MAX_TXDESC_POOLS 4
#define MAX_RXDESC_POOLS 4
#define MAX_REO_DEST_RINGS 4

@@ -2305,6 +2312,7 @@ struct dp_vdev {
* peer is created for VDEV
*/
qdf_atomic_t ref_cnt;
uint32_t num_peers;
};