qcacmn: refine dp_vdev_flush_peers logic

Refine dp_vdev_flush_peers,
a. wrap ast entry flushing into dp_peer_flush_ast_entry()
b. if unmap_only is true, the logic stays the same.
c. if unmap_only is false, also use __dp_peer_find_by_id
instead of dp_peer_find_by_id to get the peer object.
Call dp_peer_delete_wifi3 only if the peer's valid flag is true,
to avoid accessing invalid peer members.

Change-Id: I7a6d53e124ff369305b91050731901ff0a43d6af
CRs-Fixed: 2534274
This commit is contained in:
Jinwei Chen
2019-09-24 20:37:49 +08:00
committed by nshrivas
parent b07c34cc2d
commit 1c76e897e3

View File

@@ -4771,6 +4771,33 @@ static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
"DP Vdev Register success");
}
/**
 * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @peer_id: Peer ID
 * @vdev_id: Vdev ID
 *
 * Sends an unmap indication (free_wds_count = 1) for every AST entry
 * attached to @peer so the entries are released before the peer
 * reference is dropped.
 *
 * Return: void
 */
static inline void dp_peer_flush_ast_entry(struct dp_soc *soc,
struct dp_peer *peer,
uint16_t peer_id,
uint8_t vdev_id)
{
struct dp_ast_entry *ast_entry, *next_entry;

/* Per-peer AST flushing is only needed with the v2 map/unmap scheme */
if (!soc->is_peer_map_unmap_v2)
return;

DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, next_entry) {
dp_rx_peer_unmap_handler(soc, peer_id,
vdev_id,
ast_entry->mac_addr.raw,
1);
}
}
/**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
* @vdev: Datapath VDEV handle
@@ -4785,13 +4812,11 @@ static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
struct dp_soc *soc = pdev->soc;
struct dp_peer *peer;
uint16_t *peer_ids;
struct dp_ast_entry *ase, *tmp_ase;
uint8_t i = 0, j = 0;
peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
if (!peer_ids) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"DP alloc failure - unable to flush peers");
dp_err("DP alloc failure - unable to flush peers");
return;
}
@@ -4805,70 +4830,33 @@ static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
qdf_spin_unlock_bh(&soc->peer_ref_mutex);
for (i = 0; i < j ; i++) {
if (unmap_only) {
peer = __dp_peer_find_by_id(soc, peer_ids[i]);
peer = __dp_peer_find_by_id(soc, peer_ids[i]);
if (peer) {
if (soc->is_peer_map_unmap_v2) {
/* free AST entries of peer before
* release peer reference
*/
DP_PEER_ITERATE_ASE_LIST(peer, ase,
tmp_ase) {
dp_rx_peer_unmap_handler
(soc, peer_ids[i],
vdev->vdev_id,
ase->mac_addr.raw,
1);
}
}
dp_rx_peer_unmap_handler(soc, peer_ids[i],
vdev->vdev_id,
peer->mac_addr.raw,
0);
}
} else {
peer = __dp_peer_find_by_id(soc, peer_ids[i]);
if (!peer)
continue;
if (peer) {
dp_info("peer: %pM is getting flush",
peer->mac_addr.raw);
dp_info("peer: %pM is getting flush",
peer->mac_addr.raw);
/* free AST entries of peer */
dp_peer_flush_ast_entry(soc, peer,
peer_ids[i],
vdev->vdev_id);
/*
* If peer flag valid is false, then dp_peer_delete_wifi3()
* is already executed, skip doing it again to avoid
* peer member invalid accessing.
*/
if (!unmap_only && peer->valid)
dp_peer_delete_wifi3(peer, 0);
if (soc->is_peer_map_unmap_v2) {
/* free AST entries of peer before
* release peer reference
*/
DP_PEER_ITERATE_ASE_LIST(peer, ase,
tmp_ase) {
dp_rx_peer_unmap_handler
(soc, peer_ids[i],
vdev->vdev_id,
ase->mac_addr.raw,
1);
}
}
dp_peer_delete_wifi3(peer, 0);
/*
* we need to call dp_peer_unref_del_find_by_id
* to remove additional ref count incremented
* by dp_peer_find_by_id() call.
*
* Hold the ref count while executing
* dp_peer_delete_wifi3() call.
*
*/
dp_peer_unref_del_find_by_id(peer);
dp_rx_peer_unmap_handler(soc, peer_ids[i],
vdev->vdev_id,
peer->mac_addr.raw, 0);
}
}
dp_rx_peer_unmap_handler(soc, peer_ids[i],
vdev->vdev_id,
peer->mac_addr.raw, 0);
}
qdf_mem_free(peer_ids);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
FL("Flushed peers for vdev object %pK "), vdev);
dp_info("Flushed peers for vdev object %pK ", vdev);
}
/*