qca-wifi: use dp_peer_find_by_id API to get the peer

Use the common dp_peer_find_by_id API, as it now takes the
peer refcount itself

Change-Id: Ib2604f1f7d9acc9ffcc33969fc8900089cd245d2
This commit is contained in:
Chaithanya Garrepalli
2020-08-06 10:52:55 +05:30
parent ccf15df948
commit 78363d7d9c
2 changed files with 51 additions and 103 deletions

View File

@@ -225,46 +225,6 @@ void dp_tx_capture_print_stats(struct dp_peer *peer)
}
#endif
/**
* dp_tx_cap_peer_find_by_id() - Returns peer object given the peer id
* if delete_in_progress is not set for peer
*
* @soc: core DP soc context
* @peer_id: peer id from which the peer object can be retrieved
*
* Return: struct dp_peer*: Pointer to DP peer object
*/
static inline
struct dp_peer *dp_tx_cap_peer_find_by_id(struct dp_soc *soc,
uint16_t peer_id)
{
struct dp_peer *peer;
qdf_spin_lock_bh(&soc->peer_map_lock);
peer = __dp_peer_find_by_id(soc, peer_id);
if (!peer || (peer && peer->delete_in_progress)) {
qdf_spin_unlock_bh(&soc->peer_map_lock);
return NULL;
}
qdf_atomic_inc(&peer->ref_cnt);
qdf_spin_unlock_bh(&soc->peer_map_lock);
return peer;
}
/**
* dp_tx_cap_peer_unref_del() - dec ref and del peer if ref count is
* taken by dp_tx_cap_peer_find_by_id
* @peer: peer context
*
* Return: none
*/
static inline void dp_tx_cap_peer_unref_del(struct dp_peer *peer)
{
dp_peer_unref_delete(peer);
}
/*
* dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type
* pdev: DP pdev handle
@@ -2715,10 +2675,10 @@ QDF_STATUS dp_send_dummy_mpdu_info_to_stack(struct dp_pdev *pdev,
qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
sizeof(struct ieee80211_frame_min_one));
else {
peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
if (peer) {
vdev = peer->vdev;
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
} else {
vdev =
dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
@@ -2823,10 +2783,10 @@ void dp_send_dummy_rts_cts_frame(struct dp_pdev *pdev,
ppdu_desc->tx_duration = cur_ppdu_desc->tx_duration -
(RTS_INTERVAL + SIFS_INTERVAL);
ppdu_desc->user[usr_idx].peer_id = peer_id;
peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
peer = dp_peer_find_by_id(pdev->soc, peer_id);
if (peer) {
vdev = peer->vdev;
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
} else {
uint8_t vdev_id;
@@ -2878,7 +2838,7 @@ static void dp_gen_ack_rx_frame(struct dp_pdev *pdev,
qdf_mem_copy(&ppdu_desc->user[0].mac_addr,
vdev->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
}
dp_send_dummy_mpdu_info_to_stack(pdev, ppdu_desc, 0);
@@ -2914,7 +2874,7 @@ void dp_send_data_to_stack(struct dp_pdev *pdev,
ppdu_id = ppdu_desc->ppdu_id;
user = &ppdu_desc->user[usr_idx];
peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
if (!peer) {
return;
}
@@ -3001,7 +2961,7 @@ void dp_send_data_to_stack(struct dp_pdev *pdev,
dp_gen_ack_rx_frame(pdev, &tx_capture_info);
return_send_to_stack:
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -3513,12 +3473,12 @@ dp_tx_mon_proc_pending_ppdus(struct dp_pdev *pdev, struct dp_tx_tid *tx_tid,
qdf_nbuf_copy_expand_fraglist(
mpdu_nbuf, MAX_MONITOR_HEADER, 0);
peer = dp_tx_cap_peer_find_by_id(pdev->soc,
peer = dp_peer_find_by_id(pdev->soc,
user->peer_id);
if (peer) {
dp_tx_cap_stats_mpdu_update(peer,
PEER_MPDU_CLONE, 1);
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
}
user->failed_bitmap[k] |=
SEQ_SEG_MSK(user->failed_bitmap[k], i);
@@ -3801,20 +3761,20 @@ dp_check_mgmt_ctrl_ppdu(struct dp_pdev *pdev,
subtype = 0;
}
peer = dp_tx_cap_peer_find_by_id(pdev->soc, ppdu_desc->user[0].peer_id);
peer = dp_peer_find_by_id(pdev->soc, ppdu_desc->user[0].peer_id);
if (peer && !peer->bss_peer) {
if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer,
ppdu_desc->user[0].mac_addr
)) {
qdf_nbuf_free(nbuf_ppdu_desc);
status = 0;
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
goto free_ppdu_desc;
}
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
} else {
if (peer)
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
if (!(type == IEEE80211_FC0_TYPE_MGT &&
(subtype == MGMT_SUBTYPE_PROBE_RESP >> 4 ||
subtype == MGMT_SUBTYPE_DISASSOC >> 4 ||
@@ -4275,14 +4235,14 @@ dp_tx_ppdu_stats_flush(struct dp_pdev *pdev,
struct cdp_tx_completion_ppdu_user *user;
user = &ppdu_desc->user[usr_idx];
peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
if (!peer)
return;
dp_peer_tx_cap_tid_queue_flush_tlv(pdev, peer, ppdu_desc, usr_idx);
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -4422,8 +4382,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
if (user->delayed_ba || user->skip == 1)
continue;
peer = dp_tx_cap_peer_find_by_id(pdev->soc,
user->peer_id);
peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
if (!peer) {
dp_ppdu_desc_free(ptr_nbuf_list, usr_idx);
user->skip = 1;
@@ -4459,7 +4418,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
"%s: ppdu_desc->mpdus allocation failed",
__func__);
dp_ppdu_desc_free_all(ptr_nbuf_list, num_users);
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
dp_print_pdev_tx_capture_stats(pdev);
qdf_assert_always(0);
return;
@@ -4468,7 +4427,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
if (qdf_unlikely(user->ba_size >
CDP_BA_256_BIT_MAP_SIZE_DWORDS *
SEQ_SEG_SZ_BITS(user->failed_bitmap))) {
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
qdf_assert_always(0);
return;
}
@@ -4564,7 +4523,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
user->ba_size = seq_no - start_seq + 1;
}
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
}
}
@@ -4599,7 +4558,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
continue;
peer_id = cur_ppdu_desc->user[usr_idx].peer_id;
peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
peer = dp_peer_find_by_id(pdev->soc, peer_id);
if (!peer) {
dp_ppdu_desc_free(ptr_nbuf_list, usr_idx);
continue;
@@ -4652,6 +4611,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
tmp_ppdu_q = &tx_tid->pending_ppdu_q;
tmp_nbuf = qdf_nbuf_queue_remove(tmp_ppdu_q);
if (qdf_unlikely(!tmp_nbuf)) {
dp_peer_unref_delete(peer);
qdf_assert_always(0);
return;
}
@@ -4667,7 +4627,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
qdf_nbuf_free(tmp_nbuf);
pdev->tx_capture.pend_ppdu_dropped++;
}
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
}
}
}
@@ -4764,7 +4724,7 @@ dp_tx_cap_proc_per_ppdu_info(struct dp_pdev *pdev, qdf_nbuf_t nbuf_ppdu,
}
peer_id = user->peer_id;
peer = dp_tx_cap_peer_find_by_id(pdev->soc,
peer = dp_peer_find_by_id(pdev->soc,
peer_id);
/**
* peer can be NULL
@@ -4924,7 +4884,7 @@ dequeue_msdu_again:
mpdu_suc);
dp_tx_cap_stats_mpdu_update(peer, PEER_MPDU_TRI,
mpdu_tri);
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
/* get reference count */
ref_cnt = qdf_nbuf_get_users(nbuf_ppdu);
continue;
@@ -4934,7 +4894,7 @@ free_nbuf_dec_ref:
qdf_nbuf_free(nbuf_ppdu);
ref_cnt--;
if (peer)
dp_tx_cap_peer_unref_del(peer);
dp_peer_unref_delete(peer);
continue;
}
@@ -5492,7 +5452,7 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
return QDF_STATUS_E_FAILURE;
if (!dp_peer_or_pdev_tx_cap_enabled(pdev, NULL, peer->mac_addr.raw)) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return QDF_STATUS_E_FAILURE;
}
@@ -5507,9 +5467,11 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
break;
}
}
if (!match)
if (!match) {
dp_peer_unref_delete(peer);
return QDF_STATUS_E_FAILURE;
}
}
set_mpdu_info(&tx_capture_info,
&ppdu_info->rx_status, rx_user_status);
@@ -5524,13 +5486,13 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
4, FALSE);
if (!tx_capture_info.mpdu_nbuf) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return QDF_STATUS_E_NOMEM;
}
dp_gen_cts_frame(ppdu_info, peer,
tx_capture_info.mpdu_nbuf);
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
&tx_capture_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
@@ -5632,7 +5594,7 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer,
peer->mac_addr.raw)) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -5647,7 +5609,7 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
4, FALSE);
if (!tx_capture_info.mpdu_nbuf) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -5665,7 +5627,7 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
tx_capture_info.mpdu_nbuf);
}
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
&tx_capture_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
@@ -5900,7 +5862,7 @@ QDF_STATUS dp_send_noack_frame_to_stack(struct dp_soc *soc,
}
if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer, peer->mac_addr.raw)) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return QDF_STATUS_E_FAILURE;
}
@@ -5921,14 +5883,14 @@ QDF_STATUS dp_send_noack_frame_to_stack(struct dp_soc *soc,
4, FALSE);
if (!tx_capture_info.mpdu_nbuf) {
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return QDF_STATUS_E_NOMEM;
}
dp_gen_noack_frame(ppdu_info, peer,
tx_capture_info.mpdu_nbuf, mon_mpdu);
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
&tx_capture_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);

View File

@@ -257,27 +257,15 @@ dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc, uint8_t vdev_id, u_int32_t val)
return QDF_STATUS_E_INVAL;
}
if (vdev->opmode == wlan_op_mode_ap) {
/* for ap, set it on bss_peer */
TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
if (peer->bss_peer) {
peer->wds_ecm.wds_rx_filter = 1;
peer->wds_ecm.wds_rx_ucast_4addr =
(val & WDS_POLICY_RX_UCAST_4ADDR) ?
1 : 0;
peer->wds_ecm.wds_rx_mcast_4addr =
(val & WDS_POLICY_RX_MCAST_4ADDR) ?
1 : 0;
break;
}
}
} else if (vdev->opmode == wlan_op_mode_sta) {
peer = TAILQ_FIRST(&vdev->peer_list);
peer = dp_vdev_bss_peer_ref_n_get(vdev);
if (peer) {
peer->wds_ecm.wds_rx_filter = 1;
peer->wds_ecm.wds_rx_ucast_4addr =
(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
peer->wds_ecm.wds_rx_mcast_4addr =
(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
dp_peer_unref_delete(peer);
}
return QDF_STATUS_SUCCESS;
@@ -345,17 +333,15 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);
if (vdev->opmode == wlan_op_mode_ap) {
TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
if (bss_peer->bss_peer) {
bss_peer = dp_vdev_bss_peer_ref_n_get(vdev);
/* if wds policy check is not enabled on this vdev, accept all frames */
if (!bss_peer->wds_ecm.wds_rx_filter) {
if (bss_peer && !bss_peer->wds_ecm.wds_rx_filter) {
dp_peer_unref_delete(bss_peer);
return 1;
}
break;
}
}
rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
dp_peer_unref_delete(bss_peer);
} else { /* sta mode */
if (!peer->wds_ecm.wds_rx_filter) {
return 1;
@@ -915,7 +901,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: Peer ast flow map not in STA mode\n", __func__);
/* Release peer reference */
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -924,7 +910,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
(struct qdf_mac_addr *)peer->mac_addr.raw)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Peer mac address mismatch\n", __func__);
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -933,7 +919,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
(struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Ast flow mapping not valid for self peer \n", __func__);
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
return;
}
@@ -975,7 +961,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
}
/* Release peer reference */
dp_peer_unref_del_find_by_id(peer);
dp_peer_unref_delete(peer);
}
/**