Merge "qca-wifi: remove DP peer back pointer in DP AST entry"
This commit is contained in:

committed by
Gerrit - the friendly Code Review server

commit
22d1f03d21
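In short, this change removes the DP peer back pointer from the DP AST entry: instead of dereferencing ast_entry->peer, every caller now resolves the peer from ast_entry->peer_id through the reference-counted lookup and drops that reference when done, using a per-module reference id (DP_MOD_ID_AST, DP_MOD_ID_TX_CAPTURE, DP_MOD_ID_RX, DP_MOD_ID_TX_MULTIPASS). Below is a minimal sketch of the new pattern, assuming the DP types and the dp_peer_get_ref_by_id()/dp_peer_unref_delete() APIs exactly as they appear in the hunks that follow; the wrapper name is illustrative only and the snippet does not build outside the qca-wifi tree.

/* Sketch: mirrors the rx-capture lookup in the first hunk below. */
static bool dp_ast_peer_rx_cap_enabled(struct dp_soc *soc, uint32_t ast_index)
{
    struct dp_ast_entry *ast_entry;
    struct dp_peer *peer;
    bool rx_cap_enabled;

    ast_entry = soc->ast_table[ast_index];
    if (!ast_entry)
        return false;

    /* The ast_entry->peer back pointer is gone; take a module-tagged
     * reference via the stored peer id instead.
     */
    peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id, DP_MOD_ID_AST);
    if (!peer)
        return false;

    rx_cap_enabled = peer->rx_cap_enabled;

    /* Every successful dp_peer_get_ref_by_id() is paired with an unref
     * using the same module id.
     */
    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
    return rx_cap_enabled;
}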
@@ -386,14 +386,19 @@ dp_rx_enh_capture_is_peer_enabled(struct dp_soc *soc,
     struct dp_peer *peer;
     struct dp_ast_entry *ast_entry;
     uint32_t ast_index;
+    bool rx_cap_enabled;
 
     ast_index = ppdu_info->rx_user_status[user_id].ast_index;
     if (ast_index < wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
         ast_entry = soc->ast_table[ast_index];
         if (ast_entry) {
-            peer = ast_entry->peer;
-            if (peer && (peer->peer_id != HTT_INVALID_PEER))
-                return peer->rx_cap_enabled;
+            peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
+                                         DP_MOD_ID_AST);
+            if (peer) {
+                rx_cap_enabled = peer->rx_cap_enabled;
+                dp_peer_unref_delete(peer, DP_MOD_ID_AST);
+                return rx_cap_enabled;
+            }
         }
     }
     return false;
@@ -225,46 +225,6 @@ void dp_tx_capture_print_stats(struct dp_peer *peer)
 }
 #endif
 
-/**
- * dp_tx_cap_peer_find_by_id() - Returns peer object given the peer id
- * if delete_in_progress in not set for peer
- *
- * @soc: core DP soc context
- * @peer_id: peer id from peer object can be retrieved
- *
- * Return: struct dp_peer*: Pointer to DP peer object
- */
-static inline
-struct dp_peer *dp_tx_cap_peer_find_by_id(struct dp_soc *soc,
-                                          uint16_t peer_id)
-{
-    struct dp_peer *peer;
-
-    qdf_spin_lock_bh(&soc->peer_ref_mutex);
-    peer = __dp_peer_find_by_id(soc, peer_id);
-    if (!peer || (peer && peer->delete_in_progress)) {
-        qdf_spin_unlock_bh(&soc->peer_ref_mutex);
-        return NULL;
-    }
-
-    qdf_atomic_inc(&peer->ref_cnt);
-    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
-
-    return peer;
-}
-
-/**
- * dp_tx_cap_peer_unref_del() - dec ref and del peer if ref count is
- * taken by dp_tx_cap_peer_find_by_id
- * @peer: peer context
- *
- * Return: none
- */
-static inline void dp_tx_cap_peer_unref_del(struct dp_peer *peer)
-{
-    dp_peer_unref_delete(peer);
-}
-
 /*
  * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type
  * pdev: DP pdev handle
@@ -290,7 +250,6 @@ void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
 void dp_print_tid_qlen_per_peer(void *pdev_hdl, uint8_t consolidated)
 {
     struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
-    struct dp_soc *soc = pdev->soc;
     struct dp_vdev *vdev = NULL;
     struct dp_peer *peer = NULL;
     uint64_t c_defer_msdu_len = 0;
@@ -298,10 +257,10 @@ void dp_print_tid_qlen_per_peer(void *pdev_hdl, uint8_t consolidated)
     uint64_t c_pending_q_len = 0;
 
     DP_PRINT_STATS("pending peer msdu and ppdu:");
-    qdf_spin_lock_bh(&soc->peer_ref_mutex);
     qdf_spin_lock_bh(&pdev->vdev_list_lock);
 
     DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+        qdf_spin_lock_bh(&vdev->peer_list_lock);
         DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
             int tid;
             struct dp_tx_tid *tx_tid;
@@ -335,6 +294,7 @@ void dp_print_tid_qlen_per_peer(void *pdev_hdl, uint8_t consolidated)
             if (!consolidated)
                 dp_tx_capture_print_stats(peer);
         }
+        qdf_spin_unlock_bh(&vdev->peer_list_lock);
     }
 
     DP_PRINT_STATS("consolidated: msdu_comp_q[%d] defer_msdu_q[%d] pending_ppdu_q[%d]",
@@ -342,7 +302,6 @@ void dp_print_tid_qlen_per_peer(void *pdev_hdl, uint8_t consolidated)
                    c_pending_q_len);
 
     qdf_spin_unlock_bh(&pdev->vdev_list_lock);
-    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 }
 
 static void
@@ -1559,21 +1518,20 @@ static void dp_soc_set_txrx_ring_map_single(struct dp_soc *soc)
 static void dp_iterate_free_peer_msdu_q(void *pdev_hdl)
 {
     struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
-    struct dp_soc *soc = pdev->soc;
     struct dp_vdev *vdev = NULL;
     struct dp_peer *peer = NULL;
 
-    qdf_spin_lock_bh(&soc->peer_ref_mutex);
     qdf_spin_lock_bh(&pdev->vdev_list_lock);
     DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+        qdf_spin_lock_bh(&vdev->peer_list_lock);
         DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
             /* set peer tx cap enabled to 0, when feature disable */
             peer->tx_cap_enabled = 0;
             dp_peer_tid_queue_cleanup(peer);
         }
+        qdf_spin_unlock_bh(&vdev->peer_list_lock);
     }
     qdf_spin_unlock_bh(&pdev->vdev_list_lock);
-    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 }
 
 /*
@@ -2717,10 +2675,11 @@ QDF_STATUS dp_send_dummy_mpdu_info_to_stack(struct dp_pdev *pdev,
         qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
                             sizeof(struct ieee80211_frame_min_one));
     else {
-        peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
+        peer = dp_peer_get_ref_by_id(pdev->soc, user->peer_id,
+                                     DP_MOD_ID_TX_CAPTURE);
         if (peer) {
             vdev = peer->vdev;
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         } else {
             vdev =
             dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
@@ -2825,10 +2784,11 @@ void dp_send_dummy_rts_cts_frame(struct dp_pdev *pdev,
     ppdu_desc->tx_duration = cur_ppdu_desc->tx_duration -
                              (RTS_INTERVAL + SIFS_INTERVAL);
     ppdu_desc->user[usr_idx].peer_id = peer_id;
-    peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
+    peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
+                                 DP_MOD_ID_TX_CAPTURE);
     if (peer) {
         vdev = peer->vdev;
-        dp_tx_cap_peer_unref_del(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     } else {
         uint8_t vdev_id;
 
@@ -2870,8 +2830,9 @@ static void dp_gen_ack_rx_frame(struct dp_pdev *pdev,
         tx_capture_info->ppdu_desc->ppdu_end_timestamp;
     ppdu_desc->user[0].peer_id =
         tx_capture_info->ppdu_desc->user[0].peer_id;
-    peer = dp_peer_find_by_id(pdev->soc,
-                              tx_capture_info->ppdu_desc->user[0].peer_id);
+    peer = dp_peer_get_ref_by_id
+        (pdev->soc, tx_capture_info->ppdu_desc->user[0].peer_id,
+         DP_MOD_ID_TX_CAPTURE);
     if (peer) {
         struct dp_vdev *vdev = NULL;
 
@@ -2880,7 +2841,7 @@ static void dp_gen_ack_rx_frame(struct dp_pdev *pdev,
         qdf_mem_copy(&ppdu_desc->user[0].mac_addr,
                      vdev->mac_addr.raw,
                      QDF_MAC_ADDR_SIZE);
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     }
 
     dp_send_dummy_mpdu_info_to_stack(pdev, ppdu_desc, 0);
@@ -2916,7 +2877,8 @@ void dp_send_data_to_stack(struct dp_pdev *pdev,
     ppdu_id = ppdu_desc->ppdu_id;
     user = &ppdu_desc->user[usr_idx];
 
-    peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
+    peer = dp_peer_get_ref_by_id(pdev->soc, user->peer_id,
+                                 DP_MOD_ID_TX_CAPTURE);
     if (!peer) {
         return;
     }
@@ -3003,7 +2965,7 @@ void dp_send_data_to_stack(struct dp_pdev *pdev,
         dp_gen_ack_rx_frame(pdev, &tx_capture_info);
 
 return_send_to_stack:
-    dp_tx_cap_peer_unref_del(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     return;
 }
 
@@ -3515,12 +3477,14 @@ dp_tx_mon_proc_pending_ppdus(struct dp_pdev *pdev, struct dp_tx_tid *tx_tid,
                 qdf_nbuf_copy_expand_fraglist(
                     mpdu_nbuf, MAX_MONITOR_HEADER, 0);
 
-                peer = dp_tx_cap_peer_find_by_id(pdev->soc,
-                                                 user->peer_id);
+                peer = dp_peer_get_ref_by_id
+                    (pdev->soc, user->peer_id,
+                     DP_MOD_ID_TX_CAPTURE);
                 if (peer) {
                     dp_tx_cap_stats_mpdu_update(peer,
                                                 PEER_MPDU_CLONE, 1);
-                    dp_tx_cap_peer_unref_del(peer);
+                    dp_peer_unref_delete
+                        (peer, DP_MOD_ID_TX_CAPTURE);
                 }
                 user->failed_bitmap[k] |=
                 SEQ_SEG_MSK(user->failed_bitmap[k], i);
@@ -3803,20 +3767,21 @@ dp_check_mgmt_ctrl_ppdu(struct dp_pdev *pdev,
         subtype = 0;
     }
 
-    peer = dp_tx_cap_peer_find_by_id(pdev->soc, ppdu_desc->user[0].peer_id);
+    peer = dp_peer_get_ref_by_id(pdev->soc, ppdu_desc->user[0].peer_id,
+                                 DP_MOD_ID_TX_CAPTURE);
     if (peer && !peer->bss_peer) {
         if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer,
                                             ppdu_desc->user[0].mac_addr
                                             )) {
             qdf_nbuf_free(nbuf_ppdu_desc);
             status = 0;
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
             goto free_ppdu_desc;
         }
-        dp_tx_cap_peer_unref_del(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     } else {
         if (peer)
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         if (!(type == IEEE80211_FC0_TYPE_MGT &&
               (subtype == MGMT_SUBTYPE_PROBE_RESP >> 4 ||
                subtype == MGMT_SUBTYPE_DISASSOC >> 4 ||
@@ -4277,14 +4242,15 @@ dp_tx_ppdu_stats_flush(struct dp_pdev *pdev,
     struct cdp_tx_completion_ppdu_user *user;
 
     user = &ppdu_desc->user[usr_idx];
-    peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
+    peer = dp_peer_get_ref_by_id(pdev->soc, user->peer_id,
+                                 DP_MOD_ID_TX_CAPTURE);
 
     if (!peer)
         return;
 
     dp_peer_tx_cap_tid_queue_flush_tlv(pdev, peer, ppdu_desc, usr_idx);
 
-    dp_tx_cap_peer_unref_del(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     return;
 }
 
@@ -4424,8 +4390,9 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
             if (user->delayed_ba || user->skip == 1)
                 continue;
 
-            peer = dp_tx_cap_peer_find_by_id(pdev->soc,
-                                             user->peer_id);
+            peer = dp_peer_get_ref_by_id(pdev->soc,
+                                         user->peer_id,
+                                         DP_MOD_ID_TX_CAPTURE);
             if (!peer) {
                 dp_ppdu_desc_free(ptr_nbuf_list, usr_idx);
                 user->skip = 1;
@@ -4461,7 +4428,8 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
                           "%s: ppdu_desc->mpdus allocation failed",
                           __func__);
                 dp_ppdu_desc_free_all(ptr_nbuf_list, num_users);
-                dp_tx_cap_peer_unref_del(peer);
+                dp_peer_unref_delete(peer,
+                                     DP_MOD_ID_TX_CAPTURE);
                 dp_print_pdev_tx_capture_stats(pdev);
                 qdf_assert_always(0);
                 return;
@@ -4470,7 +4438,8 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
             if (qdf_unlikely(user->ba_size >
                              CDP_BA_256_BIT_MAP_SIZE_DWORDS *
                              SEQ_SEG_SZ_BITS(user->failed_bitmap))) {
-                dp_tx_cap_peer_unref_del(peer);
+                dp_peer_unref_delete(peer,
+                                     DP_MOD_ID_TX_CAPTURE);
                 qdf_assert_always(0);
                 return;
             }
@@ -4566,7 +4535,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
                 user->ba_size = seq_no - start_seq + 1;
             }
 
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         }
     }
 
@@ -4601,7 +4570,8 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
                 continue;
 
             peer_id = cur_ppdu_desc->user[usr_idx].peer_id;
-            peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
+            peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
+                                         DP_MOD_ID_TX_CAPTURE);
             if (!peer) {
                 dp_ppdu_desc_free(ptr_nbuf_list, usr_idx);
                 continue;
@@ -4654,6 +4624,8 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
                 tmp_ppdu_q = &tx_tid->pending_ppdu_q;
                 tmp_nbuf = qdf_nbuf_queue_remove(tmp_ppdu_q);
                 if (qdf_unlikely(!tmp_nbuf)) {
+                    dp_peer_unref_delete
+                        (peer, DP_MOD_ID_TX_CAPTURE);
                     qdf_assert_always(0);
                     return;
                 }
@@ -4669,7 +4641,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
                 qdf_nbuf_free(tmp_nbuf);
                 pdev->tx_capture.pend_ppdu_dropped++;
             }
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         }
     }
 }
@@ -4766,8 +4738,9 @@ dp_tx_cap_proc_per_ppdu_info(struct dp_pdev *pdev, qdf_nbuf_t nbuf_ppdu,
             }
 
             peer_id = user->peer_id;
-            peer = dp_tx_cap_peer_find_by_id(pdev->soc,
-                                             peer_id);
+            peer = dp_peer_get_ref_by_id(pdev->soc,
+                                         peer_id,
+                                         DP_MOD_ID_TX_CAPTURE);
             /**
              * peer can be NULL
              */
@@ -4926,7 +4899,7 @@ dequeue_msdu_again:
                                         mpdu_suc);
             dp_tx_cap_stats_mpdu_update(peer, PEER_MPDU_TRI,
                                         mpdu_tri);
-            dp_tx_cap_peer_unref_del(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
             /* get reference count */
             ref_cnt = qdf_nbuf_get_users(nbuf_ppdu);
             continue;
@@ -4936,7 +4909,8 @@ free_nbuf_dec_ref:
             qdf_nbuf_free(nbuf_ppdu);
             ref_cnt--;
             if (peer)
-                dp_tx_cap_peer_unref_del(peer);
+                dp_peer_unref_delete(peer,
+                                     DP_MOD_ID_TX_CAPTURE);
             continue;
         }
 
@@ -5481,20 +5455,15 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
         return QDF_STATUS_E_FAILURE;
     }
 
-    peer = ast_entry->peer;
-    if (!peer || peer->peer_id == HTT_INVALID_PEER) {
-        qdf_spin_unlock_bh(&soc->ast_lock);
-        return QDF_STATUS_E_FAILURE;
-    }
-    peer_id = peer->peer_id;
+    peer_id = ast_entry->peer_id;
     qdf_spin_unlock_bh(&soc->ast_lock);
 
-    peer = dp_peer_find_by_id(soc, peer_id);
+    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_CAPTURE);
     if (!peer)
         return QDF_STATUS_E_FAILURE;
 
     if (!dp_peer_or_pdev_tx_cap_enabled(pdev, NULL, peer->mac_addr.raw)) {
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         return QDF_STATUS_E_FAILURE;
     }
 
@@ -5509,8 +5478,10 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
                 break;
             }
         }
-        if (!match)
+        if (!match) {
+            dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
             return QDF_STATUS_E_FAILURE;
+        }
     }
 
     set_mpdu_info(&tx_capture_info,
@@ -5526,13 +5497,13 @@ QDF_STATUS dp_send_cts_frame_to_stack(struct dp_soc *soc,
                   4, FALSE);
 
     if (!tx_capture_info.mpdu_nbuf) {
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         return QDF_STATUS_E_NOMEM;
     }
 
     dp_gen_cts_frame(ppdu_info, peer,
                      tx_capture_info.mpdu_nbuf);
-    dp_peer_unref_del_find_by_id(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                          &tx_capture_info, HTT_INVALID_PEER,
                          WDI_NO_VAL, pdev->pdev_id);
@@ -5620,21 +5591,16 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
         return;
     }
 
-    peer = ast_entry->peer;
-    if (!peer || peer->peer_id == HTT_INVALID_PEER) {
-        qdf_spin_unlock_bh(&soc->ast_lock);
-        return;
-    }
-    peer_id = peer->peer_id;
+    peer_id = ast_entry->peer_id;
     qdf_spin_unlock_bh(&soc->ast_lock);
 
-    peer = dp_peer_find_by_id(soc, peer_id);
+    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_CAPTURE);
     if (!peer)
         return;
 
     if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer,
                                         peer->mac_addr.raw)) {
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         return;
     }
 
@@ -5649,7 +5615,7 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
                   4, FALSE);
 
     if (!tx_capture_info.mpdu_nbuf) {
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         return;
     }
 
@@ -5667,7 +5633,7 @@ void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
                              tx_capture_info.mpdu_nbuf);
     }
 
-    dp_peer_unref_del_find_by_id(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
     dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                          &tx_capture_info, HTT_INVALID_PEER,
                          WDI_NO_VAL, pdev->pdev_id);
@@ -5888,21 +5854,16 @@ QDF_STATUS dp_send_noack_frame_to_stack(struct dp_soc *soc,
         return QDF_STATUS_E_FAILURE;
     }
 
-    peer = ast_entry->peer;
-    if (!peer || peer->peer_id == HTT_INVALID_PEER) {
-        qdf_spin_unlock_bh(&soc->ast_lock);
-        return QDF_STATUS_E_FAILURE;
-    }
-    peer_id = peer->peer_id;
+    peer_id = ast_entry->peer_id;
     qdf_spin_unlock_bh(&soc->ast_lock);
 
-    peer = dp_peer_find_by_id(soc, peer_id);
+    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_CAPTURE);
     if (!peer) {
         return QDF_STATUS_E_FAILURE;
     }
 
     if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer, peer->mac_addr.raw)) {
-        dp_peer_unref_del_find_by_id(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
         return QDF_STATUS_E_FAILURE;
     }
 
||||
@@ -5923,14 +5884,14 @@ QDF_STATUS dp_send_noack_frame_to_stack(struct dp_soc *soc,
|
||||
4, FALSE);
|
||||
|
||||
if (!tx_capture_info.mpdu_nbuf) {
|
||||
dp_peer_unref_del_find_by_id(peer);
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
dp_gen_noack_frame(ppdu_info, peer,
|
||||
tx_capture_info.mpdu_nbuf, mon_mpdu);
|
||||
|
||||
dp_peer_unref_del_find_by_id(peer);
|
||||
dp_peer_unref_delete(peer, DP_MOD_ID_TX_CAPTURE);
|
||||
dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
|
||||
&tx_capture_info, HTT_INVALID_PEER,
|
||||
WDI_NO_VAL, pdev->pdev_id);
|
||||
|
@@ -55,9 +55,6 @@ static void dp_ast_aging_timer_fn(void *soc_hdl)
         check_wds_ase = true;
     }
 
-    /* Peer list access lock */
-    qdf_spin_lock_bh(&soc->peer_ref_mutex);
-
     /* AST list access lock */
     qdf_spin_lock_bh(&soc->ast_lock);
 
@@ -65,6 +62,7 @@ static void dp_ast_aging_timer_fn(void *soc_hdl)
         pdev = soc->pdev_list[i];
         qdf_spin_lock_bh(&pdev->vdev_list_lock);
         DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+            qdf_spin_lock_bh(&vdev->peer_list_lock);
             DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
                 DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
                     /*
@@ -107,12 +105,12 @@ static void dp_ast_aging_timer_fn(void *soc_hdl)
                     }
                 }
             }
+            qdf_spin_unlock_bh(&vdev->peer_list_lock);
         }
         qdf_spin_unlock_bh(&pdev->vdev_list_lock);
     }
 
     qdf_spin_unlock_bh(&soc->ast_lock);
-    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
     if (qdf_atomic_read(&soc->cmn_init_done))
         qdf_timer_mod(&soc->ast_aging_timer,
@@ -211,7 +209,8 @@ void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
         return;
 
     soc = vdev->pdev->soc;
-    peer = dp_vdev_bss_peer_ref_n_get(soc, vdev);
+    peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
+                                      DP_MOD_ID_AST);
 
     if (!peer) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
@@ -233,7 +232,7 @@ void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
                     mac_addr,
                     CDP_TXRX_AST_TYPE_MEC,
                     flags);
-    dp_peer_unref_delete(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
 }
 
 /**
@@ -259,27 +258,15 @@ dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc, uint8_t vdev_id, u_int32_t val)
         return QDF_STATUS_E_INVAL;
     }
 
-    if (vdev->opmode == wlan_op_mode_ap) {
-        /* for ap, set it on bss_peer */
-        TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-            if (peer->bss_peer) {
-                peer->wds_ecm.wds_rx_filter = 1;
-                peer->wds_ecm.wds_rx_ucast_4addr =
-                    (val & WDS_POLICY_RX_UCAST_4ADDR) ?
-                    1 : 0;
-                peer->wds_ecm.wds_rx_mcast_4addr =
-                    (val & WDS_POLICY_RX_MCAST_4ADDR) ?
-                    1 : 0;
-                break;
-            }
-        }
-    } else if (vdev->opmode == wlan_op_mode_sta) {
-        peer = TAILQ_FIRST(&vdev->peer_list);
+    peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
+
+    if (peer) {
         peer->wds_ecm.wds_rx_filter = 1;
         peer->wds_ecm.wds_rx_ucast_4addr =
             (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
         peer->wds_ecm.wds_rx_mcast_4addr =
             (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
+        dp_peer_unref_delete(peer, DP_MOD_ID_AST);
     }
 
     return QDF_STATUS_SUCCESS;
@@ -303,7 +290,8 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
 {
     struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
                                                   peer_mac, 0,
-                                                  vdev_id);
+                                                  vdev_id,
+                                                  DP_MOD_ID_AST);
     if (!peer) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   FL("peer is NULL for mac %pM vdev_id %d"),
@@ -332,7 +320,7 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
               "peer->wds_ecm.wds_tx_mcast_4addr %d\n",
               peer->wds_ecm.wds_tx_mcast_4addr);
 
-    dp_peer_unref_delete(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
     return QDF_STATUS_SUCCESS;
 }
 
@@ -347,17 +335,15 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
     int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);
 
     if (vdev->opmode == wlan_op_mode_ap) {
-        TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
-            if (bss_peer->bss_peer) {
-                /* if wds policy check is not enabled on this vdev, accept all frames */
-                if (!bss_peer->wds_ecm.wds_rx_filter) {
-                    return 1;
-                }
-                break;
-            }
+        bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
+        /* if wds policy check is not enabled on this vdev, accept all frames */
+        if (bss_peer && !bss_peer->wds_ecm.wds_rx_filter) {
+            dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
+            return 1;
         }
         rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
         rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
+        dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
     } else {    /* sta mode */
         if (!peer->wds_ecm.wds_rx_filter) {
             return 1;
@@ -507,7 +493,8 @@ uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
         return DP_VLAN_UNTAGGED;
     }
 
-    peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL);
+    peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
+                                  DP_MOD_ID_TX_MULTIPASS);
 
     if (qdf_unlikely(peer == NULL))
         return DP_VLAN_UNTAGGED;
@@ -517,11 +504,11 @@ uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
      * Send the frame as it is.
      */
     if (*vlan_id == peer->vlan_id) {
-        dp_peer_unref_delete(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
         return DP_VLAN_TAGGED_UNICAST;
     }
 
-    dp_peer_unref_delete(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
     return DP_VLAN_UNTAGGED;
 }
 
@@ -663,7 +650,7 @@ void dp_peer_multipass_list_remove(struct dp_peer *peer)
     qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
 
     if (found)
-        dp_peer_unref_delete(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
 }
 
 /**
@@ -679,7 +666,8 @@ static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
             uint8_t vdev_id, uint16_t vlan_id)
 {
     struct dp_peer *peer =
-            dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
+            dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
+                                   DP_MOD_ID_TX_MULTIPASS);
 
     if (qdf_unlikely(!peer)) {
         qdf_err("NULL peer");
@@ -694,7 +682,7 @@ static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
         dp_debug("peer already added to vdev multipass list"
                  "MAC: "QDF_MAC_ADDR_STR" vlan: %d ",
                  QDF_MAC_ADDR_ARRAY(peer->mac_addr.raw), peer->vlan_id);
-        dp_peer_unref_delete(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
         return;
     }
 
@@ -905,7 +893,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
     if (is_wds)
         return;
 
-    peer = dp_peer_find_by_id(soc, peer_id);
+    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_AST);
     if (!peer) {
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                   "%s: Invalid peer\n", __func__);
@@ -916,9 +904,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
     if (peer->vdev->opmode != wlan_op_mode_ap) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s: Peer ast flow map not in STA mode\n", __func__);
-        /* Release peer reference */
-        dp_peer_unref_del_find_by_id(peer);
-        return;
+        goto end;
     }
 
     /* Making sure the peer is for this mac address */
@@ -926,8 +912,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                  (struct qdf_mac_addr *)peer->mac_addr.raw)) {
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                   "%s: Peer mac address mismatch\n", __func__);
-        dp_peer_unref_del_find_by_id(peer);
-        return;
+        goto end;
     }
 
     /* Ast entry flow mapping not valid for self peer map */
@@ -935,8 +920,7 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                  (struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                   "%s: Ast flow mapping not valid for self peer \n", __func__);
-        dp_peer_unref_del_find_by_id(peer);
-        return;
+        goto end;
     }
 
     /* Fill up ast index <---> flow id mapping table for this peer */
@@ -976,8 +960,9 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                                peer->vdev->vdev_id, peer_mac_addr);
     }
 
+end:
     /* Release peer reference */
-    dp_peer_unref_del_find_by_id(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
 }
 
 /**
@@ -1003,7 +988,8 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
     }
 
     peer = dp_peer_find_hash_find((struct dp_soc *)soc,
-                                  peer_mac_addr, 0, vdev_id);
+                                  peer_mac_addr, 0, vdev_id,
+                                  DP_MOD_ID_AST);
     if (!peer) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s: Invalid peer\n", __func__);
@@ -1028,7 +1014,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
     if (i == DP_PEER_AST_FLOWQ_MAX) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s: ast index not found for flow %d\n", __func__, flow_id);
-        dp_peer_unref_delete(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_AST);
         return -1;
     }
 
@@ -1036,7 +1022,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
     if (!peer->peer_ast_flowq_idx[i].is_valid) {
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s: ast index is invalid for flow %d\n", __func__, flow_id);
-        dp_peer_unref_delete(peer);
+        dp_peer_unref_delete(peer, DP_MOD_ID_AST);
         return -1;
     }
 
@@ -1050,7 +1036,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
             & (1 << tid))) {
             /* Release peer reference */
             ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
-            dp_peer_unref_delete(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_AST);
             return ast_index;
         } else {
             QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1060,7 +1046,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
              * TID is not valid for this flow
              * Return -1
              */
-            dp_peer_unref_delete(peer);
+            dp_peer_unref_delete(peer, DP_MOD_ID_AST);
             return -1;
         }
     }
@@ -1070,7 +1056,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
      * UDP/NON UDP flow id
      */
     ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
-    dp_peer_unref_delete(peer);
+    dp_peer_unref_delete(peer, DP_MOD_ID_AST);
     return ast_index;
 }
 #endif
@@ -230,7 +230,6 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
         ast->is_active = TRUE;
 
     if (sa_sw_peer_id != ta_peer->peer_id) {
-        sa_peer = ast->peer;
 
         if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
             (ast->type != CDP_TXRX_AST_TYPE_SELF) &&
@@ -279,8 +278,13 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
          * Kickout, when direct associated peer(SA) roams
          * to another AP and reachable via TA peer
          */
+        sa_peer = dp_peer_get_ref_by_id(soc, ast->peer_id,
+                                        DP_MOD_ID_RX);
+        if (!sa_peer)
+            return;
+
         if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
-            !sa_peer->delete_in_progress) {
+            sa_peer->delete_in_progress) {
             qdf_mem_copy(wds_src_mac,
                          (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
                          QDF_MAC_ADDR_SIZE);
@@ -292,6 +296,7 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
                                 wds_src_mac);
             }
         }
+        dp_peer_unref_delete(sa_peer, DP_MOD_ID_RX);
         return;
     }
 