qcacmn: Move per packet params to txrx_peer

Move the dp_peer parameters that are used in the
per-packet path to txrx_peer, and move the params
related to the monitor path to the monitor peer.

Initialize txrx_peer and monitor peers.

Change-Id: I0449c67c1cd47deb76bc89b5ddc64174f6694eb0
CRs-Fixed: 3095637
Author: Pavankumar Nandeshwar
Date: 2021-12-15 03:09:31 -08:00
Committed by: Madan Koyyalamudi
Parent: a795c47f70
Commit: 9c6800383c
9 changed files with 405 additions and 220 deletions
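
In outline, the commit splits the monolithic peer object three ways: dp_peer keeps control-path state, dp_txrx_peer carries only the fields touched per packet, and dp_mon_peer absorbs the monitor-path flags. An abridged sketch of the resulting layout, trimmed to the fields this commit actually moves (ordering and the omitted members are assumptions; see the struct hunks below):

/* Sketch only: abridged from the struct changes in this commit. */
struct dp_txrx_peer {                       /* per-packet path */
	struct dp_vdev *vdev;
	uint16_t peer_id;
	uint8_t authorize:1, in_twt:1, hw_txrx_stats_en:1, mld_peer:1;
	uint16_t nawds_enabled:1, bss_peer:1, isolation:1, wds_enabled:1;
	struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
	/* ... security keys, bufq_info, wds_ext, multipass list ... */
};

struct dp_mon_peer {                        /* monitor path */
	uint8_t peer_based_pktlog_filter;
	uint8_t tx_cap_enabled:1, rx_cap_enabled:1;
	/* ... */
};

struct dp_peer {                            /* control path */
	struct dp_txrx_peer *txrx_peer;     /* may be NULL, see guards below */
	struct dp_mon_peer *monitor_peer;
	struct dp_rx_tid *rx_tid;           /* reorder state stays here */
	/* ... */
};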

@@ -322,7 +322,7 @@ static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
struct dp_tx_desc_s *desc,
struct hal_tx_completion_status *ts,
struct dp_peer *peer)
uint16_t peer_id)
{
return QDF_STATUS_E_FAILURE;
}
@@ -982,29 +982,27 @@ static inline int dp_log2_ceil(unsigned int value)
#ifdef QCA_SUPPORT_PEER_ISOLATION
#define dp_get_peer_isolation(_peer) ((_peer)->isolation)
static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
static inline void dp_set_peer_isolation(struct dp_txrx_peer *txrx_peer,
bool val)
{
peer->isolation = val;
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"peer:"QDF_MAC_ADDR_FMT" isolation:%d",
QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->isolation);
txrx_peer->isolation = val;
}
#else
#define dp_get_peer_isolation(_peer) (0)
static inline void dp_set_peer_isolation(struct dp_peer *peer, bool val)
static inline void dp_set_peer_isolation(struct dp_txrx_peer *peer, bool val)
{
}
#endif /* QCA_SUPPORT_PEER_ISOLATION */
#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
peer->wds_ext.init = 0;
txrx_peer->wds_ext.init = 0;
}
#else
static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
static inline void dp_wds_ext_peer_init(struct dp_txrx_peer *txrx_peer)
{
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */

@@ -285,9 +285,6 @@ static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
#define RNG_ERR "SRNG setup failed for"
/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64
/**
* default_dscp_tid_map - Default DSCP-TID mapping
*
@@ -6792,19 +6789,6 @@ static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
}
#endif
#ifdef PEER_CACHE_RX_PKTS
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
qdf_spinlock_create(&peer->bufq_info.bufq_lock);
peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
}
#else
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
#endif
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
* dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
@@ -6814,16 +6798,18 @@ static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
* Return: none
*/
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc, struct dp_peer *peer)
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
peer->hw_txrx_stats_en =
txrx_peer->hw_txrx_stats_en =
wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc, struct dp_peer *peer)
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
peer->hw_txrx_stats_en = 0;
txrx_peer->hw_txrx_stats_en = 0;
}
#endif
@@ -6836,6 +6822,9 @@ static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
txrx_peer = peer->txrx_peer;
peer->txrx_peer = NULL;
dp_peer_defrag_rx_tids_deinit(txrx_peer);
dp_peer_rx_bufq_resources_deinit(txrx_peer);
qdf_mem_free(txrx_peer);
}
@@ -6856,7 +6845,8 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
txrx_peer->vdev = peer->vdev;
dp_wds_ext_peer_init(peer);
dp_peer_rx_bufq_resources_init(txrx_peer);
dp_peer_defrag_rx_tids_init(txrx_peer);
dp_txrx_peer_attach_add(soc, peer, txrx_peer);
return QDF_STATUS_SUCCESS;
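
Read as a whole, the attach path now provisions the per-packet object in one place; a sketch of dp_txrx_peer_attach() pieced together from this hunk (the allocation and error handling are assumptions, not shown in the diff):

static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;

	/* Allocation not shown in the hunk; assumed qdf_mem_malloc(). */
	txrx_peer = qdf_mem_malloc(sizeof(*txrx_peer));
	if (!txrx_peer)
		return QDF_STATUS_E_NOMEM;

	txrx_peer->vdev = peer->vdev;
	dp_peer_rx_bufq_resources_init(txrx_peer);
	dp_peer_defrag_rx_tids_init(txrx_peer);

	/* Publishes txrx_peer under soc->peer_map_lock
	 * (see the dp_txrx_peer_attach_add() hunk below).
	 */
	dp_txrx_peer_attach_add(soc, peer, txrx_peer);

	return QDF_STATUS_SUCCESS;
}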
@@ -6926,7 +6916,6 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
dp_local_peer_id_alloc(pdev, peer);
qdf_spinlock_create(&peer->peer_info_lock);
dp_peer_rx_bufq_resources_init(peer);
DP_STATS_INIT(peer);
DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
@@ -6938,10 +6927,13 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
*/
dp_monitor_peer_tx_capture_filter_check(pdev, peer);
dp_set_peer_isolation(peer, false);
if (peer->txrx_peer) {
dp_peer_rx_bufq_resources_init(peer->txrx_peer);
dp_set_peer_isolation(peer->txrx_peer, false);
dp_wds_ext_peer_init(peer->txrx_peer);
dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
}
dp_wds_ext_peer_init(peer);
dp_peer_hw_txrx_stats_init(soc, peer);
dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
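
A dp_peer does not always own a txrx_peer (the rx-flush hunk further down treats the STA self-peer as a legitimate case of txrx_peer being NULL), so every moved initializer is now guarded behind the pointer. The same idiom, condensed into a hypothetical helper:

/* Hypothetical helper illustrating the guard idiom; not part of the diff. */
static inline void dp_peer_txrx_fields_init(struct dp_soc *soc,
					    struct dp_peer *peer)
{
	if (!peer->txrx_peer)
		return;	/* e.g. STA self peer: control path only */

	dp_peer_rx_bufq_resources_init(peer->txrx_peer);
	dp_set_peer_isolation(peer->txrx_peer, false);
	dp_wds_ext_peer_init(peer->txrx_peer);
	dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
}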
@@ -7007,9 +6999,6 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_spinlock_create(&peer->peer_state_lock);
dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
qdf_spinlock_create(&peer->peer_info_lock);
dp_wds_ext_peer_init(peer);
dp_peer_hw_txrx_stats_init(soc, peer);
dp_peer_rx_bufq_resources_init(peer);
/* reset the ast index to flowid table */
dp_peer_reset_flowq_map(peer);
@@ -7042,6 +7031,8 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
(wlan_op_mode_sta != vdev->opmode)) {
dp_info("vdev bss_peer!!");
peer->bss_peer = 1;
if (peer->txrx_peer)
peer->txrx_peer->bss_peer = 1;
}
if (wlan_op_mode_sta == vdev->opmode &&
@@ -7077,17 +7068,6 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
}
}
/*
* Allocate peer extended stats context. Fall through in
* case of failure as its not an implicit requirement to have
* this object for regular statistics updates.
*/
if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
QDF_STATUS_SUCCESS)
dp_warn("peer ext_stats ctx alloc failed");
dp_set_peer_isolation(peer, false);
dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
@@ -7187,6 +7167,7 @@ QDF_STATUS dp_peer_mlo_setup(
dp_link_peer_add_mld_peer(peer, mld_peer);
dp_mld_peer_add_link_peer(mld_peer, peer);
mld_peer->txrx_peer->mld_peer = 1;
dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
} else {
peer->mld_peer = NULL;
@@ -7825,7 +7806,8 @@ static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
uint8_t *peer_mac, uint8_t sec_idx)
{
int sec_type = 0;
struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
peer_mac, 0, vdev_id,
DP_MOD_ID_CDP);
@@ -7834,7 +7816,12 @@ static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
return sec_type;
}
sec_type = peer->security[sec_idx].sec_type;
if (!peer->txrx_peer) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
return sec_type;
}
sec_type = peer->txrx_peer->security[sec_idx].sec_type;
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return sec_type;
@@ -7854,7 +7841,7 @@ dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
0, vdev_id,
DP_MOD_ID_CDP);
@@ -7863,6 +7850,8 @@ dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
status = QDF_STATUS_E_FAILURE;
} else {
peer->authorize = authorize ? 1 : 0;
if (peer->txrx_peer)
peer->txrx_peer->authorize = peer->authorize;
if (!peer->authorize)
dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
@@ -8022,11 +8011,6 @@ void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
/*
* Deallocate the extended stats contenxt
*/
dp_peer_ext_stats_ctx_dealloc(soc, peer);
/* send peer destroy event to upper layer */
qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
@@ -8082,27 +8066,17 @@ qdf_export_symbol(dp_peer_unref_delete);
/*
* dp_txrx_peer_unref_delete() - unref and delete peer
* @handle: Datapath txrx ref handle
* @mod_id: Module ID of the caller
*
*/
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle *handle)
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
enum dp_mod_id mod_id)
{
dp_peer_unref_delete((struct dp_peer *)handle, DP_MOD_ID_TX_RX);
dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
}
qdf_export_symbol(dp_txrx_peer_unref_delete);
#ifdef PEER_CACHE_RX_PKTS
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
qdf_list_destroy(&peer->bufq_info.cached_bufq);
qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
/*
* dp_peer_detach_wifi3() - Detach txrx peer
* @soc_hdl: soc handle
@@ -8137,8 +8111,11 @@ static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
vdev = peer->vdev;
if (!vdev)
if (!vdev) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return QDF_STATUS_E_FAILURE;
}
peer->valid = 0;
dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
@@ -8149,8 +8126,6 @@ static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
/* Drop all rx packets before deleting peer */
dp_clear_peer_internal(soc, peer);
dp_peer_rx_bufq_resources_deinit(peer);
qdf_spinlock_destroy(&peer->peer_info_lock);
dp_peer_multipass_list_remove(peer);
@@ -9278,25 +9253,30 @@ static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
enum cdp_peer_param_type param,
cdp_config_param_type val)
{
struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
peer_mac, 0, vdev_id,
DP_MOD_ID_CDP);
struct dp_txrx_peer *txrx_peer;
if (!peer)
return QDF_STATUS_E_FAILURE;
txrx_peer = peer->txrx_peer;
if (!txrx_peer) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return QDF_STATUS_E_FAILURE;
}
switch (param) {
case CDP_CONFIG_NAWDS:
peer->nawds_enabled = val.cdp_peer_param_nawds;
break;
case CDP_CONFIG_NAC:
peer->nac = !!(val.cdp_peer_param_nac);
txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
break;
case CDP_CONFIG_ISOLATION:
dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
break;
case CDP_CONFIG_IN_TWT:
peer->in_twt = !!(val.cdp_peer_param_in_twt);
txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
break;
default:
break;
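
Callers are unaffected by the move; the same cdp parameter interface now lands on txrx_peer fields internally. A minimal caller sketch (the soc handle, vdev_id and peer_mac are assumed to be in scope):

/* Sketch: enabling isolation through the unchanged external interface. */
cdp_config_param_type val = {0};

val.cdp_peer_param_isolation = true;
if (dp_set_peer_param(cdp_soc, vdev_id, peer_mac,
		      CDP_CONFIG_ISOLATION, val) != QDF_STATUS_SUCCESS) {
	/* peer not found, or it has no txrx_peer yet */
}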
@@ -13708,24 +13688,28 @@ QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
return status;
}
if (!peer->txrx_peer) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return status;
}
if (rx) {
if (peer->osif_rx) {
if (peer->txrx_peer->osif_rx) {
status = QDF_STATUS_E_ALREADY;
} else {
peer->osif_rx = rx;
peer->txrx_peer->osif_rx = rx;
status = QDF_STATUS_SUCCESS;
}
} else {
if (peer->osif_rx) {
peer->osif_rx = NULL;
if (peer->txrx_peer->osif_rx) {
peer->txrx_peer->osif_rx = NULL;
status = QDF_STATUS_SUCCESS;
} else {
status = QDF_STATUS_E_ALREADY;
}
}
peer->wds_ext.osif_peer = osif_peer;
peer->txrx_peer->wds_ext.osif_peer = osif_peer;
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return status;

@@ -717,6 +717,7 @@ void dp_txrx_peer_attach_add(struct dp_soc *soc,
qdf_spin_lock_bh(&soc->peer_map_lock);
peer->txrx_peer = txrx_peer;
txrx_peer->bss_peer = peer->bss_peer;
if (peer->peer_id == HTT_INVALID_PEER) {
qdf_spin_unlock_bh(&soc->peer_map_lock);
@@ -2601,6 +2602,8 @@ dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
QDF_MAC_ADDR_SIZE) != 0) {
dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
peer->bss_peer = 1;
if (peer->txrx_peer)
peer->txrx_peer->bss_peer = 1;
}
if (peer->vdev->opmode == wlan_op_mode_sta) {
@@ -2722,6 +2725,8 @@ dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
QDF_MAC_ADDR_SIZE) != 0) {
dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
peer->bss_peer = 1;
if (peer->txrx_peer)
peer->txrx_peer->bss_peer = 1;
}
if (peer->vdev->opmode == wlan_op_mode_sta) {
@@ -3214,6 +3219,7 @@ QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
void *hw_qdesc_vaddr;
uint32_t alloc_tries = 0;
QDF_STATUS status = QDF_STATUS_SUCCESS;
struct dp_txrx_peer *txrx_peer;
if (!qdf_atomic_read(&peer->is_default_route_set))
return QDF_STATUS_E_FAILURE;
@@ -3299,11 +3305,13 @@ try_desc_alloc:
}
rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
txrx_peer = dp_get_txrx_peer(peer);
/* TODO: Ensure that sec_type is set before ADDBA is received.
* Currently this is set based on htt indication
* HTT_T2H_MSG_TYPE_SEC_IND from target
*/
switch (peer->security[dp_sec_ucast].sec_type) {
switch (txrx_peer->security[dp_sec_ucast].sec_type) {
case cdp_sec_type_tkip_nomic:
case cdp_sec_type_aes_ccmp:
case cdp_sec_type_aes_ccmp_256:
@@ -3717,6 +3725,21 @@ static void dp_peer_rx_tids_init(struct dp_peer *peer)
{
int tid;
struct dp_rx_tid *rx_tid;
struct dp_rx_tid_defrag *rx_tid_defrag;
if (!IS_MLO_DP_LINK_PEER(peer)) {
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
rx_tid_defrag->array = &rx_tid_defrag->base;
rx_tid_defrag->defrag_timeout_ms = 0;
rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
rx_tid_defrag->base.head = NULL;
rx_tid_defrag->base.tail = NULL;
rx_tid_defrag->defrag_peer = peer->txrx_peer;
}
}
/* if not first assoc link peer,
* not to initialize rx_tids again.
@@ -3726,18 +3749,9 @@ static void dp_peer_rx_tids_init(struct dp_peer *peer)
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
rx_tid->array = &rx_tid->base;
rx_tid->base.head = NULL;
rx_tid->base.tail = NULL;
rx_tid->tid = tid;
rx_tid->defrag_timeout_ms = 0;
rx_tid->ba_win_size = 0;
rx_tid->ba_status = DP_RX_BA_INACTIVE;
rx_tid->defrag_waitlist_elem.tqe_next = NULL;
rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
rx_tid->defrag_peer =
IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}
}
#else
@@ -3745,20 +3759,23 @@ static void dp_peer_rx_tids_init(struct dp_peer *peer)
{
int tid;
struct dp_rx_tid *rx_tid;
struct dp_rx_tid_defrag *rx_tid_defrag;
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
rx_tid->array = &rx_tid->base;
rx_tid->base.head = NULL;
rx_tid->base.tail = NULL;
rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
rx_tid->tid = tid;
rx_tid->defrag_timeout_ms = 0;
rx_tid->ba_win_size = 0;
rx_tid->ba_status = DP_RX_BA_INACTIVE;
rx_tid->defrag_waitlist_elem.tqe_next = NULL;
rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
rx_tid->defrag_peer = peer;
rx_tid_defrag->base.head = NULL;
rx_tid_defrag->base.tail = NULL;
rx_tid_defrag->array = &rx_tid_defrag->base;
rx_tid_defrag->defrag_timeout_ms = 0;
rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
rx_tid_defrag->defrag_peer = peer->txrx_peer;
}
}
#endif
@@ -3795,8 +3812,10 @@ void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
* Set security defaults: no PN check, no security. The target may
* send a HTT SEC_IND message to overwrite these defaults.
*/
peer->security[dp_sec_ucast].sec_type =
peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
if (peer->txrx_peer)
peer->txrx_peer->security[dp_sec_ucast].sec_type =
peer->txrx_peer->security[dp_sec_mcast].sec_type =
cdp_sec_type_none;
}
/*
@@ -3810,20 +3829,25 @@ void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
int tid;
uint32_t tid_delete_mask = 0;
dp_info("Remove tids for peer: %pK", peer);
if (IS_MLO_DP_LINK_PEER(peer))
if (!peer->txrx_peer)
return;
dp_info("Remove tids for peer: %pK", peer);
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
struct dp_rx_tid_defrag *defrag_rx_tid =
&peer->txrx_peer->rx_tid[tid];
qdf_spin_lock_bh(&rx_tid->tid_lock);
qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
/* Cleanup defrag related resource */
dp_rx_defrag_waitlist_remove(peer, tid);
dp_rx_reorder_flush_frag(peer, tid);
dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
}
qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
qdf_spin_lock_bh(&rx_tid->tid_lock);
if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
dp_rx_tid_delete_wifi3(peer, tid);
@@ -4511,7 +4535,8 @@ dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
uint8_t *peer_mac, enum cdp_sec_type sec_type,
bool is_unicast)
{
struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
peer_mac, 0, vdev_id,
DP_MOD_ID_CDP);
int sec_index;
@@ -4521,12 +4546,18 @@ dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
return QDF_STATUS_E_FAILURE;
}
if (!peer->txrx_peer) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
return QDF_STATUS_E_FAILURE;
}
dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
is_unicast ? "ucast" : "mcast", sec_type);
sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
peer->security[sec_index].sec_type = sec_type;
peer->txrx_peer->security[sec_index].sec_type = sec_type;
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
@@ -4540,6 +4571,7 @@ dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
u_int32_t *rx_pn)
{
struct dp_peer *peer;
struct dp_txrx_peer *txrx_peer;
int sec_index;
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
@@ -4548,22 +4580,30 @@ dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
peer_id);
return;
}
txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer) {
dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
peer_id);
return;
}
dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
is_unicast ? "ucast" : "mcast", sec_type);
sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
peer->security[sec_index].sec_type = sec_type;
peer->txrx_peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
/* michael key only valid for TKIP, but for simplicity,
* copy it anyway
*/
qdf_mem_copy(
&peer->security[sec_index].michael_key[0],
&peer->txrx_peer->security[sec_index].michael_key[0],
michael_key,
sizeof(peer->security[sec_index].michael_key));
sizeof(peer->txrx_peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
sizeof(peer->security[sec_index].michael_key));
OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
sizeof(peer->txrx_peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif
@@ -4783,13 +4823,16 @@ QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
peer->state = state;
peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
if (peer->txrx_peer)
peer->txrx_peer->authorize = peer->authorize;
dp_peer_info("peer" QDF_MAC_ADDR_FMT "state %d",
QDF_MAC_ADDR_REF(peer->mac_addr.raw),
peer->state);
if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
peer->mld_peer->state = peer->state;
peer->mld_peer->authorize = peer->authorize;
peer->mld_peer->txrx_peer->authorize = peer->authorize;
dp_peer_info("mld peer" QDF_MAC_ADDR_FMT "state %d",
QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
peer->mld_peer->state);
@@ -4842,6 +4885,9 @@ QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
peer->state = state;
peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;
if (peer->txrx_peer)
peer->txrx_peer->authorize = peer->authorize;
dp_info("peer %pK state %d", peer, peer->state);
/* ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it here.
@@ -5185,7 +5231,8 @@ dp_set_michael_key(struct cdp_soc_t *soc,
bool is_unicast, uint32_t *key)
{
uint8_t sec_index = is_unicast ? 1 : 0;
struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
peer_mac, 0, vdev_id,
DP_MOD_ID_CDP);
@@ -5194,7 +5241,7 @@ dp_set_michael_key(struct cdp_soc_t *soc,
return QDF_STATUS_E_FAILURE;
}
qdf_mem_copy(&peer->security[sec_index].michael_key[0],
qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
key, IEEE80211_WEP_MICLEN);
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
@@ -5366,26 +5413,33 @@ void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
uint8_t *peer_mac)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
vdev_id, DP_MOD_ID_CDP);
struct dp_rx_tid *rx_tid;
struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
vdev_id,
DP_MOD_ID_CDP);
struct dp_txrx_peer *txrx_peer;
uint8_t tid;
struct dp_rx_tid_defrag *defrag_rx_tid;
if (!peer)
return;
if (!peer->txrx_peer)
goto fail;
dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
txrx_peer = peer->txrx_peer;
for (tid = 0; tid < DP_MAX_TIDS; tid++) {
rx_tid = &peer->rx_tid[tid];
defrag_rx_tid = &txrx_peer->rx_tid[tid];
qdf_spin_lock_bh(&rx_tid->tid_lock);
dp_rx_defrag_waitlist_remove(peer, tid);
dp_rx_reorder_flush_frag(peer, tid);
qdf_spin_unlock_bh(&rx_tid->tid_lock);
qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
dp_rx_defrag_waitlist_remove(txrx_peer, tid);
dp_rx_reorder_flush_frag(txrx_peer, tid);
qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
}
fail:
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
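
The defrag helpers invoked above now take the txrx peer; their signatures, as inferred from these call sites (the declarations themselves are not part of this diff):

/* Inferred from the dp_peer_rx_cleanup()/dp_peer_flush_frags() call sites. */
void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
				  unsigned int tid);
void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
			      unsigned int tid);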

@@ -37,6 +37,9 @@
#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0
/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64
#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
@@ -66,7 +69,7 @@ struct ast_del_ctxt {
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
void *arg);
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle *handle);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
@@ -166,21 +169,28 @@ struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
* @soc : core DP soc context
* @peer_id : peer id from which the peer object can be retrieved
* @handle : reference handle
* @mod_id : ID of module requesting reference
*
* Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
*/
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
uint16_t peer_id,
dp_txrx_ref_handle *handle)
dp_txrx_ref_handle *handle,
enum dp_mod_id mod_id)
{
struct dp_peer *peer;
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_RX);
peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
if (!peer)
return NULL;
if (!peer->txrx_peer) {
dp_peer_unref_delete(peer, mod_id);
return NULL;
}
*handle = (dp_txrx_ref_handle)peer;
return peer->txrx_peer;
}
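
Paired with dp_txrx_peer_unref_delete() above, this gives the per-packet path a get/put pattern keyed on the caller's module ID; a consumer sketch (DP_MOD_ID_RX chosen for illustration):

/* Sketch of a per-packet consumer; not part of the diff. */
dp_txrx_ref_handle txrx_ref_handle = NULL;
struct dp_txrx_peer *txrx_peer;

txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
				       DP_MOD_ID_RX);
if (!txrx_peer)
	return;	/* peer gone, or it carries no per-packet state */

if (!txrx_peer->isolation) {
	/* ... deliver the frame ... */
}

dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);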
@@ -1064,6 +1074,10 @@ void dp_peer_delete(struct dp_soc *soc,
void *arg);
#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
((_peer)->peer_type = (_type_val))
@@ -1530,7 +1544,45 @@ bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
else
return false;
}
/**
* dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
*
* @soc : core DP soc context
* @peer_id : peer id from which the peer object can be retrieved
* @handle : reference handle
* @mod_id : ID of module requesting reference
*
* Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
*/
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
uint16_t peer_id,
dp_txrx_ref_handle *handle,
enum dp_mod_id mod_id)
{
struct dp_peer *peer;
struct dp_txrx_peer *txrx_peer;
peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
if (!peer)
return NULL;
txrx_peer = dp_get_txrx_peer(peer);
if (txrx_peer) {
*handle = (dp_txrx_ref_handle)peer;
return txrx_peer;
}
dp_peer_unref_delete(peer, mod_id);
return NULL;
}
#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
@@ -1637,6 +1689,27 @@ bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
return true;
}
/**
* dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
*
* @soc : core DP soc context
* @peer_id : peer id from which the peer object can be retrieved
* @handle : reference handle
* @mod_id : ID of module requesting reference
*
* Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
*/
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
uint16_t peer_id,
dp_txrx_ref_handle *handle,
enum dp_mod_id mod_id)
{
return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
#endif /* WLAN_FEATURE_11BE_MLO */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
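
The MLO variant above resolves the txrx peer through dp_get_txrx_peer(), which this diff does not show. A plausible shape of it, stated as an assumption: for an MLO link peer, the per-packet state is owned by its MLD peer.

/* Assumed shape of dp_get_txrx_peer(); not part of this diff. */
static inline struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	/* Link peers share the txrx peer owned by their MLD peer. */
	if (IS_MLO_DP_LINK_PEER(peer))
		return peer->mld_peer->txrx_peer;

	return peer->txrx_peer;
}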
@@ -1719,6 +1792,56 @@ void dp_peer_rx_tids_destroy(struct dp_peer *peer)
peer->rx_tid = NULL;
}
static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
uint8_t i;
qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
sizeof(struct dp_rx_tid_defrag));
for (i = 0; i < DP_MAX_TIDS; i++)
qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}
static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
uint8_t i;
for (i = 0; i < DP_MAX_TIDS; i++)
qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}
#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
DP_RX_CACHED_BUFQ_THRESH);
}
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
struct dp_peer *peer);

@@ -1665,6 +1665,15 @@ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
int num_buff_elem;
QDF_STATUS status;
if (!peer->txrx_peer) {
if (!peer->sta_self_peer) {
qdf_err("txrx_peer NULL!!");
qdf_assert_always(0);
}
return;
}
if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
qdf_atomic_dec(&peer->flush_in_progress);
return;

@@ -199,6 +199,8 @@ struct dp_mon_mpdu;
#ifdef BE_PKTLOG_SUPPORT
struct dp_mon_filter_be;
#endif
struct dp_peer;
struct dp_txrx_peer;
/**
* enum for DP peer state
@@ -764,6 +766,38 @@ struct dp_reo_cmd_info {
TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
/* Rx TID defrag */
struct dp_rx_tid_defrag {
/* TID */
int tid;
/* only used for defrag right now */
TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
/* Store dst desc for reinjection */
hal_ring_desc_t dst_ring_desc;
struct dp_rx_desc *head_frag_desc;
/* Sequence and fragments that are being processed currently */
uint32_t curr_seq_num;
uint32_t curr_frag_num;
/* TODO: Check the following while adding defragmentation support */
struct dp_rx_reorder_array_elem *array;
/* base - single rx reorder element used for non-aggr cases */
struct dp_rx_reorder_array_elem base;
/* rx_tid lock */
qdf_spinlock_t defrag_tid_lock;
/* head PN number */
uint64_t pn128[2];
uint32_t defrag_timeout_ms;
/* defrag usage only, dp_txrx_peer pointer related to this tid */
struct dp_txrx_peer *defrag_peer;
};
/* Rx TID */
struct dp_rx_tid {
/* TID */
@@ -801,35 +835,14 @@ struct dp_rx_tid {
/* Starting sequence number in Addba request */
uint16_t startseqnum;
/* TODO: Check the following while adding defragmentation support */
struct dp_rx_reorder_array_elem *array;
/* base - single rx reorder element used for non-aggr cases */
struct dp_rx_reorder_array_elem base;
/* only used for defrag right now */
TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;
/* Store dst desc for reinjection */
hal_ring_desc_t dst_ring_desc;
struct dp_rx_desc *head_frag_desc;
/* rx_tid lock */
qdf_spinlock_t tid_lock;
/* Sequence and fragments that are being processed currently */
uint32_t curr_seq_num;
uint32_t curr_frag_num;
/* head PN number */
uint64_t pn128[2];
uint32_t defrag_timeout_ms;
uint16_t dialogtoken;
uint16_t statuscode;
/* user defined ADDBA response status code */
uint16_t userstatuscode;
/* rx_tid lock */
qdf_spinlock_t tid_lock;
/* Store ppdu_id when 2k exception is received */
uint32_t ppdu_id_2k;
@@ -851,9 +864,6 @@ struct dp_rx_tid {
/* Peer TID statistics */
struct cdp_peer_tid_stats stats;
/* defrag usage only, dp_peer pointer related with this tid */
struct dp_peer *defrag_peer;
};
/**
@@ -1725,7 +1735,6 @@ struct dp_arch_ops {
uint8_t bm_id);
uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
uint32_t peer_metadata);
/* Control Arch Ops */
QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
struct dp_vdev *vdev,
@@ -2010,7 +2019,7 @@ struct dp_soc {
/* rx defrag state TBD: do we need this per radio? */
struct {
struct {
TAILQ_HEAD(, dp_rx_tid) waitlist;
TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
uint32_t timeout_ms;
uint32_t next_flush_ms;
qdf_spinlock_t defrag_lock;
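
With the soc-level waitlist retyped to dp_rx_tid_defrag, enqueueing keeps the familiar TAILQ idiom through the moved defrag_waitlist_elem linkage; a sketch of what the waitlist-add path is expected to look like after this change (the exact field nesting outside this hunk is an assumption):

/* Sketch of the add path after the retyping; not part of this diff. */
struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];

qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
TAILQ_INSERT_TAIL(&soc->rx.defrag.waitlist, waitlist_elem,
		  defrag_waitlist_elem);
qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);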
@@ -3073,7 +3082,7 @@ struct dp_vdev {
uint16_t *iv_vlan_map;
/* dp_peer special list */
TAILQ_HEAD(, dp_peer) mpass_peer_list;
TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
DP_MUTEX_TYPE mpass_peer_mutex;
#endif
/* Extended data path handle */
@@ -3342,6 +3351,7 @@ struct dp_mld_link_peers {
typedef void *dp_txrx_ref_handle;
/* Peer structure for per packet path usage */
struct dp_txrx_peer {
/* Core TxRx Peer */
@@ -3350,6 +3360,41 @@ struct dp_txrx_peer {
/* peer ID for this peer */
uint16_t peer_id;
uint8_t authorize:1, /* Set when authorized */
in_twt:1, /* in TWT session */
hw_txrx_stats_en:1, /*Indicate HW offload vdev stats */
mld_peer:1; /* MLD peer*/
uint32_t tx_failed;
struct {
enum cdp_sec_type sec_type;
u_int32_t michael_key[2]; /* relevant for TKIP */
} security[2]; /* 0 -> multicast, 1 -> unicast */
uint16_t nawds_enabled:1, /* NAWDS flag */
bss_peer:1, /* set for bss peer */
isolation:1, /* enable peer isolation for this peer */
wds_enabled:1; /* WDS peer */
#ifdef WDS_VENDOR_EXTENSION
dp_ecm_policy wds_ecm;
#endif
#ifdef PEER_CACHE_RX_PKTS
qdf_atomic_t flush_in_progress;
struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef QCA_MULTIPASS_SUPPORT
/* node in the special peer list element */
TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
/* vlan id for key */
uint16_t vlan_id;
#endif
#ifdef QCA_SUPPORT_WDS_EXTENDED
struct dp_wds_ext_peer wds_ext;
ol_txrx_rx_fp osif_rx;
#endif
struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
};
/* Peer structure for data path state */
@@ -3358,6 +3403,9 @@ struct dp_peer {
#ifdef WIFI_MONITOR_SUPPORT
struct dp_mon_peer *monitor_peer;
#endif
/* peer ID for this peer */
uint16_t peer_id;
/* VDEV to which this peer is associated */
struct dp_vdev *vdev;
@@ -3365,9 +3413,6 @@ struct dp_peer {
qdf_atomic_t ref_cnt;
/* peer ID for this peer */
uint16_t peer_id;
union dp_align_mac_addr mac_addr;
/* node in the vdev's list of peers */
@@ -3386,28 +3431,17 @@ struct dp_peer {
} security[2]; /* 0 -> multicast, 1 -> unicast */
/* NAWDS Flag and Bss Peer bit */
uint16_t nawds_enabled:1, /* NAWDS flag */
bss_peer:1, /* set for bss peer */
wds_enabled:1, /* WDS peer */
uint16_t bss_peer:1, /* set for bss peer */
authorize:1, /* Set when authorized */
nac:1, /* NAC Peer*/
tx_cap_enabled:1, /* Peer's tx-capture is enabled */
rx_cap_enabled:1, /* Peer's rx-capture is enabled */
valid:1, /* valid bit */
in_twt:1, /* in TWT session */
delete_in_progress:1, /* Indicate kickout sent */
sta_self_peer:1, /* Indicate STA self peer */
hw_txrx_stats_en:1; /*Indicate HW offload vdev stats */
sta_self_peer:1; /* Indicate STA self peer */
#ifdef WLAN_FEATURE_11BE_MLO
uint8_t first_link:1, /* first link peer for MLO */
primary_link:1; /* primary link for MLO */
#endif
#ifdef QCA_SUPPORT_PEER_ISOLATION
bool isolation; /* enable peer isolation for this peer */
#endif
/* MCL specific peer local id */
uint16_t local_id;
enum ol_txrx_peer_state state;
@@ -3422,10 +3456,6 @@ struct dp_peer {
TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
/* TBD */
#ifdef WDS_VENDOR_EXTENSION
dp_ecm_policy wds_ecm;
#endif
/* Active Block ack sessions */
uint16_t active_ba_session_cnt;
@@ -3438,27 +3468,12 @@ struct dp_peer {
*/
uint8_t kill_256_sessions;
qdf_atomic_t is_default_route_set;
/* Peer level flag to check peer based pktlog enabled or
* disabled
*/
uint8_t peer_based_pktlog_filter;
/* rdk statistics context */
struct cdp_peer_rate_stats_ctx *rdkstats_ctx;
/* average sojourn time */
qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
#ifdef QCA_MULTIPASS_SUPPORT
/* node in the special peer list element */
TAILQ_ENTRY(dp_peer) mpass_peer_list_elem;
/* vlan id for key */
uint16_t vlan_id;
#endif
#ifdef PEER_CACHE_RX_PKTS
qdf_atomic_t flush_in_progress;
struct dp_peer_cached_bufq bufq_info;
#endif
#ifdef QCA_PEER_MULTIQ_SUPPORT
struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
#endif
@@ -3478,11 +3493,6 @@ struct dp_peer {
struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
bool mscs_active;
#endif
#ifdef QCA_SUPPORT_WDS_EXTENDED
struct dp_wds_ext_peer wds_ext;
ol_txrx_rx_fp osif_rx;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
#endif

@@ -1814,7 +1814,7 @@ dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
return QDF_STATUS_E_FAILURE;
}
peer->peer_based_pktlog_filter = enb_dsb;
peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
mon_pdev->dp_peer_based_pktlog = enb_dsb;
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
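
Note that the hunk above dereferences peer->monitor_peer without a NULL check; if a peer can reach this control path before its monitor peer is attached, a defensive variant would bail out first (a sketch only, not what the commit does):

/* Defensive variant (sketch): tolerate peers without a monitor peer. */
if (!peer->monitor_peer) {
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_E_FAILURE;
}

peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
mon_pdev->dp_peer_based_pktlog = enb_dsb;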

@@ -471,7 +471,7 @@ struct dp_mon_ops {
QDF_STATUS (*mon_tx_add_to_comp_queue)(struct dp_soc *soc,
struct dp_tx_desc_s *desc,
struct hal_tx_completion_status *ts,
struct dp_peer *peer);
uint16_t peer_id);
QDF_STATUS (*mon_update_msdu_to_list)(struct dp_soc *soc,
struct dp_pdev *pdev,
struct dp_peer *peer,
@@ -876,6 +876,13 @@ struct dp_mon_peer {
/* delayed ba ppdu id */
uint32_t last_delayed_ba_ppduid;
#endif
/* Peer level flag to check peer based pktlog enabled or
* disabled
*/
uint8_t peer_based_pktlog_filter;
uint8_t tx_cap_enabled:1, /* Peer's tx-capture is enabled */
rx_cap_enabled:1; /* Peer's rx-capture is enabled */
};
#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
@@ -2165,7 +2172,7 @@ static inline void dp_monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev,
* @soc: point to soc
* @desc: point to tx desc
* @ts: Tx completion status from HAL/HTT descriptor
* @peer: DP peer
* @peer_id: DP peer id
*
* Return: QDF_STATUS
*
@@ -2174,7 +2181,7 @@ static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
struct dp_tx_desc_s *desc,
struct hal_tx_completion_status *ts,
struct dp_peer *peer)
uint16_t peer_id)
{
struct dp_mon_ops *monitor_ops;
struct dp_mon_soc *mon_soc = soc->monitor_soc;
@@ -2190,7 +2197,7 @@ QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
return QDF_STATUS_E_FAILURE;
}
return monitor_ops->mon_tx_add_to_comp_queue(soc, desc, ts, peer);
return monitor_ops->mon_tx_add_to_comp_queue(soc, desc, ts, peer_id);
}
static inline
@@ -2306,7 +2313,7 @@ static inline
QDF_STATUS dp_monitor_tx_add_to_comp_queue(struct dp_soc *soc,
struct dp_tx_desc_s *desc,
struct hal_tx_completion_status *ts,
struct dp_peer *peer)
uint16_t peer_id)
{
return QDF_STATUS_E_FAILURE;
}
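
With both the real hook and this stub taking a peer id, tx-completion callers no longer resolve a dp_peer just to feed the monitor queue; the id comes straight off the completion status. A sketch of the expected call site (names outside this diff are assumptions):

/* Sketch of a tx-completion caller after the signature change. */
struct hal_tx_completion_status ts = {0};

hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

/* The monitor layer looks the peer up from ts.peer_id only if it
 * actually consumes the descriptor.
 */
if (dp_monitor_tx_add_to_comp_queue(soc, desc, &ts, ts.peer_id) ==
    QDF_STATUS_SUCCESS)
	return;	/* descriptor handed off to the monitor queue */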

@@ -1659,7 +1659,7 @@ dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
return;
if ((peer->peer_id != HTT_INVALID_PEER) &&
(peer->peer_based_pktlog_filter)) {
(peer->monitor_peer->peer_based_pktlog_filter)) {
dp_wdi_event_handler(
WDI_EVENT_RX_DESC, soc,
status_nbuf,