qcacmn: use txrx_peer in wds APIs

Use txrx_peer in the WDS APIs instead of the main dp peer.

Change-Id: I3d05de8c29c1ee012405ca2ab22aec3876956bf1
Author:       Pavankumar Nandeshwar
Date:         2021-12-15 03:26:48 -08:00
Committed by: Madan Koyyalamudi
Parent:       04bb406179
Commit:       826ec6b4f5

2 changed files with 154 additions and 85 deletions
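The pattern applied throughout the diff below: WDS helpers that only need
per-peer TX/RX state now take the lightweight struct dp_txrx_peer, and the
full struct dp_peer is looked up by peer_id (dp_peer_get_ref_by_id() with a
module-tagged reference, released with dp_peer_unref_delete()) only around
the operations, such as AST updates, that still require the base peer. A
minimal compilable C sketch of that shape, using simplified stand-in types
and stub ref-counting rather than the real qcacmn definitions:

#include <stdio.h>

/* Simplified stand-ins for the qcacmn structures; the real definitions
 * and ref-count machinery are far richer. */
struct dp_txrx_peer { int peer_id; };  /* lightweight per-peer TX/RX state */
struct dp_peer { int peer_id; int refcnt; struct dp_txrx_peer txrx; };

static struct dp_peer g_peer = { 7, 1, { 7 } };

/* Illustrative stand-in for dp_peer_get_ref_by_id(): map a peer_id back to
 * the base peer and take a reference; NULL if the peer no longer exists. */
static struct dp_peer *peer_get_ref_by_id(int peer_id)
{
        if (peer_id != g_peer.peer_id)
                return NULL;
        g_peer.refcnt++;
        return &g_peer;
}

/* Illustrative stand-in for dp_peer_unref_delete(). */
static void peer_unref(struct dp_peer *peer)
{
        peer->refcnt--;
}

/* Post-change shape of a WDS API: the caller passes only the txrx peer;
 * the base peer is held just across the call that still requires it. */
static void wds_ast_update(struct dp_txrx_peer *ta_txrx_peer)
{
        struct dp_peer *base_peer = peer_get_ref_by_id(ta_txrx_peer->peer_id);

        if (!base_peer)
                return;        /* base peer torn down meanwhile: bail out */
        printf("AST update for peer %d\n", base_peer->peer_id);
        peer_unref(base_peer); /* drop the temporary reference */
}

int main(void)
{
        wds_ast_update(&g_peer.txrx);
        return 0;
}

The peer_id round trip matters because the base peer can be freed
independently of the txrx peer, so every path must handle a failed lookup
and drop the reference it took; the hunks below do exactly that around
dp_peer_add_ast(), dp_peer_update_ast() and the tx-capture header fill.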

@@ -226,24 +226,25 @@ void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
  *
  * @soc: core txrx main context
  * @rx_tlv_hdr : start address of rx tlvs
- * @ta_peer : Transmitter peer entry
+ * @ta_txrx_peer: Transmitter peer entry
  * @nbuf : nbuf to retrieve destination mac for which AST will be added
  *
  */
 void
 dp_rx_da_learn(struct dp_soc *soc,
                uint8_t *rx_tlv_hdr,
-               struct dp_peer *ta_peer,
+               struct dp_txrx_peer *ta_txrx_peer,
                qdf_nbuf_t nbuf)
 {
+        struct dp_peer *base_peer;
         /* For HKv2 DA port learing is not needed */
         if (qdf_likely(soc->ast_override_support))
                 return;

-        if (qdf_unlikely(!ta_peer))
+        if (qdf_unlikely(!ta_txrx_peer))
                 return;

-        if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
+        if (qdf_unlikely(ta_txrx_peer->vdev->opmode != wlan_op_mode_ap))
                 return;

         if (!soc->da_war_enabled)
@@ -251,11 +252,18 @@ dp_rx_da_learn(struct dp_soc *soc,
         if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
                          !qdf_nbuf_is_da_mcbc(nbuf))) {
-                dp_peer_add_ast(soc,
-                                ta_peer,
-                                qdf_nbuf_data(nbuf),
-                                CDP_TXRX_AST_TYPE_DA,
-                                DP_AST_FLAGS_HM);
+                base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
+                                                  DP_MOD_ID_AST);
+                if (base_peer) {
+                        dp_peer_add_ast(soc,
+                                        base_peer,
+                                        qdf_nbuf_data(nbuf),
+                                        CDP_TXRX_AST_TYPE_DA,
+                                        DP_AST_FLAGS_HM);
+                        dp_peer_unref_delete(base_peer, DP_MOD_ID_AST);
+                }
         }
 }
@@ -286,10 +294,10 @@ dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
         peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);

         if (peer) {
-                peer->wds_ecm.wds_rx_filter = 1;
-                peer->wds_ecm.wds_rx_ucast_4addr =
+                peer->txrx_peer->wds_ecm.wds_rx_filter = 1;
+                peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr =
                         (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
-                peer->wds_ecm.wds_rx_mcast_4addr =
+                peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr =
                         (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
                 dp_peer_unref_delete(peer, DP_MOD_ID_AST);
         }
@@ -314,7 +322,8 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
                                   uint8_t *peer_mac, int wds_tx_ucast,
                                   int wds_tx_mcast)
 {
-        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
+        struct dp_peer *peer =
+                        dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
                                                        peer_mac, 0,
                                                        vdev_id,
                                                        DP_MOD_ID_AST);
@@ -325,14 +334,19 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
                 return QDF_STATUS_E_INVAL;
         }

+        if (!peer->txrx_peer) {
+                dp_peer_unref_delete(peer, DP_MOD_ID_AST);
+                return QDF_STATUS_E_INVAL;
+        }
+
         if (wds_tx_ucast || wds_tx_mcast) {
-                peer->wds_enabled = 1;
-                peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
-                peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
+                peer->txrx_peer->wds_enabled = 1;
+                peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
+                peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
         } else {
-                peer->wds_enabled = 0;
-                peer->wds_ecm.wds_tx_ucast_4addr = 0;
-                peer->wds_ecm.wds_tx_mcast_4addr = 0;
+                peer->txrx_peer->wds_enabled = 0;
+                peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr = 0;
+                peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr = 0;
         }

         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
@@ -341,10 +355,10 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,
                   "peer->wds_enabled %d\n", peer->wds_enabled);
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                   "peer->wds_ecm.wds_tx_ucast_4addr %d\n",
-                  peer->wds_ecm.wds_tx_ucast_4addr);
+                  peer->txrx_peer->wds_ecm.wds_tx_ucast_4addr);
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                   "peer->wds_ecm.wds_tx_mcast_4addr %d\n",
-                  peer->wds_ecm.wds_tx_mcast_4addr);
+                  peer->txrx_peer->wds_ecm.wds_tx_mcast_4addr);

         dp_peer_unref_delete(peer, DP_MOD_ID_AST);
         return QDF_STATUS_SUCCESS;
@@ -352,7 +366,7 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc, uint8_t vdev_id,

 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
                            struct dp_vdev *vdev,
-                           struct dp_peer *peer)
+                           struct dp_txrx_peer *txrx_peer)
 {
         struct dp_peer *bss_peer;
         int fr_ds, to_ds, rx_3addr, rx_4addr;
@@ -363,19 +377,19 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
         if (vdev->opmode == wlan_op_mode_ap) {
                 bss_peer = dp_vdev_bss_peer_ref_n_get(vdev, DP_MOD_ID_AST);
                 /* if wds policy check is not enabled on this vdev, accept all frames */
-                if (bss_peer && !bss_peer->wds_ecm.wds_rx_filter) {
+                if (bss_peer && !bss_peer->txrx_peer->wds_ecm.wds_rx_filter) {
                         dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
                         return 1;
                 }
-                rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
-                rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
+                rx_policy_ucast = bss_peer->txrx_peer->wds_ecm.wds_rx_ucast_4addr;
+                rx_policy_mcast = bss_peer->txrx_peer->wds_ecm.wds_rx_mcast_4addr;
                 dp_peer_unref_delete(bss_peer, DP_MOD_ID_AST);
         } else {             /* sta mode */
-                if (!peer->wds_ecm.wds_rx_filter) {
+                if (!txrx_peer->wds_ecm.wds_rx_filter)
                         return 1;
-                }
-                rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
-                rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
+
+                rx_policy_ucast = txrx_peer->wds_ecm.wds_rx_ucast_4addr;
+                rx_policy_mcast = txrx_peer->wds_ecm.wds_rx_mcast_4addr;
         }

         /* ------------------------------------------------
@@ -413,9 +427,11 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
         rx_4addr = fr_ds & to_ds;

         if (vdev->opmode == wlan_op_mode_ap) {
-                if ((!peer->wds_enabled && rx_3addr && to_ds) ||
-                    (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) ||
-                    (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) {
+                if ((!txrx_peer->wds_enabled && rx_3addr && to_ds) ||
+                    (txrx_peer->wds_enabled && !rx_mcast &&
+                     (rx_4addr == rx_policy_ucast)) ||
+                    (txrx_peer->wds_enabled && rx_mcast &&
+                     (rx_4addr == rx_policy_mcast))) {
                         return 1;
                 }
         } else {             /* sta mode */
@@ -494,6 +510,7 @@ static
 uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
                                      qdf_nbuf_t buf, uint16_t *vlan_id)
 {
+        struct dp_txrx_peer *txrx_peer = NULL;
         struct dp_peer *peer = NULL;
         qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
         struct vlan_ethhdr *veh = NULL;
@@ -508,9 +525,9 @@ uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,

         if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
                 qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
-                TAILQ_FOREACH(peer, &vdev->mpass_peer_list,
+                TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
                               mpass_peer_list_elem) {
-                        if (*vlan_id == peer->vlan_id) {
+                        if (*vlan_id == txrx_peer->vlan_id) {
                                 qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
                                 return DP_VLAN_TAGGED_MULTICAST;
                         }
@@ -529,7 +546,7 @@ uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
          * Do not drop the frame when vlan_id doesn't match.
          * Send the frame as it is.
          */
-        if (*vlan_id == peer->vlan_id) {
+        if (*vlan_id == peer->txrx_peer->vlan_id) {
                 dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
                 return DP_VLAN_TAGGED_UNICAST;
         }
@@ -617,7 +634,7 @@ bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,

 /**
  * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
- * @vdev: DP vdev handle
+ * @txrx_peer: DP txrx peer handle
  * @nbuf: skb
  * @tid: traffic priority
  *
@@ -629,11 +646,12 @@ bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
  * i. Frame comes from multipass peer but doesn't contain vlan header.
  * In failure case, drop such frames.
  */
-bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
+bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
+                             uint8_t tid)
 {
         struct vlan_ethhdr *vethhdrp;

-        if (qdf_unlikely(!peer->vlan_id))
+        if (qdf_unlikely(!txrx_peer->vlan_id))
                 return true;

         vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
@@ -648,10 +666,10 @@ bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
                 return false;

         vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
-                                     (peer->vlan_id & VLAN_VID_MASK));
+                                     (txrx_peer->vlan_id & VLAN_VID_MASK));

         if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
-                dp_tx_remove_vlan_tag(peer->vdev, nbuf);
+                dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);

         return true;
 }
@@ -671,14 +689,15 @@ bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
 void dp_peer_multipass_list_remove(struct dp_peer *peer)
 {
         struct dp_vdev *vdev = peer->vdev;
-        struct dp_peer *tpeer = NULL;
+        struct dp_txrx_peer *tpeer = NULL;
         bool found = 0;

         qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
         TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
-                if (tpeer == peer) {
+                if (tpeer == peer->txrx_peer) {
                         found = 1;
-                        TAILQ_REMOVE(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
+                        TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
+                                     mpass_peer_list_elem);
                         break;
                 }
         }
@@ -702,7 +721,8 @@ static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
                                        uint8_t vdev_id, uint16_t vlan_id)
 {
         struct dp_peer *peer =
-                        dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
+                        dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
+                                                       vdev_id,
                                                DP_MOD_ID_TX_MULTIPASS);

         if (qdf_unlikely(!peer)) {
@@ -710,27 +730,36 @@ static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
                 return;
         }

+        if (qdf_unlikely(!peer->txrx_peer))
+                goto fail;
+
         /* If peer already exists in vdev multipass list, do not add it.
          * This may happen if key install comes twice or re-key
          * happens for a peer.
          */
-        if (peer->vlan_id) {
+        if (peer->txrx_peer->vlan_id) {
                 dp_debug("peer already added to vdev multipass list"
                          "MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
-                         QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->vlan_id);
-                dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
-                return;
+                         QDF_MAC_ADDR_REF(peer->mac_addr.raw),
+                         peer->txrx_peer->vlan_id);
+                goto fail;
         }

         /*
          * Ref_cnt is incremented inside dp_peer_find_hash_find().
          * Decrement it when element is deleted from the list.
          */
-        peer->vlan_id = vlan_id;
-        qdf_spin_lock_bh(&peer->vdev->mpass_peer_mutex);
-        TAILQ_INSERT_HEAD(&peer->vdev->mpass_peer_list, peer,
+        peer->txrx_peer->vlan_id = vlan_id;
+        qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
+        TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
+                          peer->txrx_peer,
                           mpass_peer_list_elem);
-        qdf_spin_unlock_bh(&peer->vdev->mpass_peer_mutex);
+        qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
+
+        return;
+fail:
+        dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
+        return;
 }

 /**
@@ -817,11 +846,10 @@ fail:
  */
 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
 {
-        struct dp_peer *peer = NULL;
+        struct dp_txrx_peer *txrx_peer = NULL;
         qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
-        TAILQ_FOREACH(peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
-                qdf_err("Peers present in mpass list :" QDF_MAC_ADDR_FMT,
-                        QDF_MAC_ADDR_REF(peer->mac_addr.raw));
+        TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
+                qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
         qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

         if (vdev->iv_vlan_map) {
@@ -1175,7 +1203,7 @@ void dp_hmwds_ast_add_notify(struct dp_peer *peer,
 QDF_STATUS
 dp_get_completion_indication_for_stack(struct dp_soc *soc,
                                        struct dp_pdev *pdev,
-                                       struct dp_peer *peer,
+                                       struct dp_txrx_peer *txrx_peer,
                                        struct hal_tx_completion_status *ts,
                                        qdf_nbuf_t netbuf,
                                        uint64_t time_latency)
@@ -1186,13 +1214,14 @@ dp_get_completion_indication_for_stack(struct dp_soc *soc,
         uint8_t first_msdu = ts->first_msdu;
         uint8_t last_msdu = ts->last_msdu;
         uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);
+        struct dp_peer *peer;

         if (qdf_unlikely(!dp_monitor_is_enable_tx_sniffer(pdev) &&
                          !dp_monitor_is_enable_mcopy_mode(pdev) &&
                          !pdev->latency_capture_enable))
                 return QDF_STATUS_E_NOSUPPORT;

-        if (!peer) {
+        if (!txrx_peer) {
                 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                           FL("Peer Invalid"));
                 return QDF_STATUS_E_INVAL;
@@ -1224,10 +1253,15 @@ dp_get_completion_indication_for_stack(struct dp_soc *soc,
         }

         ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
-        qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
+        qdf_mem_copy(ppdu_hdr->ta, txrx_peer->vdev->mac_addr.raw,
                      QDF_MAC_ADDR_SIZE);
-        qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
-                     QDF_MAC_ADDR_SIZE);
+
+        peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX_COMP);
+        if (peer) {
+                qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
+                             QDF_MAC_ADDR_SIZE);
+                dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
+        }
         ppdu_hdr->ppdu_id = ppdu_id;
         ppdu_hdr->peer_id = peer_id;
         ppdu_hdr->first_msdu = first_msdu;

@@ -42,7 +42,7 @@ int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
 void
 dp_rx_da_learn(struct dp_soc *soc,
                uint8_t *rx_tlv_hdr,
-               struct dp_peer *ta_peer,
+               struct dp_txrx_peer *ta_peer,
                qdf_nbuf_t nbuf);

 void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status);
@@ -116,7 +116,7 @@ static inline void dp_wds_ext_peer_learn(struct dp_soc *soc,

         if (ta_peer->vdev->wds_ext_enabled &&
             !qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
-                                         &ta_peer->wds_ext.init)) {
+                                         &ta_peer->txrx_peer->wds_ext.init)) {
                 qdf_mem_copy(wds_ext_src_mac, &ta_peer->mac_addr.raw[0],
                              QDF_MAC_ADDR_SIZE);
                 soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
@@ -137,7 +137,7 @@ static inline void dp_wds_ext_peer_learn(struct dp_soc *soc,
  * dp_rx_wds_add_or_update_ast() - Add or update the ast entry.
  *
  * @soc: core txrx main context
- * @ta_peer: WDS repeater peer
+ * @ta_txrx_peer: WDS repeater txrx peer
  * @mac_addr: mac address of the peer
  * @is_ad4_valid: 4-address valid flag
  * @is_sa_valid: source address valid flag
@@ -148,7 +148,8 @@ static inline void dp_wds_ext_peer_learn(struct dp_soc *soc,
  * Return: void:
  */
 static inline void
-dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
+dp_rx_wds_add_or_update_ast(struct dp_soc *soc,
+                            struct dp_txrx_peer *ta_peer,
                             qdf_nbuf_t nbuf, uint8_t is_ad4_valid,
                             uint8_t is_sa_valid, uint8_t is_chfrag_start,
                             uint16_t sa_idx, uint16_t sa_sw_peer_id)
@@ -157,7 +158,10 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
         struct dp_ast_entry *ast;
         uint32_t flags = DP_AST_FLAGS_HM;
         uint32_t ret = 0;
+        struct dp_pdev *pdev = ta_peer->vdev->pdev;
         uint8_t wds_src_mac[QDF_MAC_ADDR_SIZE];
+        struct dp_peer *ta_base_peer;

         if (!(is_chfrag_start && is_ad4_valid))
                 return;
@@ -167,12 +171,15 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
                              (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
                              QDF_MAC_ADDR_SIZE);

-                dp_wds_ext_peer_learn(soc, ta_peer);
-                ret = dp_peer_add_ast(soc,
-                                      ta_peer,
-                                      wds_src_mac,
-                                      CDP_TXRX_AST_TYPE_WDS,
-                                      flags);
+                ta_base_peer = dp_peer_get_ref_by_id(soc, ta_peer->peer_id,
+                                                     DP_MOD_ID_RX);
+                if (ta_base_peer) {
+                        dp_wds_ext_peer_learn(soc, ta_base_peer);
+                        ret = dp_peer_add_ast(soc, ta_base_peer, wds_src_mac,
+                                              CDP_TXRX_AST_TYPE_WDS, flags);
+                        dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
+                }
                 return;
         }
@@ -199,11 +206,19 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
                                      (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
                                      QDF_MAC_ADDR_SIZE);

-                        ret = dp_peer_add_ast(soc,
-                                              ta_peer,
-                                              wds_src_mac,
-                                              CDP_TXRX_AST_TYPE_WDS,
-                                              flags);
+                        ta_base_peer = dp_peer_get_ref_by_id(soc,
+                                                             ta_peer->peer_id,
+                                                             DP_MOD_ID_RX);
+                        if (ta_base_peer) {
+                                ret = dp_peer_add_ast(soc, ta_base_peer,
+                                                      wds_src_mac,
+                                                      CDP_TXRX_AST_TYPE_WDS,
+                                                      flags);
+                                dp_peer_unref_delete(ta_base_peer,
+                                                     DP_MOD_ID_RX);
+                        }
                         return;
                 } else {
                         /* In HKv2 smart monitor case, when NAC client is
@@ -213,10 +228,18 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
                          * smart monitor is enabled and send add_ast command
                          * to FW.
                          */
-                        dp_monitor_neighbour_peer_add_ast(ta_peer->vdev->pdev,
-                                                          ta_peer,
-                                                          wds_src_mac, nbuf,
-                                                          flags);
+                        ta_base_peer = dp_peer_get_ref_by_id(soc,
+                                                             ta_peer->peer_id,
+                                                             DP_MOD_ID_RX);
+                        if (ta_base_peer) {
+                                dp_monitor_neighbour_peer_add_ast(pdev,
+                                                                  ta_base_peer,
+                                                                  wds_src_mac,
+                                                                  nbuf,
+                                                                  flags);
+                                dp_peer_unref_delete(ta_base_peer,
+                                                     DP_MOD_ID_RX);
+                        }
                         return;
                 }
         }
@@ -263,8 +286,20 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
                             (ta_peer->vdev->opmode == wlan_op_mode_sta)) {
                                 dp_peer_del_ast(soc, ast);
                         } else {
-                                dp_wds_ext_peer_learn(soc, ta_peer);
-                                dp_peer_update_ast(soc, ta_peer, ast, flags);
+                                ta_base_peer =
+                                        dp_peer_get_ref_by_id(soc,
+                                                              ta_peer->peer_id,
+                                                              DP_MOD_ID_RX);
+                                if (ta_base_peer) {
+                                        dp_wds_ext_peer_learn(soc,
+                                                              ta_base_peer);
+                                        dp_peer_update_ast(soc,
+                                                           ta_base_peer,
+                                                           ast, flags);
+                                        dp_peer_unref_delete(ta_base_peer,
+                                                             DP_MOD_ID_RX);
+                                }
                         }
                         qdf_spin_unlock_bh(&soc->ast_lock);
                         return;
@@ -325,7 +360,7 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
 static inline void
 dp_rx_wds_srcport_learn(struct dp_soc *soc,
                         uint8_t *rx_tlv_hdr,
-                        struct dp_peer *ta_peer,
+                        struct dp_txrx_peer *ta_peer,
                         qdf_nbuf_t nbuf,
                         struct hal_rx_msdu_metadata msdu_end_info)
 {