qcacmn: mcast_mlo handling in intrabss fwd

Handle MLO multicast (mcast) packets in the intra-BSS forwarding path.

Change-Id: I23c1a9759ac3ac59f2f46fdb456c616c77823e8c
CRs-Fixed: 3353501
This commit is contained in:
KARTHIK KUMAR T
2022-12-05 00:03:21 +05:30
committed by Madan Koyyalamudi
parent 851e62a653
commit 70e9286f37
6 changed files with 76 additions and 24 deletions

View File

@@ -2502,7 +2502,8 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
#endif #endif
arch_ops->dp_rx_desc_cookie_2_va = arch_ops->dp_rx_desc_cookie_2_va =
dp_rx_desc_cookie_2_va_be; dp_rx_desc_cookie_2_va_be;
arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_be; arch_ops->dp_rx_intrabss_mcast_handler =
dp_rx_intrabss_mcast_handler_be;
arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_be; arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_be;
arch_ops->txrx_soc_attach = dp_soc_attach_be; arch_ops->txrx_soc_attach = dp_soc_attach_be;

View File

@@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* *
* Permission to use, copy, modify, and/or distribute this software for * Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the * any purpose with or without fee is hereby granted, provided that the
@@ -1613,8 +1613,50 @@ void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
{ {
} }
#endif #endif
/*
* dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case #if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
/**
 * dp_rx_intrabss_mlo_mcbc_fwd() - forward an intra-BSS mcast/bcast copy
 *	through the MLO mcast primary vdev as an exception packet
 * @soc: core txrx soc context
 * @vdev: vdev on which the frame was received
 * @nbuf_copy: duplicated mcast/bcast frame to transmit
 *
 * Return: true if the frame was consumed here (transmitted via the mcast
 *	   primary vdev, or freed on tx failure); false if @vdev is not an
 *	   MLO vdev or no mcast primary vdev exists, in which case the
 *	   caller retains ownership of @nbuf_copy.
 */
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
				 qdf_nbuf_t nbuf_copy)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_tx_exception_metadata tx_exc_metadata = {0};
	struct dp_vdev *primary_vdev;

	/* Only MLO vdevs take the mcast-primary forwarding path */
	if (!vdev->mlo_vdev)
		return false;

	/* Takes a DP_MOD_ID_RX reference; released below */
	primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
						     DP_MOD_ID_RX);
	if (!primary_vdev)
		return false;

	tx_exc_metadata.is_mlo_mcast = 1;

	/* dp_tx_send_exception() returns the nbuf when it could not be
	 * consumed; free it here so the copy never leaks.
	 */
	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
					 primary_vdev->pdev->soc,
					 primary_vdev->vdev_id,
					 nbuf_copy, &tx_exc_metadata);
	if (nbuf_copy)
		qdf_nbuf_free(nbuf_copy);

	dp_vdev_unref_delete(primary_vdev->pdev->soc, primary_vdev,
			     DP_MOD_ID_RX);

	return true;
}
#else
/* Stub for builds without WLAN_MCAST_MLO && CONFIG_MLO_SINGLE_DEV: there is
 * no MLO mcast primary vdev, so the MLO mcast forwarding path is a no-op.
 */
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf_copy)
{
/* Never consumes the frame; caller retains ownership of nbuf_copy */
return false;
}
#endif
/**
* dp_rx_intrabss_mcast_handler_be() - handler for mcast packets
* @soc: core txrx main context * @soc: core txrx main context
* @ta_txrx_peer: source txrx_peer entry * @ta_txrx_peer: source txrx_peer entry
* @nbuf_copy: nbuf that has to be intrabss forwarded * @nbuf_copy: nbuf that has to be intrabss forwarded
@@ -1623,10 +1665,10 @@ void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
* Return: true if it is forwarded else false * Return: true if it is forwarded else false
*/ */
bool bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer, struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf_copy, qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats) struct cdp_tid_rx_stats *tid_stats)
{ {
if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) { if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
struct cdp_tx_exception_metadata tx_exc_metadata = {0}; struct cdp_tx_exception_metadata tx_exc_metadata = {0};
@@ -1635,10 +1677,11 @@ dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
tx_exc_metadata.peer_id = ta_txrx_peer->peer_id; tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
tx_exc_metadata.is_intrabss_fwd = 1; tx_exc_metadata.is_intrabss_fwd = 1;
tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID; tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
if (dp_tx_send_exception((struct cdp_soc_t *)soc, if (dp_tx_send_exception((struct cdp_soc_t *)soc,
ta_txrx_peer->vdev->vdev_id, ta_txrx_peer->vdev->vdev_id,
nbuf_copy, nbuf_copy,
&tx_exc_metadata)) { &tx_exc_metadata)) {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer, DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
rx.intra_bss.fail, 1, rx.intra_bss.fail, 1,
len); len);
@@ -1652,6 +1695,11 @@ dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
} }
return true; return true;
} }
if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
nbuf_copy))
return true;
return false; return false;
} }

View File

@@ -58,8 +58,8 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
struct hal_rx_msdu_metadata msdu_metadata); struct hal_rx_msdu_metadata msdu_metadata);
#endif #endif
/* /**
* dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case * dp_rx_intrabss_mcast_handler_be() - intrabss mcast handler
* @soc: core txrx main context * @soc: core txrx main context
* @ta_txrx_peer: source txrx_peer entry * @ta_txrx_peer: source txrx_peer entry
* @nbuf_copy: nbuf that has to be intrabss forwarded * @nbuf_copy: nbuf that has to be intrabss forwarded
@@ -68,9 +68,10 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
* Return: true if it is forwarded else false * Return: true if it is forwarded else false
*/ */
bool bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
qdf_nbuf_t nbuf_copy, struct dp_txrx_peer *ta_txrx_peer,
struct cdp_tid_rx_stats *tid_stats); qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats);
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc, void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
uint32_t *msg_word, uint32_t *msg_word,

View File

@@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* *
* Permission to use, copy, modify, and/or distribute this software for * Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the * any purpose with or without fee is hereby granted, provided that the
@@ -1071,8 +1071,9 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb)); qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy); dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy, if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
tid_stats)) nbuf_copy,
tid_stats))
return false; return false;
/* Don't send packets if tx is paused */ /* Don't send packets if tx is paused */

View File

@@ -1927,10 +1927,10 @@ struct dp_arch_ops {
struct dp_rx_desc **r_rx_desc); struct dp_rx_desc **r_rx_desc);
bool bool
(*dp_rx_intrabss_handle_nawds)(struct dp_soc *soc, (*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer, struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf_copy, qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats); struct cdp_tid_rx_stats *tid_stats);
void (*dp_rx_word_mask_subscribe)( void (*dp_rx_word_mask_subscribe)(
struct dp_soc *soc, struct dp_soc *soc,

View File

@@ -643,7 +643,8 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
arch_ops->txrx_peer_setup = dp_peer_setup_li; arch_ops->txrx_peer_setup = dp_peer_setup_li;
arch_ops->dp_rx_desc_cookie_2_va = arch_ops->dp_rx_desc_cookie_2_va =
dp_rx_desc_cookie_2_va_li; dp_rx_desc_cookie_2_va_li;
arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li; arch_ops->dp_rx_intrabss_mcast_handler =
dp_rx_intrabss_handle_nawds_li;
arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li; arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li; arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
arch_ops->dp_rx_peer_metadata_peer_id_get = arch_ops->dp_rx_peer_metadata_peer_id_get =