qcacmn: Avoid asserts in dp related to HW interactions

Avoid asserts in data path which related to HW interactions
and instead use work arounds.

Change-Id: I86089d21c5be23784f8a077b085f3f3b8a2308e4
CRs-Fixed: 3564940
This commit is contained in:
Pavankumar Nandeshwar
2023-08-17 02:46:29 -07:00
committed by Rahul Choudhary
parent 42e618a774
commit ad866ad37f
11 files changed, 131 insertions(+), 106 deletions(-)

View file

@@ -100,6 +100,31 @@ static void dp_ppeds_rings_status(struct dp_soc *soc)
WBM2SW_RELEASE);
}
#ifdef GLOBAL_ASSERT_AVOIDANCE
/**
 * dp_ppeds_print_assert_war_stats() - print PPE-DS Tx assert-workaround stats
 * @be_soc: BE soc handle holding the ppeds_stats counters
 *
 * Dumps the counts of Tx-completion anomalies (bad release buffer source,
 * NULL tx descriptor, invalid descriptor flags) that were worked around
 * instead of asserting.
 *
 * NOTE(review): this variant is non-static while the #else stub below is
 * static - presumably it is declared in a header for the avoidance build;
 * confirm the intended linkage.
 */
void dp_ppeds_print_assert_war_stats(struct dp_soc_be *be_soc)
{
DP_PRINT_STATS("PPE-DS Tx WAR stats: [%u] [%u] [%u]",
be_soc->ppeds_stats.tx.tx_comp_buf_src,
be_soc->ppeds_stats.tx.tx_comp_desc_null,
be_soc->ppeds_stats.tx.tx_comp_invalid_flag);
}
/**
 * dp_ppeds_clear_assert_war_stats() - reset the PPE-DS Tx workaround counters
 * @be_soc: BE soc handle holding the ppeds_stats counters
 */
static void dp_ppeds_clear_assert_war_stats(struct dp_soc_be *be_soc)
{
be_soc->ppeds_stats.tx.tx_comp_buf_src = 0;
be_soc->ppeds_stats.tx.tx_comp_desc_null = 0;
be_soc->ppeds_stats.tx.tx_comp_invalid_flag = 0;
}
#else
/* Assert-avoidance disabled: the WAR counters do not exist, so the print
 * and clear helpers are empty stubs.
 */
static void dp_ppeds_print_assert_war_stats(struct dp_soc_be *be_soc)
{
}
static void dp_ppeds_clear_assert_war_stats(struct dp_soc_be *be_soc)
{
}
#endif
static void dp_ppeds_inuse_desc(struct dp_soc *soc)
{
struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
@@ -110,6 +135,8 @@ static void dp_ppeds_inuse_desc(struct dp_soc *soc)
DP_PRINT_STATS("PPE-DS Tx desc alloc failed %u",
be_soc->ppeds_stats.tx.desc_alloc_failed);
dp_ppeds_print_assert_war_stats(be_soc);
}
static void dp_ppeds_clear_stats(struct dp_soc *soc)
@@ -117,6 +144,7 @@ static void dp_ppeds_clear_stats(struct dp_soc *soc)
struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
be_soc->ppeds_stats.tx.desc_alloc_failed = 0;
dp_ppeds_clear_assert_war_stats(be_soc);
}
static void dp_ppeds_rings_stats(struct dp_soc *soc)
@@ -3329,7 +3357,6 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
dp_wbm_get_rx_desc_from_hal_desc_be;
arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_be;
arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_be;
arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_be;
arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_be;
#endif

View file

@@ -354,6 +354,11 @@ struct dp_soc_be {
struct {
struct {
uint64_t desc_alloc_failed;
#ifdef GLOBAL_ASSERT_AVOIDANCE
uint32_t tx_comp_buf_src;
uint32_t tx_comp_desc_null;
uint32_t tx_comp_invalid_flag;
#endif
} tx;
} ppeds_stats;
#endif

View file

@@ -1366,6 +1366,11 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
&dest_chip_id,
&dest_chip_pmac_id);
if (dp_assert_always_internal_stat(
(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
&be_soc->soc, rx.err.intra_bss_bad_chipid))
return false;
params->dest_soc =
dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
dest_chip_id);
@@ -1384,8 +1389,6 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
}
qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
if (dest_chip_id == be_soc->mlo_chip_id) {
if (dest_chip_pmac_id == ta_peer->vdev->pdev->pdev_id)
params->tx_vdev_id = ta_peer->vdev->vdev_id;
@@ -1424,7 +1427,11 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
return false;
dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
if (dp_assert_always_internal_stat(
(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
&be_soc->soc, rx.err.intra_bss_bad_chipid))
return false;
da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
/* use dest chip id when TA is MLD peer and DA is legacy */
@@ -1728,75 +1735,6 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
}
#endif
/**
 * dp_rx_chain_msdus_be() - chain invalid-peer MSDUs until the MPDU completes
 * @soc: DP soc handle
 * @nbuf: received MSDU buffer to append to the invalid-peer list
 * @rx_tlv_hdr: start of the rx TLV header for @nbuf
 * @mac_id: lmac id used to resolve the owning pdev
 *
 * Accumulates MSDUs received for an unknown/invalid peer on
 * dp_pdev->invalid_peer_head_msdu / invalid_peer_tail_msdu so the whole
 * MPDU can be handed off at once.
 *
 * Return: true once the full MPDU has been received (rx_chfrag_end set on
 * @nbuf and the HW msdu_done attention bit present), false otherwise.
 */
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
bool mpdu_done = false;
qdf_nbuf_t curr_nbuf = NULL;
qdf_nbuf_t tmp_nbuf = NULL;
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
if (!dp_pdev) {
dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
return mpdu_done;
}
/* if invalid peer SG list has max values free the buffers in list
 * and treat current buffer as start of list
 *
 * current logic to detect the last buffer from attn_tlv is not reliable
 * in OFDMA UL scenario hence add max buffers check to avoid list pile
 * up
 */
if (!dp_pdev->first_nbuf ||
(dp_pdev->invalid_peer_head_msdu &&
QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
(dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
/* Start a fresh chain: mark @nbuf as the first fragment. */
qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
dp_pdev->first_nbuf = true;
/* If the new nbuf received is the first msdu of the
 * amsdu and there are msdus in the invalid peer msdu
 * list, then let us free all the msdus of the invalid
 * peer msdu list.
 * This scenario can happen when we start receiving
 * new a-msdu even before the previous a-msdu is completely
 * received.
 */
curr_nbuf = dp_pdev->invalid_peer_head_msdu;
while (curr_nbuf) {
tmp_nbuf = curr_nbuf->next;
dp_rx_nbuf_free(curr_nbuf);
curr_nbuf = tmp_nbuf;
}
dp_pdev->invalid_peer_head_msdu = NULL;
dp_pdev->invalid_peer_tail_msdu = NULL;
dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
}
/* MPDU is complete when the last-fragment flag and the HW msdu_done
 * attention bit agree.
 */
if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
/* NOTE(review): hard assert remains in this HW-driven path even
 * though this commit's stated goal is assert avoidance - confirm
 * whether it should also become a WAR counter.
 */
qdf_assert_always(dp_pdev->first_nbuf);
dp_pdev->first_nbuf = false;
mpdu_done = true;
}
/*
 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
 * should be NULL here, add the checking for debugging purpose
 * in case some corner case.
 */
DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
dp_pdev->invalid_peer_tail_msdu);
DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
dp_pdev->invalid_peer_tail_msdu,
nbuf);
return mpdu_done;
}
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
@@ -1852,7 +1790,9 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
continue;
}
qdf_assert_always(rx_desc);
if (dp_assert_always_internal_stat(rx_desc, soc,
rx.err.rx_desc_null))
continue;
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pK: Invalid rx_desc %pK",
@@ -1906,8 +1846,11 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(wbm_err.info_bit.buffer_or_desc_type ==
HAL_RX_WBM_BUF_TYPE_REL_BUF);
if (dp_assert_always_internal_stat(
wbm_err.info_bit.buffer_or_desc_type ==
HAL_RX_WBM_BUF_TYPE_REL_BUF,
soc, rx.err.wbm_err_buf_rel_type))
continue;
/*
* Errors are handled only if the source is RXDMA or REO
*/
@@ -2097,7 +2040,6 @@ dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*
@@ -2138,15 +2080,6 @@ dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
nbuf,
mpdu_done,
pool_id);
} else {
mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
rx_tlv_hdr,
pool_id);
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(
soc,
pdev->invalid_peer_head_msdu,
mpdu_done, pool_id);
}
if (mpdu_done) {

View file

@@ -26,6 +26,7 @@
#include <hal_be_api.h>
#include <hal_be_tx.h>
#include <dp_htt.h>
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
@@ -219,7 +220,7 @@ void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
uint8_t vdev_id;
uint32_t *htt_desc = (uint32_t *)status;
qdf_assert_always(!soc->mec_fw_offload);
dp_assert_always_internal(soc->mec_fw_offload);
/*
* Get vdev id from HTT status word in case of MEC
@@ -1079,7 +1080,9 @@ int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
dp_err("Tx comp release_src != TQM | FW but from %d",
buf_src);
qdf_assert_always(0);
dp_assert_always_internal_ds_stat(0, be_soc,
tx.tx_comp_buf_src);
continue;
}
dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
@@ -1087,14 +1090,16 @@ int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
if (!tx_desc) {
dp_err("unable to retrieve tx_desc!");
qdf_assert_always(0);
dp_assert_always_internal_ds_stat(0, be_soc,
tx.tx_comp_desc_null);
continue;
}
if (qdf_unlikely(!(tx_desc->flags &
DP_TX_DESC_FLAG_ALLOCATED) ||
!(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
qdf_assert_always(0);
dp_assert_always_internal_ds_stat(0, be_soc,
tx.tx_comp_invalid_flag);
continue;
}
@@ -1423,7 +1428,7 @@ QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
qdf_assert_always(num_tcl_banks);
dp_assert_always_internal(num_tcl_banks);
be_soc->num_bank_profiles = num_tcl_banks;
be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *

View file

@@ -3477,20 +3477,20 @@ dp_rx_mlo_timestamp_ind_handler(struct dp_soc *soc,
/**
 * dp_htt_mlo_peer_map_handler() - stub for an MLO peer-map event that is not
 * expected in this configuration
 * @soc: HTT soc handle
 * @msg_word: raw HTT message words (unused)
 *
 * Per this commit's assert-avoidance policy, log the anomaly instead of
 * asserting; the leftover qdf_assert_always(0) made the alert unreachable.
 */
static void dp_htt_mlo_peer_map_handler(struct htt_soc *soc,
					uint32_t *msg_word)
{
	dp_alert("Unexpected event");
}
/**
 * dp_htt_mlo_peer_unmap_handler() - stub for an MLO peer-unmap event that is
 * not expected in this configuration
 * @soc: HTT soc handle
 * @msg_word: raw HTT message words (unused)
 *
 * Per this commit's assert-avoidance policy, log the anomaly instead of
 * asserting; the leftover qdf_assert_always(0) made the alert unreachable.
 */
static void dp_htt_mlo_peer_unmap_handler(struct htt_soc *soc,
					  uint32_t *msg_word)
{
	dp_alert("Unexpected event");
}
/**
 * dp_rx_mlo_timestamp_ind_handler() - stub for an MLO timestamp indication
 * that is not expected in this configuration
 * @soc_handle: opaque soc handle
 * @msg_word: raw HTT message words (unused)
 *
 * Per this commit's assert-avoidance policy, log the anomaly instead of
 * asserting; the leftover qdf_assert_always(0) made the alert unreachable.
 */
static void
dp_rx_mlo_timestamp_ind_handler(void *soc_handle,
				uint32_t *msg_word)
{
	dp_alert("Unexpected event");
}
static void dp_htt_t2h_primary_link_migration(struct htt_soc *soc,

View file

@@ -201,6 +201,34 @@ static const enum cdp_packet_type hal_2_dp_pkt_type_map[HAL_DOT11_MAX] = {
[HAL_DOT11N_GF] = DOT11_MAX,
};
#ifdef GLOBAL_ASSERT_AVOIDANCE
/*
 * Assert-avoidance helpers: instead of crashing via qdf_assert_always(),
 * record the failure and let the caller bail out.  Contract for all three:
 * they evaluate to true when the asserted condition FAILED, so call sites
 * read "if (dp_assert_always_internal_stat(...)) continue;".
 */
/* Bump _handle->stats._field and yield true when _expr is false. */
#define dp_assert_always_internal_stat(_expr, _handle, _field) \
(qdf_unlikely(!(_expr)) ? ((_handle)->stats._field++, true) : false)
/*
 * PPE-DS variant: unconditionally bumps _handle->ppeds_stats._field.
 * NOTE(review): _expr is ignored here - the visible callers pass a constant
 * 0 (always-failing) condition, so this is currently equivalent, but any new
 * call site with a real condition would mis-count; confirm this is intended.
 */
#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
((_handle)->ppeds_stats._field++)
/* Return true when the asserted expression is false (assert "fired"). */
static inline bool dp_assert_always_internal(bool expr)
{
return !expr;
}
#else
/*
 * Assert-enabled build: delegate to qdf_assert_always(). If execution
 * continues past the assert, report "no failure" so callers do not bail.
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier in C - consider renaming.
 */
static inline bool __dp_assert_always_internal(bool expr)
{
qdf_assert_always(expr);
return false;
}
#define dp_assert_always_internal(_expr) __dp_assert_always_internal(_expr)
#define dp_assert_always_internal_stat(_expr, _handle, _field) \
dp_assert_always_internal(_expr)
#define dp_assert_always_internal_ds_stat(_expr, _handle, _field) \
dp_assert_always_internal(_expr)
#endif
#ifdef WLAN_FEATURE_11BE
/**
* dp_get_mcs_array_index_by_pkt_type_mcs() - get the destination mcs index

View file

@@ -761,8 +761,6 @@ QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
if (!rxdma_ring_entry)
break;
qdf_assert_always(rxdma_ring_entry);
desc_list->rx_desc.nbuf = nbuf;
dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
desc_list->rx_desc.rx_buf_start = nbuf->data;

View file

@@ -28,6 +28,7 @@
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#include "dp_internal.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
@@ -1073,7 +1074,10 @@ more_msdu_link_desc:
soc,
msdu_list.sw_cookie[i]);
qdf_assert_always(rx_desc);
if (dp_assert_always_internal_stat(rx_desc, soc,
rx.err.reo_err_rx_desc_null))
continue;
nbuf = rx_desc->nbuf;
/*
@@ -1638,7 +1642,6 @@ dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*
@@ -2694,7 +2697,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_rx_err_alert("invalid reo push reason %u",
wbm_err.info_bit.reo_psh_rsn);
dp_rx_nbuf_free(nbuf);
qdf_assert_always(0);
dp_assert_always_internal(0);
}
} else if (wbm_err.info_bit.wbm_err_src ==
HAL_RX_WBM_ERR_SRC_RXDMA) {
@@ -2821,7 +2824,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_rx_err_alert("invalid rxdma push reason %u",
wbm_err.info_bit.rxdma_psh_rsn);
dp_rx_nbuf_free(nbuf);
qdf_assert_always(0);
dp_assert_always_internal(0);
}
} else {
/* Should not come here */

View file

@@ -8359,6 +8359,20 @@ void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask)
}
#endif
#ifdef GLOBAL_ASSERT_AVOIDANCE
/**
 * dp_print_assert_war_stats() - print Rx assert-avoidance workaround stats
 * @soc: DP soc handle holding the soc->stats.rx.err counters
 *
 * Dumps the counts of HW-interaction anomalies (NULL rx desc, bad WBM
 * release buffer type, NULL REO-entry rx desc, bad intra-bss chip id)
 * that were worked around instead of asserting.
 */
static void dp_print_assert_war_stats(struct dp_soc *soc)
{
	/* The first three counters are uint32_t and intra_bss_bad_chipid is
	 * uint64_t; the original "%d" specifiers mismatched the varargs
	 * types (undefined behavior for the 64-bit field on LP64).
	 */
	DP_PRINT_STATS("Rx WAR stats: [%u] [%u] [%u] [%llu]",
		       soc->stats.rx.err.rx_desc_null,
		       soc->stats.rx.err.wbm_err_buf_rel_type,
		       soc->stats.rx.err.reo_err_rx_desc_null,
		       (unsigned long long)
		       soc->stats.rx.err.intra_bss_bad_chipid);
}
#else
/* Assert-avoidance disabled: no WAR counters to print. */
static void dp_print_assert_war_stats(struct dp_soc *soc)
{
}
#endif
void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
@@ -8500,6 +8514,7 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
soc->stats.rx.err.defrag_ad1_invalid);
DP_PRINT_STATS("Rx decrypt error frame for valid peer:%d",
soc->stats.rx.err.decrypt_err_drop);
dp_print_assert_war_stats(soc);
}
#ifdef FEATURE_TSO_STATS

View file

@@ -1369,6 +1369,16 @@ struct dp_soc_stats {
uint32_t defrag_ad1_invalid;
/* decrypt error drop */
uint32_t decrypt_err_drop;
#ifdef GLOBAL_ASSERT_AVOIDANCE
/* rx_desc NULL war count*/
uint32_t rx_desc_null;
/* wbm err invalid release buffer type */
uint32_t wbm_err_buf_rel_type;
/* Reo entry rx desc null */
uint32_t reo_err_rx_desc_null;
/* Invalid chip id received in intrabss path */
uint64_t intra_bss_bad_chipid;
#endif
} err;
/* packet count per core - per ring */

View file

@@ -1141,10 +1141,10 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
/* XXX */
buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
if (dp_assert_always_internal_stat(
buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF,
soc, rx.err.wbm_err_buf_rel_type))
continue;
wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
@@ -1157,7 +1157,9 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
continue;
}
qdf_assert_always(rx_desc);
if (dp_assert_always_internal_stat(rx_desc, soc,
rx.err.rx_desc_null))
continue;
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pk: Invalid rx_desc %pk",
@@ -1371,7 +1373,6 @@ dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*