qcacmn: Dump the rx reo queue descs in ddr

Add iwpriv option 34 to dump the reo rx h/w descs
in DDR for debugging. This cmd first sends a cache
flush cmd to REO for all rx tids and invalidates the h/w
cache, thereby ensuring that the reo status tlvs and
the DDR values are in sync.
iwpriv wlan0 txrx_stats 34 0
Add fix to ensure bar frame with 2k jump err code is
processed correctly using the REO error code instead of the
REO push reason.

Change-Id: Ia05be668343f3a5d4b3262b8d6a367a50875add5
CRs-Fixed: 2895965
This commit is contained in:
Nisha Menon
2021-02-25 23:04:14 -08:00
committed by snandini
parent fe6b1dc264
commit 5d7e26e27f
9 changed files with 254 additions and 14 deletions

View File

@@ -293,6 +293,7 @@ enum cdp_host_txrx_stats {
TXRX_SOC_INTERRUPT_STATS = 12, TXRX_SOC_INTERRUPT_STATS = 12,
TXRX_SOC_FSE_STATS = 13, TXRX_SOC_FSE_STATS = 13,
TXRX_HAL_REG_WRITE_STATS = 14, TXRX_HAL_REG_WRITE_STATS = 14,
TXRX_SOC_REO_HW_DESC_DUMP = 15,
TXRX_HOST_STATS_MAX, TXRX_HOST_STATS_MAX,
}; };

View File

@@ -417,6 +417,7 @@ const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS}, {TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS}, {TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS}, {TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID} {HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
}; };
@@ -8620,6 +8621,10 @@ dp_print_host_stats(struct dp_vdev *vdev,
hal_dump_reg_write_stats(pdev->soc->hal_soc); hal_dump_reg_write_stats(pdev->soc->hal_soc);
hal_dump_reg_write_srng_stats(pdev->soc->hal_soc); hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
break; break;
case TXRX_SOC_REO_HW_DESC_DUMP:
dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
vdev->vdev_id);
break;
default: default:
dp_info("Wrong Input For TxRx Host Stats"); dp_info("Wrong Input For TxRx Host Stats");
dp_txrx_stats_help(); dp_txrx_stats_help();

View File

@@ -2615,6 +2615,7 @@ try_desc_alloc:
} else { } else {
hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned; hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
} }
rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
/* TODO: Ensure that sec_type is set before ADDBA is received. /* TODO: Ensure that sec_type is set before ADDBA is received.
* Currently this is set based on htt indication * Currently this is set based on htt indication
@@ -4511,3 +4512,91 @@ struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
qdf_spin_unlock_bh(&vdev->peer_list_lock); qdf_spin_unlock_bh(&vdev->peer_list_lock);
return peer; return peer;
} }
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_dump_rx_reo_queue_info() - CMD_FLUSH_CACHE completion callback
 * @soc: dp_soc handle
 * @cb_ctxt: callback context, the struct dp_rx_tid whose queue was flushed
 * @reo_status: REO command completion status read from the status ring
 *
 * Registered when the cache flush command is sent for a tid. On success
 * the h/w cache has been flushed and invalidated, so the reo queue
 * descriptor contents in DDR are valid and are dumped under the tid lock.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if (!rx_tid)
		return;

	if (reo_status->fl_cache_status.header.status !=
	    HAL_REO_CMD_SUCCESS) {
		/* Log the flush-cache status that was checked above;
		 * previously this read rx_queue_status, a different
		 * member of the hal_reo_status union.
		 */
		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
			  reo_status->fl_cache_status.header.status);
		return;
	}

	/* Hold the tid lock so the hw qdesc cannot be freed mid-dump */
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
	qdf_spin_unlock_bh(&rx_tid->tid_lock);
}
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer whose rx tid hw queue descriptors are to be flushed
 *
 * For every tid that has an allocated hw reo queue descriptor, sends a
 * CMD_FLUSH_CACHE (with invalidate) to REO and registers
 * dp_dump_rx_reo_queue_info() as the completion callback, so that the
 * descriptor can subsequently be dumped from DDR while h/w cache and
 * DDR are in sync.
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer)
{
	int i;
	struct dp_rx_tid *rx_tid;
	struct hal_reo_cmd_params params;

	if (!peer) {
		dp_err_rl("Peer is NULL");
		return;
	}

	for (i = 0; i < DP_MAX_TIDS; i++) {
		/* &peer->rx_tid[i] addresses an embedded array element and
		 * can never be NULL once peer is validated, so no per-tid
		 * NULL check is needed.
		 */
		rx_tid = &peer->rx_tid[i];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (rx_tid->hw_qdesc_vaddr_aligned) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			/* flush_no_inval = 0: invalidate the h/w cache so
			 * the DDR copy is authoritative for the dump.
			 */
			params.u.fl_cache_params.flush_no_inval = 0;
			if (QDF_STATUS_SUCCESS !=
				dp_reo_send_cmd(
					soc, CMD_FLUSH_CACHE,
					&params, dp_dump_rx_reo_queue_info,
					(void *)rx_tid)) {
				dp_err_rl("cache flush send failed tid %d",
					  rx_tid->tid);
				qdf_spin_unlock_bh(&rx_tid->tid_lock);
				break;
			}
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
}
/**
 * dp_get_rx_reo_queue_info() - Trigger dump of rx reo queue descs from DDR
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of the vdev whose bss peer tids are to be dumped
 *
 * Looks up the vdev and its bss peer, then issues REO cache flush
 * commands for each rx tid; the flush completion callback dumps the
 * reo queue descriptors from DDR.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_peer *peer;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_GENERIC_STATS);
	if (!vdev) {
		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
		return;
	}

	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	if (peer) {
		dp_send_cache_flush_for_rx_tid(soc, peer);
		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
	} else {
		dp_err_rl("Peer is NULL");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */

View File

@@ -22,6 +22,10 @@
#include <qdf_lock.h> #include <qdf_lock.h>
#include "dp_types.h" #include "dp_types.h"
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif
#define DP_INVALID_PEER_ID 0xffff #define DP_INVALID_PEER_ID 0xffff
#define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */ #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
@@ -944,4 +948,55 @@ static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{ {
} }
#endif #endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
* dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
* @soc : dp_soc handle
* @peer: peer
*
* This function is used to send cache flush cmd to reo and
* to register the callback to handle the dumping of the reo
 * queue stats from DDR
*
* Return: none
*/
void dp_send_cache_flush_for_rx_tid(
struct dp_soc *soc, struct dp_peer *peer);
/**
* dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
* @vdev_id: vdev id
*
* Handler to get rx tid info from DDR after h/w cache is
* invalidated first using the cache flush cmd.
*
* Return: none
*/
void dp_get_rx_reo_queue_info(
struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
/**
* dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
* @soc : dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: reo command completion status
*
* This is the callback function registered after sending the reo cmd
* to flush the h/w cache and invalidate it. In the callback the reo
* queue desc info is dumped from DDR.
*
* Return: none
*/
void dp_dump_rx_reo_queue_info(
struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */ #endif /* _DP_PEER_H_ */

View File

@@ -812,12 +812,26 @@ void dp_rx_err_handle_bar(struct dp_soc *soc,
start_seq_num); start_seq_num);
} }
/**
* dp_rx_bar_frame_handle() - Function to handle err BAR frames
* @soc: core DP main context
* @ring_desc: Hal ring desc
* @rx_desc: dp rx desc
* @mpdu_desc_info: mpdu desc info
*
* Handle the error BAR frames received. Ensure the SOC level
* stats are updated based on the REO error code. The BAR frames
* are further processed by updating the Rx tids with the start
* sequence number (SSN) and BA window size. Desc is returned
* to the free desc list
*
* Return: none
*/
static void static void
dp_rx_bar_frame_handle(struct dp_soc *soc, dp_rx_bar_frame_handle(struct dp_soc *soc,
hal_ring_desc_t ring_desc, hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc, struct dp_rx_desc *rx_desc,
struct hal_rx_mpdu_desc_info *mpdu_desc_info, struct hal_rx_mpdu_desc_info *mpdu_desc_info)
uint8_t error)
{ {
qdf_nbuf_t nbuf; qdf_nbuf_t nbuf;
struct dp_pdev *pdev; struct dp_pdev *pdev;
@@ -826,6 +840,7 @@ dp_rx_bar_frame_handle(struct dp_soc *soc,
uint16_t peer_id; uint16_t peer_id;
uint8_t *rx_tlv_hdr; uint8_t *rx_tlv_hdr;
uint32_t tid; uint32_t tid;
uint8_t reo_err_code;
nbuf = rx_desc->nbuf; nbuf = rx_desc->nbuf;
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
@@ -849,24 +864,25 @@ dp_rx_bar_frame_handle(struct dp_soc *soc,
if (!peer) if (!peer)
goto next; goto next;
reo_err_code = HAL_RX_REO_ERROR_GET(ring_desc);
dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT
" peer_id = %d" " peer_id = %d"
" tid = %u" " tid = %u"
" SSN = %d" " SSN = %d"
" error status = %d", " error code = %d",
QDF_MAC_ADDR_REF(peer->mac_addr.raw), QDF_MAC_ADDR_REF(peer->mac_addr.raw),
peer->peer_id, peer->peer_id,
tid, tid,
mpdu_desc_info->mpdu_seq, mpdu_desc_info->mpdu_seq,
error); reo_err_code);
switch (error) { switch (reo_err_code) {
case HAL_REO_ERR_BAR_FRAME_2K_JUMP: case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
/* fallthrough */ /* fallthrough */
case HAL_REO_ERR_BAR_FRAME_OOR: case HAL_REO_ERR_BAR_FRAME_OOR:
dp_rx_err_handle_bar(soc, peer, nbuf); dp_rx_err_handle_bar(soc, peer, nbuf);
DP_STATS_INC(soc, DP_STATS_INC(soc,
rx.err.reo_error[error], 1); rx.err.reo_error[reo_err_code], 1);
break; break;
default: default:
DP_STATS_INC(soc, rx.bar_frame, 1); DP_STATS_INC(soc, rx.bar_frame, 1);
@@ -1753,15 +1769,12 @@ dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_rx_bar_frame_handle(soc, dp_rx_bar_frame_handle(soc,
ring_desc, ring_desc,
rx_desc, rx_desc,
&mpdu_desc_info, &mpdu_desc_info);
error);
rx_bufs_reaped[mac_id] += 1; rx_bufs_reaped[mac_id] += 1;
goto next_entry; goto next_entry;
} }
dp_info_rl("Got pkt with REO ERROR: %d", error);
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
/* /*
* We only handle one msdu per link desc for fragmented * We only handle one msdu per link desc for fragmented

View File

@@ -692,6 +692,7 @@ struct dp_rx_tid {
uint8_t pn_size; uint8_t pn_size;
/* REO TID queue descriptors */ /* REO TID queue descriptors */
void *hw_qdesc_vaddr_unaligned; void *hw_qdesc_vaddr_unaligned;
void *hw_qdesc_vaddr_aligned;
qdf_dma_addr_t hw_qdesc_paddr_unaligned; qdf_dma_addr_t hw_qdesc_paddr_unaligned;
qdf_dma_addr_t hw_qdesc_paddr; qdf_dma_addr_t hw_qdesc_paddr;
uint32_t hw_qdesc_alloc_size; uint32_t hw_qdesc_alloc_size;

View File

@@ -27,6 +27,10 @@
#include "hif_io32.h" #include "hif_io32.h"
#include "qdf_platform.h" #include "qdf_platform.h"
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_hw_headers.h"
#endif
/* Ring index for WBM2SW2 release ring */ /* Ring index for WBM2SW2 release ring */
#define HAL_IPA_TX_COMP_RING_IDX 2 #define HAL_IPA_TX_COMP_RING_IDX 2
@@ -2488,6 +2492,78 @@ void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
} }
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
* hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
* @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
*
* Use the virtual addr pointer to reo h/w queue desc to read
* the values from ddr and log them.
*
* Return: none
*/
static inline void hal_dump_rx_reo_queue_desc(
	void *hw_qdesc_vaddr_aligned)
{
	struct rx_reo_queue *qdesc;

	/* Nothing to dump if the queue descriptor was never allocated */
	if (!hw_qdesc_vaddr_aligned)
		return;

	qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr_aligned;

	/* Log the reo queue descriptor fields as read back from DDR */
	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
		 " svld %u ssn %u current_index %u"
		 " disable_duplicate_detection %u soft_reorder_enable %u"
		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
		 " msdu_frames_processed_count %u total_processed_byte_count %u"
		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
		 " pn_error_detected_flag %u current_mpdu_count %u"
		 " current_msdu_count %u timeout_count %u"
		 " forward_due_to_bar_count %u duplicate_count %u"
		 " frames_in_order_count %u bar_received_count %u"
		 " pn_check_needed %u pn_shall_be_even %u"
		 " pn_shall_be_uneven %u pn_size %u",
		 qdesc->receive_queue_number,
		 qdesc->vld,
		 qdesc->window_jump_2k,
		 qdesc->hole_count,
		 qdesc->ba_window_size,
		 qdesc->ignore_ampdu_flag,
		 qdesc->svld,
		 qdesc->ssn,
		 qdesc->current_index,
		 qdesc->disable_duplicate_detection,
		 qdesc->soft_reorder_enable,
		 qdesc->chk_2k_mode,
		 qdesc->oor_mode,
		 qdesc->mpdu_frames_processed_count,
		 qdesc->msdu_frames_processed_count,
		 qdesc->total_processed_byte_count,
		 qdesc->late_receive_mpdu_count,
		 qdesc->seq_2k_error_detected_flag,
		 qdesc->pn_error_detected_flag,
		 qdesc->current_mpdu_count,
		 qdesc->current_msdu_count,
		 qdesc->timeout_count,
		 qdesc->forward_due_to_bar_count,
		 qdesc->duplicate_count,
		 qdesc->frames_in_order_count,
		 qdesc->bar_received_count,
		 qdesc->pn_check_needed,
		 qdesc->pn_shall_be_even,
		 qdesc->pn_shall_be_uneven,
		 qdesc->pn_size);
}
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/* Stub: reo queue descriptor dump is a no-op when
 * DUMP_REO_QUEUE_INFO_IN_DDR is not enabled.
 */
static inline void hal_dump_rx_reo_queue_desc(
void *hw_qdesc_vaddr_aligned)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
/** /**
* hal_srng_dump_ring_desc() - Dump ring descriptor info * hal_srng_dump_ring_desc() - Dump ring descriptor info
* *

View File

@@ -589,7 +589,7 @@ inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush); BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);
HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE, HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
cp->flush_all); cp->flush_entire_cache);
if (hif_pm_runtime_get(hal_soc->hif_handle, if (hif_pm_runtime_get(hal_soc->hif_handle,
RTPM_ID_HAL_REO_CMD, true) == 0) { RTPM_ID_HAL_REO_CMD, true) == 0) {

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
* *
* Permission to use, copy, modify, and/or distribute this software for * Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the * any purpose with or without fee is hereby granted, provided that the
@@ -179,7 +179,7 @@ struct hal_reo_cmd_flush_queue_params {
* @cache_block_res_index: Blocking resource to be used * @cache_block_res_index: Blocking resource to be used
* @flush_no_inval: Flush without invalidating descriptor * @flush_no_inval: Flush without invalidating descriptor
* @use_after_flush: Block usage after flush till unblock command * @use_after_flush: Block usage after flush till unblock command
* @flush_all: Flush entire REO cache * @flush_entire_cache: Flush entire REO cache
*/ */
struct hal_reo_cmd_flush_cache_params { struct hal_reo_cmd_flush_cache_params {
bool fwd_mpdus_in_queue; bool fwd_mpdus_in_queue;
@@ -187,7 +187,7 @@ struct hal_reo_cmd_flush_cache_params {
uint8_t cache_block_res_index; uint8_t cache_block_res_index;
bool flush_no_inval; bool flush_no_inval;
bool block_use_after_flush; bool block_use_after_flush;
bool flush_all; bool flush_entire_cache;
}; };
/** /**