qcacmn: Ignore rx hw stats reo command status callback

The host queries REO hw stats by sending a REO queue status command
for each rx tid. After sending, the host waits to receive the REO hw
stats for all the rx tids. If the REO status is not received for all
the rx tids within the specified timeout, the corresponding peer's
reference is released. If a disconnect from userspace happens in
parallel, there is a chance of accessing the freed rxtid reference in
the rx hw stats callback. So, on rx hw stats event timeout, ignore
processing the REO command status in the rx stats callback.

Change-Id: I0aba346a25564a3c29751f1f5a1d4e9204b93d33
CRs-Fixed: 2619720
Author: Sravan Goud, 2020-02-11 18:07:03 +05:30
Committed by: Jinwei Chen
parent c310645579
commit b2c1b4a49b
4 changed files with 110 additions and 19 deletions
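
The essence of the change: the requester and the per-TID REO status
callbacks share a heap-allocated query context; an atomic pending count,
seeded with the number of commands actually sent, decides who frees it,
and a timeout flag taken under a spinlock tells late callbacks to drop
their status instead of touching per-TID state. Below is a minimal,
self-contained C11 model of that pattern using pthreads and stdatomic.
All names (query_ctx, stats_cb, ctx_put, NUM_TIDS) are illustrative, not
driver APIs, and unlike the driver the requester here holds its own
reference rather than relying on an is_last_stats_ctx_init guard.

/*
 * Minimal model of the pattern this change introduces: a heap-allocated
 * query context shared between the stats requester and the per-TID REO
 * status callbacks.  An atomic reference count decides who frees it, and
 * a timeout flag taken under a lock tells callbacks that arrive after the
 * wait expired to drop their status.  Illustrative names only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TIDS 8		/* stand-in for DP_MAX_TIDS */

struct query_ctx {
	atomic_int refcnt;	/* one per pending callback + requester */
	bool is_timeout;	/* set by the requester on wait timeout */
	pthread_mutex_t lock;	/* orders is_timeout against callbacks  */
};

static void ctx_put(struct query_ctx *ctx)
{
	/* Whoever drops the last reference frees the shared context. */
	if (atomic_fetch_sub(&ctx->refcnt, 1) == 1) {
		pthread_mutex_destroy(&ctx->lock);
		free(ctx);
	}
}

/* Models dp_rx_hw_stats_cb: one invocation per TID status event. */
static void *stats_cb(void *arg)
{
	struct query_ctx *ctx = arg;

	usleep(rand() % 2000);		/* some statuses arrive "late" */
	pthread_mutex_lock(&ctx->lock);
	if (ctx->is_timeout)
		puts("callback: query timed out, dropping status");
	else
		puts("callback: accumulating TID stats");
	pthread_mutex_unlock(&ctx->lock);
	ctx_put(ctx);
	return NULL;
}

/* Models dp_request_rx_hw_stats: issue queries, wait, flag timeouts. */
int main(void)
{
	pthread_t threads[NUM_TIDS];
	struct query_ctx *ctx = calloc(1, sizeof(*ctx));
	int i;

	if (!ctx)
		return 1;
	pthread_mutex_init(&ctx->lock, NULL);
	/* One reference per issued query, plus one for the requester. */
	atomic_init(&ctx->refcnt, NUM_TIDS + 1);

	for (i = 0; i < NUM_TIDS; i++)
		pthread_create(&threads[i], NULL, stats_cb, ctx);

	usleep(1000);			/* the wait "times out" early   */
	pthread_mutex_lock(&ctx->lock);
	ctx->is_timeout = true;		/* late callbacks must now skip */
	pthread_mutex_unlock(&ctx->lock);
	ctx_put(ctx);			/* requester drops its ref; the
					 * last callback frees ctx      */

	for (i = 0; i < NUM_TIDS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

In the driver itself the requester does not hold an extra reference;
instead it re-checks soc->is_last_stats_ctx_init under rx_hw_stats_lock
before touching the context after a timeout, which provides the same
protection against the use-after-free.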


@@ -1135,8 +1135,15 @@ void dp_htt_stats_print_tag(struct dp_pdev *pdev,
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
uint8_t mac_id);
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn),
void *cb_ctxt);
/**
* dp_rxtid_stats_cmd_cb - function pointer for peer
* rx tid stats cmd callback
*/
typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
union hal_reo_status *reo_status);
int dp_peer_rxtid_stats(struct dp_peer *peer,
dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
void *cb_ctxt);
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
uint8_t *peer_mac, enum cdp_sec_type sec_type,


@@ -77,6 +77,16 @@ cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
#endif
#endif
#ifdef WLAN_FEATURE_STATS_EXT
#define INIT_RX_HW_STATS_LOCK(_soc) \
qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc) /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif
/*
* The max size of cdp_peer_stats_param_t is limited to 16 bytes.
* If the buffer size is exceeding this size limit,
@@ -4423,6 +4433,8 @@ static void dp_soc_deinit(void *txrx_soc)
qdf_spinlock_destroy(&soc->ast_lock);
DEINIT_RX_HW_STATS_LOCK(soc);
dp_soc_mem_reset(soc);
}
@@ -10403,7 +10415,7 @@ dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
#ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 1000
#define DP_REO_STATUS_STATS_TIMEOUT 1500
/**
* dp_txrx_ext_stats_request - request dp txrx extended stats request
* @soc_hdl: soc handle
@@ -10448,20 +10460,36 @@ dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
union hal_reo_status *reo_status)
{
struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
bool is_query_timeout;
qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
is_query_timeout = rx_hw_stats->is_query_timeout;
/* free the cb_ctxt if all pending tid stats queries are received */
if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
if (!is_query_timeout) {
qdf_event_set(&soc->rx_hw_stats_event);
soc->is_last_stats_ctx_init = false;
}
qdf_mem_free(rx_hw_stats);
}
if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
dp_info("REO stats failure %d for TID %d",
queue_status->header.status, rx_tid->tid);
dp_info("REO stats failure %d",
queue_status->header.status);
qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
return;
}
soc->ext_stats.rx_mpdu_received += queue_status->mpdu_frms_cnt;
soc->ext_stats.rx_mpdu_missed += queue_status->late_recv_mpdu_cnt;
if (rx_tid->tid == (DP_MAX_TIDS - 1))
qdf_event_set(&soc->rx_hw_stats_event);
if (!is_query_timeout) {
soc->ext_stats.rx_mpdu_received +=
queue_status->mpdu_frms_cnt;
soc->ext_stats.rx_mpdu_missed +=
queue_status->late_recv_mpdu_cnt;
}
qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
}
/**
@@ -10478,6 +10506,8 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
struct dp_peer *peer;
QDF_STATUS status;
struct dp_req_rx_hw_stats_t *rx_hw_stats;
int rx_stats_sent_cnt = 0;
if (!vdev) {
dp_err("vdev is null for vdev_id: %u", vdev_id);
@@ -10492,12 +10522,39 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
return QDF_STATUS_E_INVAL;
}
rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
if (!rx_hw_stats) {
dp_err("malloc failed for hw stats structure");
return QDF_STATUS_E_NOMEM;
}
qdf_event_reset(&soc->rx_hw_stats_event);
dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, NULL);
qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
rx_stats_sent_cnt =
dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
if (!rx_stats_sent_cnt) {
dp_err("no tid stats sent successfully");
qdf_mem_free(rx_hw_stats);
qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
return QDF_STATUS_E_INVAL;
}
qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
rx_stats_sent_cnt);
rx_hw_stats->is_query_timeout = false;
soc->is_last_stats_ctx_init = true;
qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
status = qdf_wait_single_event(&soc->rx_hw_stats_event,
DP_REO_STATUS_STATS_TIMEOUT);
qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
if (status != QDF_STATUS_SUCCESS) {
dp_info("rx hw stats event timeout");
if (soc->is_last_stats_ctx_init)
rx_hw_stats->is_query_timeout = true;
}
qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
dp_peer_unref_delete(peer);
return status;
@@ -10995,6 +11052,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
qdf_spinlock_create(&soc->reo_desc_freelist_lock);
qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
INIT_RX_HW_STATS_LOCK(soc);
/* fill the tx/rx cpu ring map*/
dp_soc_set_txrx_ring_map(soc);


@@ -3586,17 +3586,20 @@ bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
* @dp_stats_cmd_cb: REO command callback function
* @cb_ctxt: Callback context
*
* Return: none
* Return: count of tid stats cmds sent successfully
*/
void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
int dp_peer_rxtid_stats(struct dp_peer *peer,
dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
void *cb_ctxt)
{
struct dp_soc *soc = peer->vdev->pdev->soc;
struct hal_reo_cmd_params params;
int i;
int stats_cmd_sent_cnt = 0;
QDF_STATUS status;
if (!dp_stats_cmd_cb)
return;
return stats_cmd_sent_cnt;
qdf_mem_zero(&params, sizeof(params));
for (i = 0; i < DP_MAX_TIDS; i++) {
@@ -3609,13 +3612,20 @@ void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
if (cb_ctxt) {
dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
&params, dp_stats_cmd_cb, cb_ctxt);
status = dp_reo_send_cmd(
soc, CMD_GET_QUEUE_STATS,
&params, dp_stats_cmd_cb,
cb_ctxt);
} else {
dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
&params, dp_stats_cmd_cb, rx_tid);
status = dp_reo_send_cmd(
soc, CMD_GET_QUEUE_STATS,
&params, dp_stats_cmd_cb,
rx_tid);
}
if (QDF_IS_STATUS_SUCCESS(status))
stats_cmd_sent_cnt++;
/* Flush REO descriptor from HW cache to update stats
* in descriptor memory. This is to help debugging */
qdf_mem_zero(&params, sizeof(params));
@@ -3629,6 +3639,8 @@ void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
NULL);
}
}
return stats_cmd_sent_cnt;
}
QDF_STATUS


@@ -1210,6 +1210,8 @@ struct dp_soc {
uint32_t rx_mpdu_missed;
} ext_stats;
qdf_event_t rx_hw_stats_event;
qdf_spinlock_t rx_hw_stats_lock;
bool is_last_stats_ctx_init;
#endif /* WLAN_FEATURE_STATS_EXT */
/* Smart monitor capability for HKv2 */
@@ -2320,4 +2322,16 @@ struct dp_rx_fst {
#endif /* WLAN_SUPPORT_RX_FISA */
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
#ifdef WLAN_FEATURE_STATS_EXT
/*
* dp_req_rx_hw_stats_t: RX peer HW stats query structure
* @pending_tid_stats_cnt: pending tid stats count which waits for REO status
* @is_query_timeout: flag to indicate whether the stats query timed out
*/
struct dp_req_rx_hw_stats_t {
qdf_atomic_t pending_tid_stats_cnt;
bool is_query_timeout;
};
#endif
#endif /* _DP_TYPES_H_ */