Merge "qcacmn: Ignore rx hw stats reo command status callback"

Linux Build Service Account
2020-02-21 15:10:10 -08:00
committed by Gerrit - the friendly Code Review server
4 changed files with 110 additions and 19 deletions

View File

@@ -1135,8 +1135,15 @@ void dp_htt_stats_print_tag(struct dp_pdev *pdev,
 void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf);
 QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev, uint32_t tuple_mask,
                                      uint8_t mac_id);
-void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn),
-                         void *cb_ctxt);
+/**
+ * dp_rxtid_stats_cmd_cb - function pointer for peer
+ * rx tid stats cmd call_back
+ */
+typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
+                                      union hal_reo_status *reo_status);
+int dp_peer_rxtid_stats(struct dp_peer *peer,
+                        dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
+                        void *cb_ctxt);
 QDF_STATUS
 dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
                       uint8_t *peer_mac, enum cdp_sec_type sec_type,
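The header change above replaces the loosely typed void (*callback_fn) parameter with the named dp_rxtid_stats_cmd_cb typedef, so every registered callback is checked against the full REO-status signature at compile time, and dp_peer_rxtid_stats() now reports how many stats commands it actually queued. A minimal standalone sketch of that typedef'd-callback pattern follows; the struct, function, and field names are illustrative only, not the driver's API.

#include <stdio.h>

struct ctx { int tid; };

/* Full callback signature is visible to the compiler at every call site. */
typedef void (*stats_cmd_cb)(struct ctx *ctx, int status);

static void print_status(struct ctx *ctx, int status)
{
    printf("tid %d: status %d\n", ctx->tid, status);
}

static int send_stats_cmds(struct ctx *ctx, stats_cmd_cb cb)
{
    if (!cb)
        return 0;       /* nothing queued */
    cb(ctx, 0);         /* pretend one command completed successfully */
    return 1;           /* report how many commands were queued */
}

int main(void)
{
    struct ctx c = { .tid = 3 };

    return send_stats_cmds(&c, print_status) == 1 ? 0 : 1;
}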

View File

@@ -77,6 +77,16 @@ cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
 #endif
 #endif
 
+#ifdef WLAN_FEATURE_STATS_EXT
+#define INIT_RX_HW_STATS_LOCK(_soc) \
+        qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
+#define DEINIT_RX_HW_STATS_LOCK(_soc) \
+        qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
+#else
+#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
+#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
+#endif
+
 /*
  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
  * If the buffer size is exceeding this size limit,
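The new INIT_RX_HW_STATS_LOCK/DEINIT_RX_HW_STATS_LOCK macros expand to qdf_spinlock_create/qdf_spinlock_destroy only when WLAN_FEATURE_STATS_EXT is defined, and to nothing otherwise, so the soc init/deinit paths need no #ifdefs of their own. A standalone sketch of that no-op fallback pattern, using a dummy lock type so it builds without the QDF headers (compile with -DFEATURE_X to exercise the enabled branch; all names are illustrative):

#include <stdio.h>

struct dummy_lock { int created; };
struct soc { struct dummy_lock stats_lock; };

#ifdef FEATURE_X
#define INIT_STATS_LOCK(_soc)   ((_soc)->stats_lock.created = 1)
#define DEINIT_STATS_LOCK(_soc) ((_soc)->stats_lock.created = 0)
#else
/* Feature disabled: the call sites compile to nothing. */
#define INIT_STATS_LOCK(_soc)   /* no op */
#define DEINIT_STATS_LOCK(_soc) /* no op */
#endif

int main(void)
{
    struct soc soc = { { 0 } };

    INIT_STATS_LOCK(&soc);
    printf("lock created: %d\n", soc.stats_lock.created);
    DEINIT_STATS_LOCK(&soc);
    return 0;
}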
@@ -4348,6 +4358,8 @@ static void dp_soc_deinit(void *txrx_soc)
     qdf_spinlock_destroy(&soc->ast_lock);
 
+    DEINIT_RX_HW_STATS_LOCK(soc);
+
     dp_soc_mem_reset(soc);
 }
@@ -10338,7 +10350,7 @@ dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
 #ifdef WLAN_FEATURE_STATS_EXT
 /* rx hw stats event wait timeout in ms */
-#define DP_REO_STATUS_STATS_TIMEOUT 1000
+#define DP_REO_STATUS_STATS_TIMEOUT 1500
 /**
  * dp_txrx_ext_stats_request - request dp txrx extended stats request
  * @soc_hdl: soc handle
@@ -10383,20 +10395,36 @@ dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
                               union hal_reo_status *reo_status)
 {
-    struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
+    struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
     struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
+    bool is_query_timeout;
+
+    qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
+    is_query_timeout = rx_hw_stats->is_query_timeout;
+    /* free the cb_ctxt if all pending tid stats query is received */
+    if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
+        if (!is_query_timeout) {
+            qdf_event_set(&soc->rx_hw_stats_event);
+            soc->is_last_stats_ctx_init = false;
+        }
+        qdf_mem_free(rx_hw_stats);
+    }
 
     if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
-        dp_info("REO stats failure %d for TID %d",
-                queue_status->header.status, rx_tid->tid);
+        dp_info("REO stats failure %d",
+                queue_status->header.status);
+        qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
         return;
     }
 
-    soc->ext_stats.rx_mpdu_received += queue_status->mpdu_frms_cnt;
-    soc->ext_stats.rx_mpdu_missed += queue_status->late_recv_mpdu_cnt;
-
-    if (rx_tid->tid == (DP_MAX_TIDS - 1))
-        qdf_event_set(&soc->rx_hw_stats_event);
+    if (!is_query_timeout) {
+        soc->ext_stats.rx_mpdu_received +=
+            queue_status->mpdu_frms_cnt;
+        soc->ext_stats.rx_mpdu_missed +=
+            queue_status->late_recv_mpdu_cnt;
+    }
+    qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
 }
 
 /**
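In the reworked dp_rx_hw_stats_cb() above, every REO status callback drops one reference on the shared dp_req_rx_hw_stats_t; only the callback that takes pending_tid_stats_cnt to zero signals the waiter (unless the query already timed out) and frees the context, which is what makes a late or failed status safe to ignore. A standalone C11 sketch of that last-callback-frees pattern; it is single-threaded, leaves out the spinlock and the timeout handshake, and uses illustrative names only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req_ctx {
    atomic_int pending;     /* status callbacks still outstanding */
    bool timed_out;         /* would be set by the waiter on timeout */
};

static void stats_cb(struct req_ctx *req)
{
    /* atomic_fetch_sub returns the old value; 1 means this was the last. */
    if (atomic_fetch_sub(&req->pending, 1) == 1) {
        if (!req->timed_out)
            printf("last callback: signal the waiting requester\n");
        free(req);          /* the last responder owns the cleanup */
        return;
    }
    printf("callback handled, others still pending\n");
}

int main(void)
{
    struct req_ctx *req = calloc(1, sizeof(*req));

    if (!req)
        return 1;
    atomic_store(&req->pending, 3);    /* pretend 3 commands were sent */
    for (int i = 0; i < 3; i++)
        stats_cb(req);
    return 0;
}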
@@ -10413,6 +10441,8 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
     struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
     struct dp_peer *peer;
     QDF_STATUS status;
+    struct dp_req_rx_hw_stats_t *rx_hw_stats;
+    int rx_stats_sent_cnt = 0;
 
     if (!vdev) {
         dp_err("vdev is null for vdev_id: %u", vdev_id);
@@ -10427,12 +10457,39 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
         return QDF_STATUS_E_INVAL;
     }
 
+    rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
+    if (!rx_hw_stats) {
+        dp_err("malloc failed for hw stats structure");
+        return QDF_STATUS_E_NOMEM;
+    }
+
     qdf_event_reset(&soc->rx_hw_stats_event);
-    dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, NULL);
+    qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
+    rx_stats_sent_cnt =
+        dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
+    if (!rx_stats_sent_cnt) {
+        dp_err("no tid stats sent successfully");
+        qdf_mem_free(rx_hw_stats);
+        qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
+        return QDF_STATUS_E_INVAL;
+    }
+    qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
+                   rx_stats_sent_cnt);
+    rx_hw_stats->is_query_timeout = false;
+    soc->is_last_stats_ctx_init = true;
+    qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
+
     status = qdf_wait_single_event(&soc->rx_hw_stats_event,
                                    DP_REO_STATUS_STATS_TIMEOUT);
 
+    qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
+    if (status != QDF_STATUS_SUCCESS) {
+        dp_info("rx hw stats event timeout");
+        if (soc->is_last_stats_ctx_init)
+            rx_hw_stats->is_query_timeout = true;
+    }
+    qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
+
     dp_peer_unref_delete(peer);
 
     return status;
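On the request side above, the caller allocates the context, records how many REO commands were really queued, waits on rx_hw_stats_event with the 1500 ms timeout, and if the wait expires it marks is_query_timeout under rx_hw_stats_lock so the still-outstanding callbacks neither signal the event nor touch the stats; they only drop their reference. A standalone pthread sketch of that timeout handshake (not QDF; the 100 ms and 300 ms delays and all names are illustrative; mutex/cond destruction is omitted for brevity):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

struct req_ctx {
    atomic_int pending;        /* callbacks still outstanding */
    bool timed_out;            /* set by the waiter, read by callbacks */
    bool signalled;
    pthread_mutex_t lock;
    pthread_cond_t done;
};

static void *late_callback(void *arg)
{
    struct req_ctx *req = arg;
    bool quiet;

    usleep(300 * 1000);                 /* arrives after the waiter gave up */
    pthread_mutex_lock(&req->lock);
    quiet = req->timed_out;             /* did the waiter already time out? */
    pthread_mutex_unlock(&req->lock);

    if (atomic_fetch_sub(&req->pending, 1) == 1) {
        if (!quiet) {
            pthread_mutex_lock(&req->lock);
            req->signalled = true;
            pthread_cond_signal(&req->done);
            pthread_mutex_unlock(&req->lock);
        }
        free(req);                      /* last callback frees the context */
    }
    return NULL;
}

int main(void)
{
    struct req_ctx *req = calloc(1, sizeof(*req));
    struct timespec deadline;
    pthread_t cb;

    if (!req)
        return 1;
    pthread_mutex_init(&req->lock, NULL);
    pthread_cond_init(&req->done, NULL);
    atomic_store(&req->pending, 1);     /* one command "queued" */
    pthread_create(&cb, NULL, late_callback, req);

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_nsec += 100 * 1000 * 1000;          /* ~100 ms timeout */
    if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec += 1;
        deadline.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&req->lock);
    while (!req->signalled) {
        if (pthread_cond_timedwait(&req->done, &req->lock, &deadline)) {
            req->timed_out = true;      /* tell late callbacks to stay quiet */
            printf("stats query timed out; callbacks will clean up\n");
            break;
        }
    }
    pthread_mutex_unlock(&req->lock);
    /* req may be freed by the callback from here on; do not touch it. */

    pthread_join(cb, NULL);
    return 0;
}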
@@ -10930,6 +10987,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
     qdf_spinlock_create(&soc->reo_desc_freelist_lock);
     qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
+    INIT_RX_HW_STATS_LOCK(soc);
 
     /* fill the tx/rx cpu ring map*/
     dp_soc_set_txrx_ring_map(soc);

View File

@@ -3586,17 +3586,20 @@ bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  * @dp_stats_cmd_cb: REO command callback function
  * @cb_ctxt: Callback context
  *
- * Return: none
+ * Return: count of tid stats cmd send succeeded
  */
-void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
-                         void *cb_ctxt)
+int dp_peer_rxtid_stats(struct dp_peer *peer,
+                        dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
+                        void *cb_ctxt)
 {
     struct dp_soc *soc = peer->vdev->pdev->soc;
     struct hal_reo_cmd_params params;
     int i;
+    int stats_cmd_sent_cnt = 0;
+    QDF_STATUS status;
 
     if (!dp_stats_cmd_cb)
-        return;
+        return stats_cmd_sent_cnt;
 
     qdf_mem_zero(&params, sizeof(params));
     for (i = 0; i < DP_MAX_TIDS; i++) {
@@ -3609,13 +3612,20 @@ void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
                 (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
 
             if (cb_ctxt) {
-                dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
-                                &params, dp_stats_cmd_cb, cb_ctxt);
+                status = dp_reo_send_cmd(
+                                soc, CMD_GET_QUEUE_STATS,
+                                &params, dp_stats_cmd_cb,
+                                cb_ctxt);
             } else {
-                dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
-                                &params, dp_stats_cmd_cb, rx_tid);
+                status = dp_reo_send_cmd(
+                                soc, CMD_GET_QUEUE_STATS,
+                                &params, dp_stats_cmd_cb,
+                                rx_tid);
             }
 
+            if (QDF_IS_STATUS_SUCCESS(status))
+                stats_cmd_sent_cnt++;
+
             /* Flush REO descriptor from HW cache to update stats
              * in descriptor memory. This is to help debugging */
             qdf_mem_zero(&params, sizeof(params));
@@ -3629,6 +3639,8 @@ void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
                             NULL);
         }
     }
+
+    return stats_cmd_sent_cnt;
 }
 
 QDF_STATUS
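dp_peer_rxtid_stats() now returns the number of GET_QUEUE_STATS commands that dp_reo_send_cmd() accepted, and dp_request_rx_hw_stats() uses that count both to seed pending_tid_stats_cnt and to bail out when nothing was queued. A tiny standalone sketch of that count-the-successes idiom (illustrative names, simulated failures):

#include <stdio.h>

#define NUM_TIDS 8

/* Stand-in for a command send that can fail; even TIDs "succeed" here. */
static int send_queue_stats_cmd(int tid)
{
    return (tid % 2) == 0;
}

/* Returns how many commands were actually queued, so the caller knows
 * exactly how many status callbacks to wait for. */
static int request_all_tid_stats(void)
{
    int sent = 0;

    for (int tid = 0; tid < NUM_TIDS; tid++)
        if (send_queue_stats_cmd(tid))
            sent++;
    return sent;
}

int main(void)
{
    int sent = request_all_tid_stats();

    if (!sent) {
        printf("no tid stats command queued, abort the query\n");
        return 1;
    }
    printf("waiting for %d status callbacks\n", sent);
    return 0;
}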

View File

@@ -1203,6 +1203,8 @@ struct dp_soc {
         uint32_t rx_mpdu_missed;
     } ext_stats;
     qdf_event_t rx_hw_stats_event;
+    qdf_spinlock_t rx_hw_stats_lock;
+    bool is_last_stats_ctx_init;
 #endif /* WLAN_FEATURE_STATS_EXT */
 
     /* Smart monitor capability for HKv2 */
@@ -2333,4 +2335,16 @@ struct dp_rx_fst {
 #endif /* WLAN_SUPPORT_RX_FISA */
 #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
 
+#ifdef WLAN_FEATURE_STATS_EXT
+/*
+ * dp_req_rx_hw_stats_t: RX peer HW stats query structure
+ * @pending_tid_query_cnt: pending tid stats count which waits for REO status
+ * @is_query_timeout: flag to show is stats query timeout
+ */
+struct dp_req_rx_hw_stats_t {
+    qdf_atomic_t pending_tid_stats_cnt;
+    bool is_query_timeout;
+};
+#endif
+
 #endif /* _DP_TYPES_H_ */