qcacmn: Add support for rx stats from REO block

Add support for rx stats from the REO block in the IPA offload use case.

Change-Id: I90a6f57507838645dea9095615f378bd5a3a6ffc
CRs-Fixed: 3231322
Author:       Amrit Sahai
Date:         2022-06-28 20:19:19 +05:30
Committed by: Madan Koyyalamudi
parent 08067d2b67
commit 9ea6f8dc13
4 changed files with 191 additions and 8 deletions
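For context before the diffs: the REO stats callback identifies the peer and TID through a single cookie. The caller packs the TID into the upper 16 bits and the peer id into the lower 16 bits of the cb_ctxt pointer when issuing CMD_GET_QUEUE_STATS, and the completion handler unpacks it with the DP_PEER_GET_REO_STATS_* macros added by this change. Below is a minimal standalone sketch of that packing scheme; the macro names and values mirror the ones introduced in this change, while the main() harness and the sample tid/peer_id values are illustrative only and not part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Values mirror the macros added in this change. */
#define DP_PEER_REO_STATS_TID_SHIFT     16
#define DP_PEER_REO_STATS_TID_MASK      0xFFFF0000
#define DP_PEER_REO_STATS_PEER_ID_MASK  0x0000FFFF
#define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
	 DP_PEER_REO_STATS_TID_SHIFT)
#define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)

int main(void)
{
	uint16_t tid = 5, peer_id = 37;	/* illustrative values */

	/* Pack, as done when the stats command is sent per TID ... */
	unsigned long cookie =
		((unsigned long)tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id;

	/* ... and unpack, as done in the REO status callback. */
	printf("tid=%lu peer_id=%lu\n",
	       DP_PEER_GET_REO_STATS_TID(cookie),
	       DP_PEER_GET_REO_STATS_PEER_ID(cookie));
	return 0;
}

Packing both identifiers into the cb_ctxt pointer keeps the existing REO command interface unchanged while still letting the completion handler resolve the exact per-TID rx queue to update.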


@@ -2186,6 +2186,12 @@ typedef void (*dp_rxtid_stats_cmd_cb)(struct dp_soc *soc, void *cb_ctxt,
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt);

#ifdef IPA_OFFLOAD
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status);

int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
#endif

QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,


@@ -2518,6 +2518,51 @@ map_detach:
}
#endif

#ifdef IPA_OFFLOAD
/*
 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
 * @soc - soc handle
 * @cb_ctxt - combination of peer_id and tid
 * @reo_status - reo status
 *
 * return: void
 */
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	unsigned long comb_peer_id_tid;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	uint16_t tid;
	uint16_t peer_id;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_err("REO stats failure %d\n",
		       queue_status->header.status);
		return;
	}

	comb_peer_id_tid = (unsigned long)cb_ctxt;
	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
	if (!peer)
		return;

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
		return;
	}

	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
}

qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
#endif

void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status)
{
@@ -5571,6 +5616,59 @@ void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
}
#endif

#ifdef IPA_OFFLOAD
int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;
	int stats_cmd_sent_cnt = 0;
	QDF_STATUS status;
	uint16_t peer_id = peer->peer_id;

	if (!dp_stats_cmd_cb)
		return stats_cmd_sent_cnt;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.stats_params.clear = 1;
			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						 &params, dp_stats_cmd_cb,
						 (void *)((i << DP_PEER_REO_STATS_TID_SHIFT)
							  | peer_id));

			if (QDF_IS_STATUS_SUCCESS(status))
				stats_cmd_sent_cnt++;

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}

	return stats_cmd_sent_cnt;
}
qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
#endif

/**
 * dp_peer_rxtid_stats: Retried Rx TID (REO queue) stats from HW
 * @peer: DP peer handle


@@ -167,6 +167,17 @@
#define DP_TX_MAGIC_PATTERN_INUSE	0xABCD1234
#define DP_TX_MAGIC_PATTERN_FREE	0xDEADBEEF

#ifdef IPA_OFFLOAD
#define DP_PEER_REO_STATS_TID_SHIFT	16
#define DP_PEER_REO_STATS_TID_MASK	0xFFFF0000
#define DP_PEER_REO_STATS_PEER_ID_MASK	0x0000FFFF
#define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
	((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
	 DP_PEER_REO_STATS_TID_SHIFT)
#define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
	(comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
#endif

enum rx_pktlog_mode {
	DP_RX_PKTLOG_DISABLED = 0,
	DP_RX_PKTLOG_FULL,
@@ -876,6 +887,11 @@ struct dp_rx_tid {
	/* Coex Override preserved windows size 1 based */
	uint16_t rx_ba_win_size_override;

#ifdef IPA_OFFLOAD
	/* rx msdu count per tid */
	struct cdp_pkt_info rx_msdu_cnt;
#endif
};

/**


@@ -1234,9 +1234,46 @@ void dp_pktlogmod_exit(struct dp_pdev *pdev)
#endif /*DP_CON_MON*/

#if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
#ifdef IPA_OFFLOAD
void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			     struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_rx_tid *rx_tid = NULL;
	uint8_t i = 0;

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		peer_stats_intf->rx_byte_count +=
			rx_tid->rx_msdu_cnt.bytes;
		peer_stats_intf->rx_packet_count +=
			rx_tid->rx_msdu_cnt.num;
	}
	peer_stats_intf->tx_packet_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.num;
	peer_stats_intf->tx_byte_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
}
#else
void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			     struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
	peer_stats_intf->tx_packet_count =
		txrx_peer->stats.per_pkt_stats.tx.ucast.num;
	peer_stats_intf->tx_byte_count =
		txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
}
#endif
QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
{
-	struct cdp_interface_peer_stats peer_stats_intf;
+	struct cdp_interface_peer_stats peer_stats_intf = {0};
	struct dp_mon_peer_stats *mon_peer_stats = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;
@@ -1254,7 +1291,6 @@ QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
	mon_peer_stats = &peer->monitor_peer->stats;

-	qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf));

	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
		peer_stats_intf.rssi_changed = true;
@@ -1269,12 +1305,7 @@ QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
	peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
	peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
	peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
-	peer_stats_intf.rx_packet_count = txrx_peer->to_stack.num;
-	peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
-	peer_stats_intf.tx_packet_count =
-		txrx_peer->stats.per_pkt_stats.tx.ucast.num;
-	peer_stats_intf.tx_byte_count =
-		txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
+	dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
	peer_stats_intf.per = tgt_peer->stats.tx.last_per;
	peer_stats_intf.free_buff = INVALID_FREE_BUFF;
	dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
@@ -2082,6 +2113,37 @@ void dp_peer_update_telemetry_stats(struct dp_peer *peer)
 * @peer : Datapath peer
 * @arg : argument to iter function
 */
#ifdef IPA_OFFLOAD
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	struct cdp_calibr_stats_intf peer_stats_intf = {0};
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	dp_peer_update_telemetry_stats(peer);

	if (!dp_peer_is_primary_link_peer(peer))
		return;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer || !(tgt_peer->txrx_peer))
		return;

	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf.to_stack = txrx_peer->to_stack;
	peer_stats_intf.tx_success =
		peer->monitor_peer->stats.tx.tx_ucast_success;
	peer_stats_intf.tx_ucast =
		peer->monitor_peer->stats.tx.tx_ucast_total;
	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
					      &tgt_peer->stats);

	dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
}
#else
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
@@ -2110,6 +2172,7 @@ dp_peer_cal_clients_stats_update(struct dp_soc *soc,
	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
					      &tgt_peer->stats);
}
#endif

/*dp_iterate_update_peer_list - update peer stats on cal client timer
 * @pdev_hdl: pdev handle