qcacmn: Add event recording for fastpath & napi

Fastpath & napi did not have CE event recording.
This is a vital debug feature.

Change-Id: I73118f4fa5177158d6d489ea5b9ebf82f05c1229
CRs-Fixed: 1009273
This commit is contained in:
Houston Hoffman
2016-04-26 16:14:13 -07:00
committed by Gerrit - the friendly Code Review server
parent d6f946ce21
commit fa260aa2eb
3 changed files with 59 additions and 3 deletions

View File

@@ -300,12 +300,25 @@ union ce_desc {
* @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update) * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
* @HIF_TX_DESC_POST: event recorded before updating write index of TX ring. * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
* @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring. * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
* @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
* of the RX ring in fastpath
* @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
* index of the RX ring in fastpath
* @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
* of the TX ring in fastpath
* @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
* index of the TX ring in fastpath
*
* @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
* @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
* @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
* @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
* @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
* @HIF_CE_REAP_EXIT: records when we process completion outside of a bh * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh
* @NAPI_SCHEDULE: records when napi is scheduled from the irq context
* @NAPI_POLL_ENTER: records the start of the napi poll function
* @NAPI_COMPLETE: records when interrupts are reenabled
* @NAPI_POLL_EXIT: records when the napi poll function returns
*/ */
enum hif_ce_event_type { enum hif_ce_event_type {
HIF_RX_DESC_POST, HIF_RX_DESC_POST,
@@ -313,12 +326,21 @@ enum hif_ce_event_type {
HIF_TX_GATHER_DESC_POST, HIF_TX_GATHER_DESC_POST,
HIF_TX_DESC_POST, HIF_TX_DESC_POST,
HIF_TX_DESC_COMPLETION, HIF_TX_DESC_COMPLETION,
HIF_IRQ_EVENT, FAST_RX_WRITE_INDEX_UPDATE,
FAST_RX_SOFTWARE_INDEX_UPDATE,
FAST_TX_WRITE_INDEX_UPDATE,
FAST_TX_SOFTWARE_INDEX_UPDATE,
HIF_IRQ_EVENT = 0x10,
HIF_CE_TASKLET_ENTRY, HIF_CE_TASKLET_ENTRY,
HIF_CE_TASKLET_RESCHEDULE, HIF_CE_TASKLET_RESCHEDULE,
HIF_CE_TASKLET_EXIT, HIF_CE_TASKLET_EXIT,
HIF_CE_REAP_ENTRY, HIF_CE_REAP_ENTRY,
HIF_CE_REAP_EXIT, HIF_CE_REAP_EXIT,
NAPI_SCHEDULE,
NAPI_POLL_ENTER,
NAPI_COMPLETE,
NAPI_POLL_EXIT,
}; };
void ce_init_ce_desc_event_log(int ce_id, int size); void ce_init_ce_desc_event_log(int ce_id, int size);

View File

@@ -538,6 +538,10 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
write_index = src_ring->write_index; write_index = src_ring->write_index;
sw_index = src_ring->sw_index; sw_index = src_ring->sw_index;
hif_record_ce_desc_event(scn, ce_state->id,
FAST_TX_SOFTWARE_INDEX_UPDATE,
NULL, NULL, write_index);
if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
< (SLOTS_PER_DATAPATH_TX * num_msdus))) { < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
HIF_ERROR("Source ring full, required %d, available %d", HIF_ERROR("Source ring full, required %d, available %d",
@@ -634,6 +638,10 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
src_ring->write_index = write_index; src_ring->write_index = write_index;
if (hif_pm_runtime_get(hif_hdl) == 0) { if (hif_pm_runtime_get(hif_hdl) == 0) {
hif_record_ce_desc_event(scn, ce_state->id,
FAST_TX_WRITE_INDEX_UPDATE,
NULL, NULL, write_index);
/* Don't call WAR_XXX from here /* Don't call WAR_XXX from here
* Just call XXX instead, that has the reqd. intel * Just call XXX instead, that has the reqd. intel
*/ */
@@ -1355,6 +1363,11 @@ static void ce_fastpath_rx_handle(struct CE_state *ce_state,
/* Update Destination Ring Write Index */ /* Update Destination Ring Write Index */
write_index = dest_ring->write_index; write_index = dest_ring->write_index;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls); write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
hif_record_ce_desc_event(scn, ce_state->id,
FAST_RX_WRITE_INDEX_UPDATE,
NULL, NULL, write_index);
CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
dest_ring->write_index = write_index; dest_ring->write_index = write_index;
} }
@@ -1456,6 +1469,11 @@ more_data:
* reusing the buffers * reusing the buffers
*/ */
if (nbuf_cmpl_idx == MSG_FLUSH_NUM) { if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
hif_record_ce_desc_event(scn, ce_state->id,
FAST_RX_SOFTWARE_INDEX_UPDATE,
NULL, NULL, sw_index);
dest_ring->sw_index = sw_index;
qdf_spin_unlock(&ce_state->ce_index_lock); qdf_spin_unlock(&ce_state->ce_index_lock);
ce_fastpath_rx_handle(ce_state, cmpl_msdus, ce_fastpath_rx_handle(ce_state, cmpl_msdus,
MSG_FLUSH_NUM, ctrl_addr); MSG_FLUSH_NUM, ctrl_addr);
@@ -1465,6 +1483,12 @@ more_data:
} }
hif_record_ce_desc_event(scn, ce_state->id,
FAST_RX_SOFTWARE_INDEX_UPDATE,
NULL, NULL, sw_index);
dest_ring->sw_index = sw_index;
/* /*
* If there are not enough completions to fill the array, * If there are not enough completions to fill the array,
* just call the message handler here * just call the message handler here
@@ -1477,8 +1501,6 @@ more_data:
nbuf_cmpl_idx = 0; nbuf_cmpl_idx = 0;
} }
qdf_atomic_set(&ce_state->rx_pending, 0); qdf_atomic_set(&ce_state->rx_pending, 0);
dest_ring->sw_index = sw_index;
CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK); HOST_IS_COPY_COMPLETE_MASK);
} }

View File

@@ -391,6 +391,9 @@ int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
NULL, NULL, 0);
scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++; scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
NAPI_DEBUG("scheduling napi %d (ce:%d)", NAPI_DEBUG("scheduling napi %d (ce:%d)",
scn->napi_data.napis[ce_id].id, ce_id); scn->napi_data.napis[ce_id].id, ce_id);
@@ -436,6 +439,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
container_of(napi, struct qca_napi_info, napi); container_of(napi, struct qca_napi_info, napi);
napi_info->stats[cpu].napi_polls++; napi_info->stats[cpu].napi_polls++;
hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
NAPI_POLL_ENTER, NULL, NULL, cpu);
if (unlikely(NULL == hif)) if (unlikely(NULL == hif))
QDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */ QDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
else { else {
@@ -468,6 +474,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
if ((ce_state != NULL && !ce_check_rx_pending(ce_state)) || 0 == rc) { if ((ce_state != NULL && !ce_check_rx_pending(ce_state)) || 0 == rc) {
napi_info->stats[cpu].napi_completes++; napi_info->stats[cpu].napi_completes++;
hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
NULL, NULL, 0);
/* enable interrupts */ /* enable interrupts */
napi_complete(napi); napi_complete(napi);
if (NULL != hif) { if (NULL != hif) {
@@ -481,6 +490,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
__func__, __LINE__); __func__, __LINE__);
} }
hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
NAPI_POLL_EXIT, NULL, NULL, normalized);
NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized); NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
return normalized; return normalized;
} }