qcacmn: Add event recording for fastpath & napi

Fastpath & napi did not have CE event recording, which is a
vital debug feature. Add events for the fastpath ring index
updates and for the napi schedule/poll lifecycle.

Change-Id: I73118f4fa5177158d6d489ea5b9ebf82f05c1229
CRs-Fixed: 1009273
Houston Hoffman, 9 years ago
parent commit fa260aa2eb
3 changed files, 59 additions and 3 deletions:
  1. hif/src/ce/ce_internal.h (+23 -1)
  2. hif/src/ce/ce_service.c (+24 -2)
  3. hif/src/hif_napi.c (+12 -0)
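
All of the new events funnel through hif_record_ce_desc_event(). As a
reading aid, here is the call shape inferred from the hunks below; the
prototype is an assumption reconstructed from the arguments used in this
commit, not a quote of the header:

	/* Assumed prototype, reconstructed from the call sites below. */
	void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
				      enum hif_ce_event_type type,
				      union ce_desc *descriptor,
				      void *memory, int index);

	/* Typical fastpath call: no descriptor or buffer snapshot is
	 * taken, and the index argument carries the ring index that is
	 * about to be updated. */
	hif_record_ce_desc_event(scn, ce_state->id, FAST_TX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index);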

+ 23 - 1
hif/src/ce/ce_internal.h

@@ -300,12 +300,25 @@ union ce_desc {
  * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
  * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
  * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
+ * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
+ *	of the RX ring in fastpath
+ * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
+ *	index of the RX ring in fastpath
+ * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
+ *	of the TX ring in fastpath
+ * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
+ *	index of the TX ring in fastpath
+ *
  * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
  * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
  * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
  * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
  * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
  * @HIF_CE_REAP_EXIT:  records when we process completion outside of a bh
+ * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
+ * @NAPI_POLL_ENTER: records the start of the napi poll function
+ * @NAPI_COMPLETE: records when interrupts are reenabled
+ * @NAPI_POLL_EXIT: records when the napi poll function returns
  */
 enum hif_ce_event_type {
 	HIF_RX_DESC_POST,
@@ -313,12 +326,21 @@ enum hif_ce_event_type {
 	HIF_TX_GATHER_DESC_POST,
 	HIF_TX_DESC_POST,
 	HIF_TX_DESC_COMPLETION,
-	HIF_IRQ_EVENT,
+	FAST_RX_WRITE_INDEX_UPDATE,
+	FAST_RX_SOFTWARE_INDEX_UPDATE,
+	FAST_TX_WRITE_INDEX_UPDATE,
+	FAST_TX_SOFTWARE_INDEX_UPDATE,
+
+	HIF_IRQ_EVENT = 0x10,
 	HIF_CE_TASKLET_ENTRY,
 	HIF_CE_TASKLET_RESCHEDULE,
 	HIF_CE_TASKLET_EXIT,
 	HIF_CE_REAP_ENTRY,
 	HIF_CE_REAP_EXIT,
+	NAPI_SCHEDULE,
+	NAPI_POLL_ENTER,
+	NAPI_COMPLETE,
+	NAPI_POLL_EXIT,
 };
 
 void ce_init_ce_desc_event_log(int ce_id, int size);
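
Note the explicit HIF_IRQ_EVENT = 0x10 assignment: it leaves a gap after
the descriptor/ring-index events so the two groups occupy distinct
numeric ranges and existing log decoders are not silently shifted by the
four new fastpath values. A hypothetical helper (not part of this
commit) shows the classification the split enables:

	/* Hypothetical, for illustration only: with HIF_IRQ_EVENT pinned
	 * at 0x10, descriptor-path events sit in 0x0..0xf and
	 * execution-context events (irq, tasklet, napi) at 0x10 and
	 * above, so a decoder can classify a logged event by range
	 * alone. */
	static inline bool hif_event_is_desc_event(enum hif_ce_event_type type)
	{
		return type < HIF_IRQ_EVENT;
	}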

+ 24 - 2
hif/src/ce/ce_service.c

@@ -538,6 +538,10 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
 	write_index = src_ring->write_index;
 	sw_index = src_ring->sw_index;
 
+	hif_record_ce_desc_event(scn, ce_state->id,
+				FAST_TX_SOFTWARE_INDEX_UPDATE,
+				NULL, NULL, write_index);
+
 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
 			 < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
 		HIF_ERROR("Source ring full, required %d, available %d",
@@ -634,6 +638,10 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
 		src_ring->write_index = write_index;
 
 		if (hif_pm_runtime_get(hif_hdl) == 0) {
+			hif_record_ce_desc_event(scn, ce_state->id,
+						 FAST_TX_WRITE_INDEX_UPDATE,
+						 NULL, NULL, write_index);
+
 			/* Don't call WAR_XXX from here
 			 * Just call XXX instead, that has the reqd. intel
 			 */
@@ -1355,6 +1363,11 @@ static void ce_fastpath_rx_handle(struct CE_state *ce_state,
 	/* Update Destination Ring Write Index */
 	write_index = dest_ring->write_index;
 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
+
+	hif_record_ce_desc_event(scn, ce_state->id,
+			FAST_RX_WRITE_INDEX_UPDATE,
+			NULL, NULL, write_index);
+
 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
 	dest_ring->write_index = write_index;
 }
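
The FAST_RX_WRITE_INDEX_UPDATE event is deliberately logged after
CE_RING_IDX_ADD() but before the register write, so the log captures the
value about to be committed to hardware. For reference, the ring-index
addition is plausibly the usual power-of-two wrap; the actual macro
lives in the CE headers and this expansion is an assumption:

	/* Assumed expansion: CE ring sizes are powers of two, so
	 * nentries_mask == nentries - 1 acts as the wrap mask. */
	#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
		(((idx) + (num)) & (nentries_mask))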
@@ -1456,6 +1469,11 @@ more_data:
 			 * reusing the buffers
 			 */
 			if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
+				hif_record_ce_desc_event(scn, ce_state->id,
+						 FAST_RX_SOFTWARE_INDEX_UPDATE,
+						 NULL, NULL, sw_index);
+				dest_ring->sw_index = sw_index;
+
 				qdf_spin_unlock(&ce_state->ce_index_lock);
 				ce_fastpath_rx_handle(ce_state, cmpl_msdus,
 						      MSG_FLUSH_NUM, ctrl_addr);
@@ -1465,6 +1483,12 @@ more_data:
 
 		}
 
+		hif_record_ce_desc_event(scn, ce_state->id,
+					 FAST_RX_SOFTWARE_INDEX_UPDATE,
+					 NULL, NULL, sw_index);
+
+		dest_ring->sw_index = sw_index;
+
 		/*
 		 * If there are not enough completions to fill the array,
 		 * just call the message handler here
@@ -1477,8 +1501,6 @@ more_data:
 			nbuf_cmpl_idx = 0;
 		}
 		qdf_atomic_set(&ce_state->rx_pending, 0);
-		dest_ring->sw_index = sw_index;
-
 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
 					   HOST_IS_COPY_COMPLETE_MASK);
 	}
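
The last two hunks also reorder the sw_index publish: previously
dest_ring->sw_index was written only at the bottom of the loop, after
the flush path had already dropped ce_index_lock and handed the
completions to ce_fastpath_rx_handle(). Now the new value is recorded
and published while the lock is still held, so no other context can
observe a stale software index. In sketch form, the ordering the hunks
enforce (the re-acquire after the handler is not visible in the diff and
is assumed here):

	/* Publish under the lock, then drop it to flush completions. */
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index);
	dest_ring->sw_index = sw_index;          /* visible before unlock */
	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_fastpath_rx_handle(ce_state, cmpl_msdus, MSG_FLUSH_NUM, ctrl_addr);
	qdf_spin_lock(&ce_state->ce_index_lock); /* assumed re-acquire */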

+ 12 - 0
hif/src/hif_napi.c

@@ -391,6 +391,9 @@ int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
 	int cpu = smp_processor_id();
 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
 
+	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
+				 NULL, NULL, 0);
+
 	scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
 	NAPI_DEBUG("scheduling napi %d (ce:%d)",
 		   scn->napi_data.napis[ce_id].id, ce_id);
@@ -436,6 +439,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
 		container_of(napi, struct qca_napi_info, napi);
 	napi_info->stats[cpu].napi_polls++;
 
+	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
+				 NAPI_POLL_ENTER, NULL, NULL, cpu);
+
 	if (unlikely(NULL == hif))
 		QDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
 	else {
@@ -468,6 +474,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
 
 	if ((ce_state != NULL && !ce_check_rx_pending(ce_state)) || 0 == rc) {
 		napi_info->stats[cpu].napi_completes++;
+
+		hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
+					 NAPI_COMPLETE, NULL, NULL, 0);
 		/* enable interrupts */
 		napi_complete(napi);
 		if (NULL != hif) {
@@ -481,6 +490,9 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
 			   __func__, __LINE__);
 	}
 
+	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
+				 NAPI_POLL_EXIT, NULL, NULL, normalized);
+
 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
 	return normalized;
 }
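
Taken together, the four napi events bracket a complete poll cycle,
which makes the log easy to sanity-check after the fact. A hypothetical
decoder-side check (not part of the driver) might look like:

	/* Hypothetical log check: a healthy cycle reads
	 *   NAPI_SCHEDULE -> NAPI_POLL_ENTER -> [NAPI_COMPLETE] -> NAPI_POLL_EXIT
	 * where NAPI_COMPLETE is absent when the budget was exhausted
	 * and the poll was rescheduled instead of re-enabling
	 * interrupts. */
	static bool napi_cycle_is_well_formed(const enum hif_ce_event_type *ev,
					      int n)
	{
		return n >= 3 && ev[0] == NAPI_SCHEDULE &&
		       ev[1] == NAPI_POLL_ENTER && ev[n - 1] == NAPI_POLL_EXIT;
	}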