qcacmn: Add NAPI statistics to dumpstats

Currently NAPI stats are retrieved as part of the iwpriv getStats command.
The buffer available for this command is limited, so the NAPI stats get
truncated.

Add a new dumpStats parameter (9) to dump NAPI stats.
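
With this, NAPI stats can be dumped on their own, e.g. via
"iwpriv wlan0 dumpStats 9" (the interface name here is illustrative).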

Change-Id: Iaf52a3dcecac2f7b24fde2f8220fbfddc767965b
CRs-Fixed: 1076563
Mohit Khanna 8 years ago
parent
commit
518eb5092e

+ 1 - 0
dp/inc/cdp_txrx_mob_def.h

@@ -39,6 +39,7 @@
 #define WLAN_TXRX_DESC_STATS  6
 #define WLAN_HIF_STATS  7
 #define WLAN_LRO_STATS  8
+#define WLAN_NAPI_STATS  9
 #define WLAN_SCHEDULER_STATS        21
 #define WLAN_TX_QUEUE_STATS         22
 #define WLAN_BUNDLE_STATS           23
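
For illustration only: a stats dispatcher consuming these IDs could look like
the sketch below. Only WLAN_NAPI_STATS is part of this change; the function
name is a hypothetical stand-in for the driver's real dumpStats handler.

	/* Hypothetical sketch of a dumpStats dispatcher; assumes
	 * cdp_txrx_mob_def.h and qdf_status.h are in scope. Only the
	 * WLAN_NAPI_STATS case is grounded in this change. */
	static QDF_STATUS display_stats_sketch(uint16_t value)
	{
		switch (value) {
		case WLAN_NAPI_STATS:
			/* the real path ends up in hif_napi_stats() */
			break;
		default:
			return QDF_STATUS_E_INVAL;
		}
		return QDF_STATUS_SUCCESS;
	}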

+ 1 - 1
dp/inc/cdp_txrx_ops.h

@@ -854,7 +854,7 @@ struct cdp_throttle_ops {
  * @stats:
  */
 struct cdp_mob_stats_ops {
-	void (*display_stats)(uint16_t bitmap);
+	QDF_STATUS (*display_stats)(uint16_t bitmap);
 	void (*clear_stats)(uint16_t bitmap);
 	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
 };
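
Returning QDF_STATUS from display_stats lets the caller report an unsupported
stats ID back to user space instead of failing silently; the wrapper change in
cdp_txrx_stats.h below propagates it.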

+ 3 - 3
dp/inc/cdp_txrx_stats.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -33,12 +33,12 @@
 #define _CDP_TXRX_STATS_H_
 #include <cdp_txrx_ops.h>
 
-static inline void
+static inline QDF_STATUS
 cdp_display_stats(ol_txrx_soc_handle soc, uint16_t bitmap)
 {
 	if (soc->ops->mob_stats_ops->display_stats)
 		return soc->ops->mob_stats_ops->display_stats(bitmap);
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 static inline void
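
A minimal, hypothetical caller sketch for the new return value
(qdf_status_to_os_return() is the stock QDF status-to-errno helper; the
surrounding ioctl handler is assumed):

	/* Hypothetical call site: map the new QDF_STATUS to an errno. */
	QDF_STATUS status = cdp_display_stats(soc, WLAN_NAPI_STATS);

	if (QDF_STATUS_SUCCESS != status)
		return qdf_status_to_os_return(status);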

+ 4 - 2
hif/inc/hif.h

@@ -182,13 +182,15 @@ struct CE_state;
 
 /* NOTE: "napi->scale" can be changed,
    but this does not change the number of buckets */
-#define QCA_NAPI_NUM_BUCKETS (QCA_NAPI_BUDGET / QCA_NAPI_DEF_SCALE)
+#define QCA_NAPI_NUM_BUCKETS 4
 struct qca_napi_stat {
 	uint32_t napi_schedules;
 	uint32_t napi_polls;
 	uint32_t napi_completes;
 	uint32_t napi_workdone;
 	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
+	uint32_t time_limit_reached;
+	uint32_t rxpkt_thresh_reached;
 };
 
 /**
@@ -201,7 +203,7 @@ struct qca_napi_stat {
 struct qca_napi_info {
 	struct net_device    netdev; /* dummy net_dev */
 	void 		     *hif_ctx;
-	struct napi_struct   napi;    /* one NAPI Instance per CE in phase I */
+	struct napi_struct   napi;
 	uint8_t              scale;   /* currently same on all instances */
 	uint8_t              id;
 	int                  irq;
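
The napi_budget_uses[] array histograms how much of the poll budget each NAPI
poll consumed. A sketch of the binning, assuming work done is divided by
napi->scale and clamped to the last bucket (the helper name is hypothetical):

	/* Hypothetical helper: bin one poll's work done into the
	 * budget-use histogram; clamps to the last bucket and
	 * assumes a non-zero scale. */
	static void napi_bin_budget_use(struct qca_napi_stat *stat,
					int workdone, uint8_t scale)
	{
		int bucket = workdone / scale;

		if (bucket >= QCA_NAPI_NUM_BUCKETS)
			bucket = QCA_NAPI_NUM_BUCKETS - 1;
		stat->napi_budget_uses[bucket]++;
	}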

+ 19 - 1
hif/inc/hif_napi.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -129,6 +129,20 @@ int hif_napi_schedule(struct hif_opaque_softc *scn, int ce_id);
 /* called by hdd_napi, which is called by kernel */
 int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
 			struct napi_struct *napi, int budget);
+#ifdef HELIUMPLUS
+/* called to retrieve NAPI CPU statistics */
+void hif_napi_stats(struct qca_napi_data *napid);
+void hif_napi_update_yield_stats(struct CE_state *ce_state,
+				 bool time_limit_reached,
+				 bool rxpkt_thresh_reached);
+#else
+static inline void hif_napi_stats(struct qca_napi_data *napid) { }
+
+static inline void hif_napi_update_yield_stats(struct CE_state *ce_state,
+				 bool time_limit_reached,
+				 bool rxpkt_thresh_reached) { }
+
+#endif
 
 #ifdef FEATURE_NAPI_DEBUG
 #define NAPI_DEBUG(fmt, ...)			\
@@ -226,6 +240,10 @@ static inline int hif_napi_schedule(struct hif_opaque_softc *hif, int ce_id)
 static inline int hif_napi_poll(struct napi_struct *napi, int budget)
 { return -EPERM; }
 
+static inline void hif_napi_stats(struct qca_napi_data *napid) { }
+static inline void hif_napi_update_yield_stats(struct CE_state *ce_state,
+				 bool time_limit_reached,
+				 bool rxpkt_thresh_reached) { }
 #endif /* FEATURE_NAPI */
 
 static inline int hif_ext_napi_enabled(struct hif_opaque_softc *hif, int ce)
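
Note the empty inline stubs for non-HELIUMPLUS and non-FEATURE_NAPI builds:
they let hif_ce_service_should_yield() call hif_napi_update_yield_stats()
unconditionally, with no #ifdef at the call site.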

+ 2 - 2
hif/src/ce/ce_main.h

@@ -126,7 +126,7 @@ struct hif_ext_group_entry {
 	void *hif_state;
 };
 
-struct ce_intr_stats {
+struct ce_stats {
 	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
 };
 
@@ -161,7 +161,7 @@ struct HIF_CE_state {
 
 	/* Copy Engine used for Diagnostic Accesses */
 	struct CE_handle *ce_diag;
-	struct ce_intr_stats stats;
+	struct ce_stats stats;
 	struct ce_ops *ce_services;
 };
 

+ 15 - 3
hif/src/ce/ce_service.c

@@ -35,6 +35,7 @@
 #include "regtable.h"
 #include "hif_main.h"
 #include "hif_debug.h"
+#include "hif_napi.h"
 
 #ifdef IPA_OFFLOAD
 #ifdef QCA_WIFI_3_0
@@ -216,9 +217,20 @@ bool hif_ce_service_should_yield(struct hif_softc *scn,
 bool hif_ce_service_should_yield(struct hif_softc *scn,
 				 struct CE_state *ce_state)
 {
-	bool yield = qdf_system_time_after_eq(qdf_system_ticks(),
-					     ce_state->ce_service_yield_time) ||
-		     hif_max_num_receives_reached(scn, ce_state->receive_count);
+	bool yield, time_limit_reached, rxpkt_thresh_reached = false;
+
+	time_limit_reached = qdf_system_time_after_eq(qdf_system_ticks(),
+					ce_state->ce_service_yield_time);
+	if (!time_limit_reached)
+		rxpkt_thresh_reached = hif_max_num_receives_reached
+					(scn, ce_state->receive_count);
+
+	yield = time_limit_reached || rxpkt_thresh_reached;
+
+	if (yield)
+		hif_napi_update_yield_stats(ce_state,
+					    time_limit_reached,
+					    rxpkt_thresh_reached);
 	return yield;
 }
 #endif
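
Since the rx-packet threshold is evaluated only when the time limit has not
been reached, each yield is attributed to exactly one cause and the two new
counters partition the yields cleanly; it also keeps the cheap tick comparison
as the short-circuit.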

+ 2 - 3
hif/src/ce/ce_tasklet.c

@@ -432,7 +432,6 @@ void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
 	for (i = 0; i < CE_COUNT_MAX; i++) {
 		size = STR_SIZE;
 		pos = 0;
-		qdf_print("CE id: %d", i);
 		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
 			ret = snprintf(str_buffer + pos, size, "[%d]: %d",
 				j, hif_ce_state->stats.ce_per_cpu[i][j]);
@@ -441,7 +440,7 @@ void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
 			size -= ret;
 			pos += ret;
 		}
-		qdf_print("%s", str_buffer);
+		qdf_print("CE id[%d] - %s", i, str_buffer);
 	}
 #undef STR_SIZE
 }
@@ -454,7 +453,7 @@ void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state)
  */
 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
 {
-	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_intr_stats));
+	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
 }
 
 /**

+ 75 - 10
hif/src/hif_napi.c

@@ -808,22 +808,78 @@ out:
 }
 
 #ifdef HELIUMPLUS
-/*
- * Local functions
- * - no argument checks, all internal/trusted callers
+/**
+ * hif_napi_update_yield_stats() - update NAPI yield-related stats
+ * @ce_state: CE state for which the yield stats need to be updated
+ * @time_limit_reached: indicates whether the time limit was reached
+ * @rxpkt_thresh_reached: indicates whether the rx packet threshold was reached
+ *
+ * Return: None
  */
+void hif_napi_update_yield_stats(struct CE_state *ce_state,
+				 bool time_limit_reached,
+				 bool rxpkt_thresh_reached)
+{
+	struct hif_softc *hif;
+	struct qca_napi_data *napi_data = NULL;
+	int ce_id = 0;
+	int cpu_id = 0;
+
+	if (unlikely(NULL == ce_state)) {
+		QDF_ASSERT(NULL != ce_state);
+		return;
+	}
 
-#ifdef FEATURE_NAPI_DEBUG
-static void hnc_dump_cpus(struct qca_napi_data *napid)
+	hif = ce_state->scn;
+
+	if (unlikely(NULL == hif)) {
+		QDF_ASSERT(NULL != hif);
+		return;
+	}
+
+	napi_data = &(hif->napi_data);
+	if (unlikely(NULL == napi_data)) {
+		QDF_ASSERT(NULL != napi_data);
+		return;
+	}
+
+	ce_id = ce_state->id;
+	cpu_id = qdf_get_cpu();
+
+	if (time_limit_reached)
+		napi_data->napis[ce_id].stats[cpu_id].time_limit_reached++;
+	else
+		napi_data->napis[ce_id].stats[cpu_id].rxpkt_thresh_reached++;
+}
+
+/**
+ * hif_napi_stats() - display NAPI CPU statistics
+ * @napid: pointer to qca_napi_data
+ *
+ * Description:
+ *    Prints the CPU cores on which the NAPI instances/CE interrupts
+ *    are being executed. Can be called from outside the NAPI layer.
+ *
+ * Return: None
+ */
+void hif_napi_stats(struct qca_napi_data *napid)
 {
 	int i;
-	struct qca_napi_cpu *cpu = napid->napi_cpu;
+	struct qca_napi_cpu *cpu;
+
+	if (napid == NULL) {
+		qdf_print("%s: napid struct is null", __func__);
+		return;
+	}
 
-	NAPI_DEBUG("%s: NAPI CPU TABLE", __func__);
-	NAPI_DEBUG("lilclhead=%d, bigclhead=%d",
+	cpu = napid->napi_cpu;
+	qdf_print("NAPI CPU TABLE");
+	qdf_print("lilclhead=%d, bigclhead=%d",
 		  napid->lilcl_head, napid->bigcl_head);
 	for (i = 0; i < NR_CPUS; i++) {
-		NAPI_DEBUG("CPU[%02d]: state:%d crid=%02d clid=%02d "
+		qdf_print("CPU[%02d]: state:%d crid=%02d clid=%02d "
 			  "crmk:0x%0lx thmk:0x%0lx frq:%d "
 			  "napi = 0x%08x lnk:%d",
 			  i,
@@ -833,7 +889,16 @@ static void hnc_dump_cpus(struct qca_napi_data *napid)
 			  cpu[i].max_freq, cpu[i].napis,
 			  cpu[i].cluster_nxt);
 	}
-	/* return; -- Linus does not like it, I do. */
+}
+
+#ifdef FEATURE_NAPI_DEBUG
+/*
+ * Local functions
+ * - no argument checks, all internal/trusted callers
+ */
+static void hnc_dump_cpus(struct qca_napi_data *napid)
+{
+	hif_napi_stats(napid);
 }
 #else
 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
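
With this refactor, the debug-only hnc_dump_cpus() delegates to
hif_napi_stats(), so the FEATURE_NAPI_DEBUG dump and the new dumpStats path
share a single printer.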