Эх сурвалжийг харах

qcacld-3.0: Add periodic stats for flow control

Add periodic stats for flow control and
group credit for HL and LL datapath.

Change-Id: I835ba366e9e45bbb1e4b6015577c451c7c0606c2
CRs-Fixed: 2485544
Nirav Shah 5 жил өмнө
parent
commit
aa34cbb6c6

+ 18 - 3
core/dp/ol/inc/ol_txrx_ctrl_api.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -510,7 +510,8 @@ static inline void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
 
 void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
 void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
-void ol_tx_dump_flow_pool_info(void *);
+void ol_tx_dump_flow_pool_info(void *pdev);
+void ol_tx_dump_flow_pool_info_compact(void *pdev);
 void ol_tx_clear_flow_pool_stats(void);
 void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
 				 uint8_t flow_pool_id, uint16_t flow_pool_size);
@@ -545,6 +546,7 @@ QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool);
  * Return: QDF_STATUS_SUCCESS - in case of success
  */
 QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force);
+
 #else
 
 static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
@@ -553,9 +555,22 @@ static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
 static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
 {
 }
-static inline void ol_tx_dump_flow_pool_info(void *ctx)
+
+#if defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
+void ol_tx_dump_flow_pool_info(void *pdev);
+void ol_tx_dump_flow_pool_info_compact(void *pdev);
+#else
+static inline
+void ol_tx_dump_flow_pool_info(void *ctx)
+{
+}
+
+static inline
+void ol_tx_dump_flow_pool_info_compact(void *ctx)
 {
 }
+#endif
+
 static inline void ol_tx_clear_flow_pool_stats(void)
 {
 }

+ 78 - 0
core/dp/txrx/ol_tx_hl.c

@@ -1829,10 +1829,14 @@ void ol_txrx_update_tx_queue_groups(
 			 */
 			if (!vdev_bit_mask) {
 				/* Set Group Pointer (vdev and peer) to NULL */
+				ol_txrx_info("Group membership removed for vdev_id %d from group_id %d",
+					     vdev->vdev_id, group_id);
 				ol_tx_set_vdev_group_ptr(
 						pdev, vdev->vdev_id, NULL);
 			} else {
 				/* Set Group Pointer (vdev and peer) */
+				ol_txrx_info("Group membership updated for vdev_id %d to group_id %d",
+					     vdev->vdev_id, group_id);
 				ol_tx_set_vdev_group_ptr(
 						pdev, vdev->vdev_id, group);
 			}
@@ -1840,6 +1844,8 @@ void ol_txrx_update_tx_queue_groups(
 	}
 	/* Update membership */
 	group->membership = membership;
+	ol_txrx_info("Group membership updated for group_id %d membership 0x%x",
+		     group_id, group->membership);
 credit_update:
 	/* Update Credit */
 	ol_txrx_update_group_credit(group, credit, absolute);
@@ -1994,4 +2000,76 @@ int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan)
 
 	return 0;
 }
+
+/**
+ * ol_tx_dump_flow_pool_info_compact() - dump per-vdev tx desc limits and
+ *	tx queue group credits in a compact, single-line format
+ * @ctx: opaque context (unused; the pdev is looked up via cds)
+ *
+ * Return: none
+ */
+void ol_tx_dump_flow_pool_info_compact(void *ctx)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	char *comb_log_str;
+	int bytes_written = 0;
+	uint32_t free_size;
+	struct ol_txrx_vdev_t *vdev;
+	int i = 0;
+
+	/* NULL-check before first dereference, matching
+	 * ol_tx_dump_flow_pool_info()
+	 */
+	if (!pdev) {
+		ol_txrx_err("pdev is NULL");
+		return;
+	}
+
+	free_size = WLAN_MAX_VDEVS * 100;
+	comb_log_str = qdf_mem_malloc(free_size);
+	if (!comb_log_str)
+		return;
+
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		/* Stop when the buffer is full; pass only the remaining
+		 * space to snprintf, never the total buffer size.
+		 */
+		if ((uint32_t)bytes_written >= free_size)
+			break;
+		bytes_written += snprintf(&comb_log_str[bytes_written],
+				free_size - bytes_written,
+				"%d (%d,%d)(%d,%d)(%d,%d) |",
+				vdev->vdev_id, vdev->tx_desc_limit,
+				qdf_atomic_read(&vdev->tx_desc_count),
+				qdf_atomic_read(&vdev->os_q_paused),
+				vdev->prio_q_paused, vdev->queue_stop_th,
+				vdev->queue_restart_th);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+	qdf_nofl_debug("STATS | FC: %s", comb_log_str);
+
+	/* Reuse the same buffer for the group credit line */
+	free_size = WLAN_MAX_VDEVS * 100;
+	bytes_written = 0;
+	qdf_mem_zero(comb_log_str, free_size);
+
+	bytes_written = snprintf(comb_log_str, free_size, "%d ",
+				 qdf_atomic_read(&pdev->target_tx_credit));
+	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+		if ((uint32_t)bytes_written >= free_size)
+			break;
+		bytes_written += snprintf(&comb_log_str[bytes_written],
+					  free_size - bytes_written,
+					  "|%d, (0x%x, %d)", i,
+					  OL_TXQ_GROUP_VDEV_ID_MASK_GET(
+					  pdev->txq_grps[i].membership),
+					  qdf_atomic_read(
+					  &pdev->txq_grps[i].credit));
+	}
+	qdf_nofl_debug("STATS | CREDIT: %s", comb_log_str);
+	qdf_mem_free(comb_log_str);
+}
+
+/**
+ * ol_tx_dump_flow_pool_info() - dump per-vdev tx descriptor limits,
+ *	current usage and queue pause state for HL flow control
+ * @ctx: opaque context (unused; the pdev is looked up via cds)
+ *
+ * Return: none
+ */
+void ol_tx_dump_flow_pool_info(void *ctx)
+{
+	struct ol_txrx_vdev_t *vdev_iter;
+	struct ol_txrx_pdev_t *txrx_pdev;
+
+	txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	if (!txrx_pdev) {
+		ol_txrx_err("pdev is NULL");
+		return;
+	}
+
+	/* Hold tx_mutex so the vdev list cannot change while we walk it */
+	qdf_spin_lock_bh(&txrx_pdev->tx_mutex);
+	TAILQ_FOREACH(vdev_iter, &txrx_pdev->vdev_list, vdev_list_elem) {
+		txrx_nofl_info("vdev_id %d", vdev_iter->vdev_id);
+		txrx_nofl_info("limit %d available %d stop_threshold %d restart_threshold %d",
+			       vdev_iter->tx_desc_limit,
+			       qdf_atomic_read(&vdev_iter->tx_desc_count),
+			       vdev_iter->queue_stop_th,
+			       vdev_iter->queue_restart_th);
+		txrx_nofl_info("q_paused %d prio_q_paused %d",
+			       qdf_atomic_read(&vdev_iter->os_q_paused),
+			       vdev_iter->prio_q_paused);
+	}
+	qdf_spin_unlock_bh(&txrx_pdev->tx_mutex);
+}
 #endif /* QCA_HL_NETDEV_FLOW_CONTROL */

+ 4 - 1
core/dp/txrx/ol_txrx.c

@@ -4419,7 +4419,10 @@ ol_txrx_display_stats(void *soc, uint16_t value,
 		ol_txrx_stats_display_tso(pdev);
 		break;
 	case CDP_DUMP_TX_FLOW_POOL_INFO:
-		ol_tx_dump_flow_pool_info((void *)pdev);
+		if (verb_level == QDF_STATS_VERBOSITY_LEVEL_LOW)
+			ol_tx_dump_flow_pool_info_compact((void *)pdev);
+		else
+			ol_tx_dump_flow_pool_info((void *)pdev);
 		break;
 	case CDP_TXRX_DESC_STATS:
 		qdf_nbuf_tx_desc_count_display();

+ 37 - 0
core/dp/txrx/ol_txrx_flow_control.c

@@ -347,6 +347,43 @@ static const char *ol_tx_flow_pool_status_to_str
 	}
 }
 
+/**
+ * ol_tx_dump_flow_pool_info_compact() - dump global pool and per-flow-pool
+ *	descriptor availability in a compact, single-line format
+ * @ctx: opaque context (unused; the pdev is looked up via cds)
+ *
+ * Return: none
+ */
+void ol_tx_dump_flow_pool_info_compact(void *ctx)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	char *comb_log_str;
+	int bytes_written = 0;
+	uint32_t free_size;
+	struct ol_tx_flow_pool_t *pool = NULL;
+
+	/* NULL-check before first dereference */
+	if (!pdev) {
+		ol_txrx_err("pdev is NULL");
+		return;
+	}
+
+	free_size = WLAN_MAX_VDEVS * 100 + 100;
+	comb_log_str = qdf_mem_malloc(free_size);
+	if (!comb_log_str)
+		return;
+
+	bytes_written = snprintf(comb_log_str, free_size, "G:(%d,%d) ",
+				 pdev->tx_desc.pool_size,
+				 pdev->tx_desc.num_free);
+
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		/* Stop when the buffer is full; pass only the remaining
+		 * space to snprintf, never the total buffer size.
+		 */
+		if ((uint32_t)bytes_written >= free_size)
+			break;
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
+		bytes_written += snprintf(&comb_log_str[bytes_written],
+					  free_size - bytes_written,
+					  "| %d (%d,%d)",
+					  pool->flow_pool_id,
+					  pool->flow_pool_size,
+					  pool->avail_desc);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_nofl_debug("STATS | FC: %s", comb_log_str);
+	qdf_mem_free(comb_log_str);
+}
+
 /**
  * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
  * @ctx: cdp_soc context, required only in lithium_dp flow control.

+ 3 - 0
core/hdd/src/wlan_hdd_main.c

@@ -8112,6 +8112,9 @@ static void hdd_display_periodic_stats(struct hdd_context *hdd_ctx,
 			cdp_display_stats(soc,
 					  CDP_TXRX_PATH_STATS,
 					  QDF_STATS_VERBOSITY_LEVEL_LOW);
+			cdp_display_stats(soc,
+					  CDP_DUMP_TX_FLOW_POOL_INFO,
+					  QDF_STATS_VERBOSITY_LEVEL_LOW);
 			wlan_hdd_display_netif_queue_history
 				(hdd_ctx, QDF_STATS_VERBOSITY_LEVEL_LOW);
 			qdf_dp_trace_dump_stats();

+ 5 - 0
core/hdd/src/wlan_hdd_tx_rx.c

@@ -125,6 +125,7 @@ void hdd_tx_resume_timer_expired_handler(void *adapter_context)
 {
 	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 	u32 p_qpaused;
 	u32 np_qpaused;
 
@@ -133,6 +134,10 @@ void hdd_tx_resume_timer_expired_handler(void *adapter_context)
 		return;
 	}
 
+	cdp_display_stats(soc, CDP_DUMP_TX_FLOW_POOL_INFO,
+			  QDF_STATS_VERBOSITY_LEVEL_LOW);
+	wlan_hdd_display_netif_queue_history(hdd_ctx,
+					     QDF_STATS_VERBOSITY_LEVEL_LOW);
 	hdd_debug("Enabling queues");
 	spin_lock_bh(&adapter->pause_map_lock);
 	p_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL_PRIORITY);