qcacld-3.0: Capture a few hdd tx stats on a per-CPU basis

A few of the hdd tx stats are not collected on a per-CPU
basis, so non-atomic increments racing across CPUs can be
lost and these counts could be lower than the actual
values. Collect those tx stats per CPU core.

Change-Id: I8eba5df456866bdd50dc5fcfdb9591ac837d9116
CRs-Fixed: 3059303
Yeshwanth Sriram Guntuka, 3 years ago
commit 50dc3f6626
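
For context, the pattern the diffs below apply is sketched here as a minimal, standalone C program (not driver code): keep one copy of the hot-path counters per CPU so that increments from different cores do not clobber one another, and fold the per-CPU slots into totals only when the stats are reported. NUM_CPUS, the trimmed-down struct and get_current_cpu() are illustrative stand-ins for the driver's NUM_CPUS, struct hdd_tx_rx_stats and qdf_get_smp_processor_id().

/* Minimal sketch of per-CPU stat collection; assumptions noted above. */
#include <stdint.h>
#include <stdio.h>

#define NUM_CPUS     8
#define WLAN_MAX_AC  4

struct tx_rx_stats {
	struct {
		uint32_t tx_called;
		uint32_t tx_dropped;
		uint32_t tx_classified_ac[WLAN_MAX_AC];
	} per_cpu[NUM_CPUS];
};

/* Stand-in for qdf_get_smp_processor_id(); a driver asks the OS instead. */
static int get_current_cpu(void)
{
	return 0;
}

/* Hot path: touch only the current CPU's slot, no shared counter. */
static void account_tx(struct tx_rx_stats *stats, int ac)
{
	int cpu = get_current_cpu();

	++stats->per_cpu[cpu].tx_called;
	++stats->per_cpu[cpu].tx_classified_ac[ac];
}

/* Report path: sum the per-CPU slots, as the stats dumps below do. */
static uint32_t total_tx_called(const struct tx_rx_stats *stats)
{
	uint32_t total = 0;
	int cpu;

	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		total += stats->per_cpu[cpu].tx_called;

	return total;
}

int main(void)
{
	static struct tx_rx_stats stats;

	account_tx(&stats, 0);
	account_tx(&stats, 3);
	printf("tx_called total: %u\n", total_tx_called(&stats));
	return 0;
}

A reader that sums the slots may still observe a momentarily stale total, but increments made concurrently on different CPUs no longer overwrite each other, which is the undercounting this change addresses.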

+ 16 - 12
core/hdd/inc/wlan_hdd_main.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -571,18 +572,21 @@ struct hdd_tx_rx_histogram {
 };
 
 struct hdd_tx_rx_stats {
-	/* start_xmit stats */
-	__u32    tx_called;
-	__u32    tx_dropped;
-	__u32    tx_orphaned;
-	__u32    tx_classified_ac[NUM_TX_QUEUES];
-	__u32    tx_dropped_ac[NUM_TX_QUEUES];
-
-	/* rx stats */
-	__u32 rx_packets[NUM_CPUS];
-	__u32 rx_dropped[NUM_CPUS];
-	__u32 rx_delivered[NUM_CPUS];
-	__u32 rx_refused[NUM_CPUS];
+	struct {
+		/* start_xmit stats */
+		__u32    tx_called;
+		__u32    tx_dropped;
+		__u32    tx_orphaned;
+		__u32    tx_classified_ac[WLAN_MAX_AC];
+		__u32    tx_dropped_ac[WLAN_MAX_AC];
+
+		/* rx stats */
+		__u32 rx_packets;
+		__u32 rx_dropped;
+		__u32 rx_delivered;
+		__u32 rx_refused;
+	} per_cpu[NUM_CPUS];
+
 	qdf_atomic_t rx_usolict_arp_n_mcast_drp;
 
 	/* rx gro */

+ 6 - 3
core/hdd/src/wlan_hdd_ipa.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -454,6 +455,7 @@ void hdd_ipa_send_nbuf_to_network(qdf_nbuf_t nbuf, qdf_netdev_t dev)
 	int result;
 	unsigned int cpu_index;
 	uint32_t enabled;
+	struct hdd_tx_rx_stats *stats;
 
 	if (hdd_validate_adapter(adapter)) {
 		kfree_skb(nbuf);
@@ -465,6 +467,7 @@ void hdd_ipa_send_nbuf_to_network(qdf_nbuf_t nbuf, qdf_netdev_t dev)
 		return;
 	}
 
+	stats = &adapter->hdd_stats.tx_rx_stats;
 	hdd_ipa_update_rx_mcbc_stats(adapter, nbuf);
 
 	if ((adapter->device_mode == QDF_SAP_MODE) &&
@@ -501,7 +504,7 @@ void hdd_ipa_send_nbuf_to_network(qdf_nbuf_t nbuf, qdf_netdev_t dev)
 
 	cpu_index = wlan_hdd_get_cpu();
 
-	++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
+	++stats->per_cpu[cpu_index].rx_packets;
 
 	/*
 	 * Update STA RX exception packet stats.
@@ -513,9 +516,9 @@ void hdd_ipa_send_nbuf_to_network(qdf_nbuf_t nbuf, qdf_netdev_t dev)
 
 	result = hdd_ipa_aggregated_rx_ind(nbuf);
 	if (result == NET_RX_SUCCESS)
-		++adapter->hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
+		++stats->per_cpu[cpu_index].rx_delivered;
 	else
-		++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+		++stats->per_cpu[cpu_index].rx_refused;
 
 	/*
 	 * Restore PF_WAKE_UP_IDLE flag in the task structure

+ 5 - 2
core/hdd/src/wlan_hdd_main.c

@@ -10273,7 +10273,9 @@ void hdd_send_mscs_action_frame(struct hdd_context *hdd_ctx,
 				struct hdd_adapter *adapter)
 {
 	uint64_t mscs_vo_pkt_delta;
-	unsigned long tx_vo_pkts;
+	unsigned long tx_vo_pkts = 0;
+	uint8_t cpu;
+	struct hdd_tx_rx_stats *stats = &adapter->hdd_stats.tx_rx_stats;
 
 	/*
 	 * To disable MSCS feature in driver set mscs_pkt_threshold = 0
@@ -10282,7 +10284,8 @@ void hdd_send_mscs_action_frame(struct hdd_context *hdd_ctx,
 	if (!hdd_ctx->config->mscs_pkt_threshold)
 		return;
 
-	tx_vo_pkts = adapter->hdd_stats.tx_rx_stats.tx_classified_ac[SME_AC_VO];
+	for (cpu = 0; cpu < NUM_CPUS; cpu++)
+		tx_vo_pkts += stats->per_cpu[cpu].tx_classified_ac[SME_AC_VO];
 
 	if (!adapter->mscs_counter)
 		adapter->mscs_prev_tx_vo_pkts = tx_vo_pkts;

+ 7 - 5
core/hdd/src/wlan_hdd_rx_monitor.c

@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -36,6 +37,7 @@ void hdd_rx_monitor_callback(ol_osif_vdev_handle context,
 	struct sk_buff *skb;
 	struct sk_buff *skb_next;
 	unsigned int cpu_index;
+	struct hdd_tx_rx_stats *stats;
 
 	qdf_assert(context);
 	qdf_assert(rxbuf);
@@ -48,6 +50,7 @@ void hdd_rx_monitor_callback(ol_osif_vdev_handle context,
 	}
 
 	cpu_index = wlan_hdd_get_cpu();
+	stats = &adapter->hdd_stats.tx_rx_stats;
 
 	/* walk the chain until all are processed */
 	skb = (struct sk_buff *)rxbuf;
@@ -55,7 +58,7 @@ void hdd_rx_monitor_callback(ol_osif_vdev_handle context,
 		skb_next = skb->next;
 		skb->dev = adapter->dev;
 
-		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
+		++stats->per_cpu[cpu_index].rx_packets;
 		++adapter->stats.rx_packets;
 		adapter->stats.rx_bytes += skb->len;
 
@@ -79,10 +82,9 @@ void hdd_rx_monitor_callback(ol_osif_vdev_handle context,
 		}
 
 		if (NET_RX_SUCCESS == rxstat)
-			++adapter->
-				hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
+			++stats->per_cpu[cpu_index].rx_delivered;
 		else
-			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+			++stats->per_cpu[cpu_index].rx_refused;
 
 		skb = skb_next;
 	}

+ 20 - 11
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -170,6 +171,7 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 {
 	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 	int need_orphan = 0;
+	int cpu;
 
 	if (adapter->tx_flow_low_watermark > 0) {
 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
@@ -199,7 +201,8 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 
 	if (need_orphan) {
 		skb_orphan(skb);
-		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
+		cpu = qdf_get_smp_processor_id();
+		++adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
 	} else
 		skb = skb_unshare(skb, GFP_ATOMIC);
 
@@ -221,6 +224,7 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
 	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 #endif
+	int cpu;
 
 	hdd_skb_fill_gso_size(adapter->dev, skb);
 
@@ -232,7 +236,8 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 		 * to send more packets. The flow would ultimately be controlled
 		 * by the limited number of tx descriptors for the vdev.
 		 */
-		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
+		cpu = qdf_get_smp_processor_id();
+		++adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
 		skb_orphan(skb);
 	}
 #endif
@@ -573,9 +578,11 @@ static void __hdd_softap_hard_start_xmit(struct sk_buff *skb,
 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
 	uint32_t num_seg;
 	struct hdd_station_info *sta_info = NULL;
+	struct hdd_tx_rx_stats *stats = &adapter->hdd_stats.tx_rx_stats;
+	int cpu = qdf_get_smp_processor_id();
 
-	++adapter->hdd_stats.tx_rx_stats.tx_called;
-	adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
+	++stats->per_cpu[cpu].tx_called;
+	stats->cont_txtimeout_cnt = 0;
 
 	/* Prevent this function from being called during SSR since TL
 	 * context may not be reinitialized at this time which may
@@ -685,7 +692,7 @@ static void __hdd_softap_hard_start_xmit(struct sk_buff *skb,
 
 	/* Get TL AC corresponding to Qdisc queue index/AC. */
 	ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
-	++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
+	++stats->per_cpu[cpu].tx_classified_ac[ac];
 
 #if defined(IPA_OFFLOAD)
 	if (!qdf_nbuf_ipa_owned_get(skb)) {
@@ -747,7 +754,7 @@ static void __hdd_softap_hard_start_xmit(struct sk_buff *skb,
 		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HDD_DATA,
 				   "%s: skb %pK linearize failed. drop the pkt",
 				   __func__, skb);
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 
@@ -756,7 +763,7 @@ static void __hdd_softap_hard_start_xmit(struct sk_buff *skb,
 				   "%s: Failed to send packet to txrx for sta: "
 				   QDF_MAC_ADDR_FMT, __func__,
 				   QDF_MAC_ADDR_REF(dest_mac_addr->bytes));
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 	netif_trans_update(dev);
@@ -780,7 +787,7 @@ drop_pkt_accounting:
 		hdd_put_sta_info_ref(&adapter->sta_info_list, &sta_info, true,
 				     STA_INFO_SOFTAP_HARD_START_XMIT);
 	++adapter->stats.tx_dropped;
-	++adapter->hdd_stats.tx_rx_stats.tx_dropped;
+	++stats->per_cpu[cpu].tx_dropped;
 }
 
 netdev_tx_t hdd_softap_hard_start_xmit(struct sk_buff *skb,
@@ -1100,6 +1107,7 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
 	struct qdf_mac_addr *src_mac;
 	struct hdd_station_info *sta_info;
 	bool is_eapol = false;
+	struct hdd_tx_rx_stats *stats;
 
 	/* Sanity check on inputs */
 	if (unlikely((!adapter_context) || (!rx_buf))) {
@@ -1123,6 +1131,7 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
 		return QDF_STATUS_E_FAILURE;
 	}
 
+	stats = &adapter->hdd_stats.tx_rx_stats;
 	/* walk the chain until all are processed */
 	next = (struct sk_buff *)rx_buf;
 
@@ -1143,7 +1152,7 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
 			continue;
 		}
 		cpu_index = wlan_hdd_get_cpu();
-		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
+		++stats->per_cpu[cpu_index].rx_packets;
 		++adapter->stats.rx_packets;
 		/* count aggregated RX frame into stats */
 		adapter->stats.rx_packets += qdf_nbuf_get_gso_segs(skb);
@@ -1224,9 +1233,9 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
 		}
 
 		if (QDF_IS_STATUS_SUCCESS(qdf_status))
-			++adapter->hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
+			++stats->per_cpu[cpu_index].rx_delivered;
 		else
-			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+			++stats->per_cpu[cpu_index].rx_refused;
 	}
 
 	return QDF_STATUS_SUCCESS;

+ 32 - 10
core/hdd/src/wlan_hdd_stats.c

@@ -6716,6 +6716,9 @@ void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
 	uint32_t total_rx_pkt, total_rx_dropped,
 		 total_rx_delv, total_rx_refused;
 	wlan_net_dev_ref_dbgid dbgid = NET_DEV_HOLD_CACHE_STATION_STATS_CB;
+	uint32_t total_tx_pkt;
+	uint32_t total_tx_dropped;
+	uint32_t total_tx_orphaned;
 
 	hdd_for_each_adapter_dev_held_safe(ctx, adapter, next_adapter,
 					   dbgid) {
@@ -6723,6 +6726,9 @@ void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
 		total_rx_dropped = 0;
 		total_rx_delv = 0;
 		total_rx_refused = 0;
+		total_tx_pkt = 0;
+		total_tx_dropped = 0;
+		total_tx_orphaned = 0;
 		stats = &adapter->hdd_stats.tx_rx_stats;
 
 		if (adapter->vdev_id == INVAL_VDEV_ID) {
@@ -6731,27 +6737,43 @@ void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
 		}
 
 		hdd_debug("adapter: %u", adapter->vdev_id);
-		for (; i < NUM_CPUS; i++) {
-			total_rx_pkt += stats->rx_packets[i];
-			total_rx_dropped += stats->rx_dropped[i];
-			total_rx_delv += stats->rx_delivered[i];
-			total_rx_refused += stats->rx_refused[i];
+		for (i = 0; i < NUM_CPUS; i++) {
+			total_rx_pkt += stats->per_cpu[i].rx_packets;
+			total_rx_dropped += stats->per_cpu[i].rx_dropped;
+			total_rx_delv += stats->per_cpu[i].rx_delivered;
+			total_rx_refused += stats->per_cpu[i].rx_refused;
+			total_tx_pkt += stats->per_cpu[i].tx_called;
+			total_tx_dropped += stats->per_cpu[i].tx_dropped;
+			total_tx_orphaned += stats->per_cpu[i].tx_orphaned;
 		}
 
 		/* dev_put has to be done here */
 		hdd_adapter_dev_put_debug(adapter, dbgid);
 
+		for (i = 0; i < NUM_CPUS; i++) {
+			if (!stats->per_cpu[i].tx_called)
+				continue;
+
+			hdd_debug("Tx CPU[%d]: called %u, dropped %u, orphaned %u",
+				  i, stats->per_cpu[i].tx_called,
+				  stats->per_cpu[i].tx_dropped,
+				  stats->per_cpu[i].tx_orphaned);
+		}
+
 		hdd_debug("TX - called %u, dropped %u orphan %u",
-			  stats->tx_called, stats->tx_dropped,
-			  stats->tx_orphaned);
+			  total_tx_pkt, total_tx_dropped,
+			  total_tx_orphaned);
 
 		for (i = 0; i < NUM_CPUS; i++) {
-			if (stats->rx_packets[i] == 0)
+			if (stats->per_cpu[i].rx_packets == 0)
 				continue;
 			hdd_debug("Rx CPU[%d]: packets %u, dropped %u, delivered %u, refused %u",
-				  i, stats->rx_packets[i], stats->rx_dropped[i],
-				  stats->rx_delivered[i], stats->rx_refused[i]);
+				  i, stats->per_cpu[i].rx_packets,
+				  stats->per_cpu[i].rx_dropped,
+				  stats->per_cpu[i].rx_delivered,
+				  stats->per_cpu[i].rx_refused);
 		}
+
 		hdd_debug("RX - packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u GRO - agg %u drop %u non-agg %u flush_skip %u low_tput_flush %u disabled(conc %u low-tput %u)",
 			  total_rx_pkt, total_rx_dropped,
 			  qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),

+ 37 - 19
core/hdd/src/wlan_hdd_sysfs_stats.c

@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -39,14 +40,29 @@ static void hdd_sysfs_get_stats(struct hdd_adapter *adapter, ssize_t *length,
 	uint32_t len = 0;
 	uint32_t total_rx_pkt = 0, total_rx_dropped = 0;
 	uint32_t total_rx_delv = 0, total_rx_refused = 0;
+	uint32_t total_tx_pkt = 0;
+	uint32_t total_tx_dropped = 0;
+	uint32_t total_tx_orphaned = 0;
+	uint32_t total_tx_classified_ac[WLAN_MAX_AC] = {0};
+	uint32_t total_tx_dropped_ac[WLAN_MAX_AC] = {0};
 	int i = 0;
+	uint8_t ac;
 	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
 
 	for (; i < NUM_CPUS; i++) {
-		total_rx_pkt += stats->rx_packets[i];
-		total_rx_dropped += stats->rx_dropped[i];
-		total_rx_delv += stats->rx_delivered[i];
-		total_rx_refused += stats->rx_refused[i];
+		total_rx_pkt += stats->per_cpu[i].rx_packets;
+		total_rx_dropped += stats->per_cpu[i].rx_dropped;
+		total_rx_delv += stats->per_cpu[i].rx_delivered;
+		total_rx_refused += stats->per_cpu[i].rx_refused;
+		total_tx_pkt += stats->per_cpu[i].tx_called;
+		total_tx_dropped += stats->per_cpu[i].tx_dropped;
+		total_tx_orphaned += stats->per_cpu[i].tx_orphaned;
+		for (ac = 0; ac < WLAN_MAX_AC; ac++) {
+			total_tx_classified_ac[ac] +=
+					 stats->per_cpu[i].tx_classified_ac[ac];
+			total_tx_dropped_ac[ac] +=
+					    stats->per_cpu[i].tx_dropped_ac[ac];
+		}
 	}
 
 	len = scnprintf(buffer, buf_len,
@@ -58,17 +74,17 @@ static void hdd_sysfs_get_stats(struct hdd_adapter *adapter, ssize_t *length,
 			"packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u\n"
 			"GRO - agg %u non-agg %u flush_skip %u low_tput_flush %u disabled(conc %u low-tput %u)\n",
 			qdf_system_ticks(),
-			stats->tx_called,
-			stats->tx_dropped,
-			stats->tx_orphaned,
-			stats->tx_dropped_ac[SME_AC_BK],
-			stats->tx_dropped_ac[SME_AC_BE],
-			stats->tx_dropped_ac[SME_AC_VI],
-			stats->tx_dropped_ac[SME_AC_VO],
-			stats->tx_classified_ac[SME_AC_BK],
-			stats->tx_classified_ac[SME_AC_BE],
-			stats->tx_classified_ac[SME_AC_VI],
-			stats->tx_classified_ac[SME_AC_VO],
+			total_tx_pkt,
+			total_tx_dropped,
+			total_tx_orphaned,
+			total_tx_dropped_ac[SME_AC_BK],
+			total_tx_dropped_ac[SME_AC_BE],
+			total_tx_dropped_ac[SME_AC_VI],
+			total_tx_dropped_ac[SME_AC_VO],
+			total_tx_classified_ac[SME_AC_BK],
+			total_tx_classified_ac[SME_AC_BE],
+			total_tx_classified_ac[SME_AC_VI],
+			total_tx_classified_ac[SME_AC_VO],
 			qdf_system_ticks(),
 			total_rx_pkt, total_rx_dropped,
 			qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),
@@ -81,13 +97,15 @@ static void hdd_sysfs_get_stats(struct hdd_adapter *adapter, ssize_t *length,
 			qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput));
 
 	for (i = 0; i < NUM_CPUS; i++) {
-		if (stats->rx_packets[i] == 0)
+		if (stats->per_cpu[i].rx_packets == 0)
 			continue;
 		len += scnprintf(buffer + len, buf_len - len,
 				 "Rx CPU[%d]:"
 				 "packets %u, dropped %u, delivered %u, refused %u\n",
-				 i, stats->rx_packets[i], stats->rx_dropped[i],
-				 stats->rx_delivered[i], stats->rx_refused[i]);
+				 i, stats->per_cpu[i].rx_packets,
+				 stats->per_cpu[i].rx_dropped,
+				 stats->per_cpu[i].rx_delivered,
+				 stats->per_cpu[i].rx_refused);
 	}
 
 	len += scnprintf(buffer + len, buf_len - len,

+ 29 - 22
core/hdd/src/wlan_hdd_tx_rx.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -238,6 +239,7 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 {
 	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 	int need_orphan = 0;
+	int cpu;
 
 	if (adapter->tx_flow_low_watermark > 0) {
 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
@@ -267,7 +269,8 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 
 	if (need_orphan) {
 		skb_orphan(skb);
-		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
+		cpu = qdf_get_smp_processor_id();
+		++adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
 	} else
 		skb = skb_unshare(skb, GFP_ATOMIC);
 
@@ -401,6 +404,7 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
 	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 #endif
+	int cpu;
 
 	hdd_skb_fill_gso_size(adapter->dev, skb);
 
@@ -412,7 +416,8 @@ static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
 		 * to send more packets. The flow would ultimately be controlled
 		 * by the limited number of tx descriptors for the vdev.
 		 */
-		++adapter->hdd_stats.tx_rx_stats.tx_orphaned;
+		cpu = qdf_get_smp_processor_id();
+		++adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
 		skb_orphan(skb);
 	}
 #endif
@@ -1038,6 +1043,8 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
 	bool is_eapol = false;
 	bool is_dhcp = false;
+	struct hdd_tx_rx_stats *stats = &adapter->hdd_stats.tx_rx_stats;
+	int cpu = qdf_get_smp_processor_id();
 
 #ifdef QCA_WIFI_FTM
 	if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
@@ -1046,8 +1053,8 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 	}
 #endif
 
-	++adapter->hdd_stats.tx_rx_stats.tx_called;
-	adapter->hdd_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
+	++stats->per_cpu[cpu].tx_called;
+	stats->cont_txtimeout_cnt = 0;
 	qdf_mem_copy(mac_addr.bytes, skb->data, sizeof(mac_addr.bytes));
 
 	if (cds_is_driver_transitioning()) {
@@ -1139,7 +1146,7 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 	 */
 	up = skb->priority;
 
-	++adapter->hdd_stats.tx_rx_stats.tx_classified_ac[ac];
+	++stats->per_cpu[cpu].tx_classified_ac[ac];
 #ifdef HDD_WMM_DEBUG
 	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
 		  "%s: Classified as ac %d up %d", __func__, ac, up);
@@ -1242,7 +1249,7 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 			  FL("Tx not allowed for sta: "
 			  QDF_MAC_ADDR_FMT), QDF_MAC_ADDR_REF(
 			  mac_addr_tx_allowed.bytes));
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 
@@ -1252,7 +1259,7 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 			  QDF_TRACE_LEVEL_INFO_HIGH,
 			  "%s: skb %pK linearize failed. drop the pkt",
 			  __func__, skb);
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 
@@ -1263,7 +1270,7 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
 			 "%s: TX function not registered by the data path",
 			 __func__);
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 
@@ -1274,7 +1281,7 @@ static void __hdd_hard_start_xmit(struct sk_buff *skb,
 			  "%s: Failed to send packet to txrx for sta_id: "
 			  QDF_MAC_ADDR_FMT,
 			  __func__, QDF_MAC_ADDR_REF(mac_addr.bytes));
-		++adapter->hdd_stats.tx_rx_stats.tx_dropped_ac[ac];
+		++stats->per_cpu[cpu].tx_dropped_ac[ac];
 		goto drop_pkt_and_release_skb;
 	}
 
@@ -1301,7 +1308,7 @@ drop_pkt:
 drop_pkt_accounting:
 
 	++adapter->stats.tx_dropped;
-	++adapter->hdd_stats.tx_rx_stats.tx_dropped;
+	++stats->per_cpu[cpu].tx_dropped;
 	if (is_arp) {
 		++adapter->hdd_stats.hdd_arp_stats.tx_dropped;
 		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_HIGH,
@@ -1490,6 +1497,7 @@ QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
 	struct sk_buff *skb;
 	struct sk_buff *skb_next;
 	unsigned int cpu_index;
+	struct hdd_tx_rx_stats *stats;
 
 	/* Sanity check on inputs */
 	if ((!context) || (!rxbuf)) {
@@ -1506,6 +1514,7 @@ QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
 	}
 
 	cpu_index = wlan_hdd_get_cpu();
+	stats = &adapter->hdd_stats.tx_rx_stats;
 
 	/* walk the chain until all are processed */
 	skb = (struct sk_buff *) rxbuf;
@@ -1513,7 +1522,7 @@ QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
 		skb_next = skb->next;
 		skb->dev = adapter->dev;
 
-		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
+		++stats->per_cpu[cpu_index].rx_packets;
 		++adapter->stats.rx_packets;
 		adapter->stats.rx_bytes += skb->len;
 
@@ -1537,10 +1546,9 @@ QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
 		}
 
 		if (NET_RX_SUCCESS == rxstat)
-			++adapter->
-				hdd_stats.tx_rx_stats.rx_delivered[cpu_index];
+			++stats->per_cpu[cpu_index].rx_delivered;
 		else
-			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+			++stats->per_cpu[cpu_index].rx_refused;
 
 		skb = skb_next;
 	}
@@ -2491,6 +2499,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
 	bool is_eapol, send_over_nl;
 	bool is_dhcp;
+	struct hdd_tx_rx_stats *stats;
 
 	/* Sanity check on inputs */
 	if (unlikely((!adapter_context) || (!rxBuf))) {
@@ -2515,6 +2524,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 	}
 
 	cpu_index = wlan_hdd_get_cpu();
+	stats = &adapter->hdd_stats.tx_rx_stats;
 
 	next = (struct sk_buff *)rxBuf;
 
@@ -2570,8 +2580,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
 		if ((sta_ctx->conn_info.proxy_arp_service) &&
 		    hdd_is_gratuitous_arp_unsolicited_na(skb)) {
-			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
-						rx_usolict_arp_n_mcast_drp);
+			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
 			/* Remove SKB from internal tracking table before
 			 * submitting it to stack.
 			 */
@@ -2607,7 +2616,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 
 		skb->dev = adapter->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		++adapter->hdd_stats.tx_rx_stats.rx_packets[cpu_index];
+		++stats->per_cpu[cpu_index].rx_packets;
 		++adapter->stats.rx_packets;
 		/* count aggregated RX frame into stats */
 		adapter->stats.rx_packets += qdf_nbuf_get_gso_segs(skb);
@@ -2619,8 +2628,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 		/* Check & drop replayed mcast packets (for IPV6) */
 		if (hdd_ctx->config->multicast_replay_filter &&
 				hdd_is_mcast_replay(skb)) {
-			qdf_atomic_inc(&adapter->hdd_stats.tx_rx_stats.
-						rx_usolict_arp_n_mcast_drp);
+			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
 			qdf_nbuf_free(skb);
 			continue;
 		}
@@ -2661,8 +2669,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 		}
 
 		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
-			++adapter->hdd_stats.tx_rx_stats.
-						rx_delivered[cpu_index];
+			++stats->per_cpu[cpu_index].rx_delivered;
 			if (track_arp)
 				++adapter->hdd_stats.hdd_arp_stats.
 							rx_delivered;
@@ -2679,7 +2686,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 					skb, adapter,
 					PKT_TYPE_RX_DELIVERED, &pkt_type);
 		} else {
-			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+			++stats->per_cpu[cpu_index].rx_refused;
 			if (track_arp)
 				++adapter->hdd_stats.hdd_arp_stats.rx_refused;
 

+ 36 - 18
core/hdd/src/wlan_hdd_wext.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -3026,14 +3027,29 @@ void hdd_wlan_get_stats(struct hdd_adapter *adapter, uint16_t *length,
 	uint32_t len = 0;
 	uint32_t total_rx_pkt = 0, total_rx_dropped = 0;
 	uint32_t total_rx_delv = 0, total_rx_refused = 0;
+	uint32_t total_tx_pkt = 0;
+	uint32_t total_tx_dropped = 0;
+	uint32_t total_tx_orphaned = 0;
+	uint32_t total_tx_classified_ac[WLAN_MAX_AC] = {0};
+	uint32_t total_tx_dropped_ac[WLAN_MAX_AC] = {0};
 	int i = 0;
+	uint8_t ac;
 	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
 
 	for (; i < NUM_CPUS; i++) {
-		total_rx_pkt += stats->rx_packets[i];
-		total_rx_dropped += stats->rx_dropped[i];
-		total_rx_delv += stats->rx_delivered[i];
-		total_rx_refused += stats->rx_refused[i];
+		total_rx_pkt += stats->per_cpu[i].rx_packets;
+		total_rx_dropped += stats->per_cpu[i].rx_dropped;
+		total_rx_delv += stats->per_cpu[i].rx_delivered;
+		total_rx_refused += stats->per_cpu[i].rx_refused;
+		total_tx_pkt += stats->per_cpu[i].tx_called;
+		total_tx_dropped += stats->per_cpu[i].tx_dropped;
+		total_tx_orphaned += stats->per_cpu[i].tx_orphaned;
+		for (ac = 0; ac < WLAN_MAX_AC; ac++) {
+			total_tx_classified_ac[ac] +=
+					 stats->per_cpu[i].tx_classified_ac[ac];
+			total_tx_dropped_ac[ac] +=
+					    stats->per_cpu[i].tx_dropped_ac[ac];
+		}
 	}
 
 	len = scnprintf(buffer, buf_len,
@@ -3045,17 +3061,17 @@ void hdd_wlan_get_stats(struct hdd_adapter *adapter, uint16_t *length,
 			"packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u\n"
 			"GRO - agg %u non-agg %u flush_skip %u low_tput_flush %u disabled(conc %u low-tput %u)\n",
 			qdf_system_ticks(),
-			stats->tx_called,
-			stats->tx_dropped,
-			stats->tx_orphaned,
-			stats->tx_dropped_ac[SME_AC_BK],
-			stats->tx_dropped_ac[SME_AC_BE],
-			stats->tx_dropped_ac[SME_AC_VI],
-			stats->tx_dropped_ac[SME_AC_VO],
-			stats->tx_classified_ac[SME_AC_BK],
-			stats->tx_classified_ac[SME_AC_BE],
-			stats->tx_classified_ac[SME_AC_VI],
-			stats->tx_classified_ac[SME_AC_VO],
+			total_tx_pkt,
+			total_tx_dropped,
+			total_tx_orphaned,
+			total_tx_dropped_ac[SME_AC_BK],
+			total_tx_dropped_ac[SME_AC_BE],
+			total_tx_dropped_ac[SME_AC_VI],
+			total_tx_dropped_ac[SME_AC_VO],
+			total_tx_classified_ac[SME_AC_BK],
+			total_tx_classified_ac[SME_AC_BE],
+			total_tx_classified_ac[SME_AC_VI],
+			total_tx_classified_ac[SME_AC_VO],
 			qdf_system_ticks(),
 			total_rx_pkt, total_rx_dropped,
 			qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),
@@ -3068,13 +3084,15 @@ void hdd_wlan_get_stats(struct hdd_adapter *adapter, uint16_t *length,
 			qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput));
 
 	for (i = 0; i < NUM_CPUS; i++) {
-		if (stats->rx_packets[i] == 0)
+		if (stats->per_cpu[i].rx_packets == 0)
 			continue;
 		len += scnprintf(buffer + len, buf_len - len,
 				 "Rx CPU[%d]:"
 				 "packets %u, dropped %u, delivered %u, refused %u\n",
-				 i, stats->rx_packets[i], stats->rx_dropped[i],
-				 stats->rx_delivered[i], stats->rx_refused[i]);
+				 i, stats->per_cpu[i].rx_packets,
+				 stats->per_cpu[i].rx_dropped,
+				 stats->per_cpu[i].rx_delivered,
+				 stats->per_cpu[i].rx_refused);
 	}
 
 	len += scnprintf(buffer + len, buf_len - len,

+ 12 - 6
core/hdd/src/wlan_hdd_wmm.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -382,6 +383,7 @@ static void hdd_wmm_inactivity_timer_cb(void *user_data)
 	QDF_STATUS qdf_status;
 	uint32_t traffic_count = 0;
 	sme_ac_enum_type ac_type;
+	uint8_t cpu;
 
 	if (!qos_context) {
 		hdd_err("invalid user data");
@@ -399,9 +401,9 @@ static void hdd_wmm_inactivity_timer_cb(void *user_data)
 	ac = &adapter->hdd_wmm_status.ac_status[ac_type];
 
 	/* Get the Tx stats for this AC. */
-	traffic_count =
-		adapter->hdd_stats.tx_rx_stats.tx_classified_ac[qos_context->
-								    ac_type];
+	for (cpu = 0; cpu < NUM_CPUS; cpu++)
+		traffic_count += adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].
+					 tx_classified_ac[qos_context->ac_type];
 
 	hdd_warn("WMM inactivity check for AC=%d, count=%u, last=%u",
 		 ac_type, traffic_count, ac->last_traffic_count);
@@ -450,6 +452,7 @@ hdd_wmm_enable_inactivity_timer(struct hdd_wmm_qos_context *qos_context,
 	struct hdd_adapter *adapter = qos_context->adapter;
 	sme_ac_enum_type ac_type = qos_context->ac_type;
 	struct hdd_wmm_ac_status *ac;
+	uint8_t cpu;
 
 	adapter = qos_context->adapter;
 	ac = &adapter->hdd_wmm_status.ac_status[ac_type];
@@ -476,10 +479,13 @@ hdd_wmm_enable_inactivity_timer(struct hdd_wmm_qos_context *qos_context,
 		return qdf_status;
 	}
 	ac->inactivity_time = inactivity_time;
+
+	ac->last_traffic_count = 0;
 	/* Initialize the current tx traffic count on this AC */
-	ac->last_traffic_count =
-		adapter->hdd_stats.tx_rx_stats.tx_classified_ac[qos_context->
-								    ac_type];
+	for (cpu = 0; cpu < NUM_CPUS; cpu++)
+		ac->last_traffic_count +=
+			adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].
+					 tx_classified_ac[qos_context->ac_type];
 	qos_context->is_inactivity_timer_running = true;
 	return qdf_status;
 }