
Revert "qcacld-3.0: Code cleanup from HDD module"

This reverts commit I66d52b1af7ae52dfde330c7a1b5046f77ab5fe70.

Change-Id: I11e52f785ef0e1d6ba8481f7e98d7ca9e45f249c
CRs-Fixed: 3196571
Karthik Kantamneni, 2 years ago (parent commit ff74017c8d)

+ 12 - 0
Kbuild

@@ -126,6 +126,10 @@ HDD_OBJS := 	$(HDD_SRC_DIR)/wlan_hdd_assoc.o \
 		$(HDD_SRC_DIR)/wlan_hdd_wmm.o \
 		$(HDD_SRC_DIR)/wlan_hdd_wowl.o\
 
+ifeq ($(CONFIG_WLAN_FEATURE_PERIODIC_STA_STATS), y)
+HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_periodic_sta_stats.o
+endif
+
 ifeq ($(CONFIG_UNIT_TEST), y)
 HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_unit_test.o
 endif
@@ -189,6 +193,10 @@ ifeq ($(CONFIG_WLAN_FEATURE_LPSS), y)
 HDD_OBJS +=	$(HDD_SRC_DIR)/wlan_hdd_lpass.o
 endif
 
+ifeq ($(CONFIG_WLAN_LRO), y)
+HDD_OBJS +=     $(HDD_SRC_DIR)/wlan_hdd_lro.o
+endif
+
 ifeq ($(CONFIG_WLAN_NAPI), y)
 HDD_OBJS +=     $(HDD_SRC_DIR)/wlan_hdd_napi.o
 endif
@@ -479,6 +487,10 @@ ifeq ($(CONFIG_WLAN_BOOTUP_MARKER), y)
 HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_bootup_marker.o
 endif
 
+ifeq ($(CONFIG_FEATURE_BUS_BANDWIDTH_MGR),y)
+HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_bus_bandwidth.o
+endif
+
 ifeq ($(CONFIG_FEATURE_WLAN_CH_AVOID_EXT),y)
 HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_avoid_freq_ext.o
 endif

File diff suppressed because it is too large
+ 1033 - 0
core/hdd/inc/hdd_dp_cfg.h


+ 70 - 1
core/hdd/inc/wlan_hdd_cfg.h

@@ -119,6 +119,7 @@ struct hdd_config {
 	bool apf_enabled;
 	uint16_t sap_tx_leakage_threshold;
 	bool sap_internal_restart;
+	bool tx_orphan_enable;
 	bool is_11k_offload_supported;
 	bool action_oui_enable;
 	uint8_t action_oui_str[ACTION_OUI_MAXIMUM_ID][ACTION_OUI_MAX_STR_LEN];
@@ -148,12 +149,52 @@ struct hdd_config {
 #ifndef REMOVE_PKT_LOG
 	bool enable_packet_log;
 #endif
+	uint32_t rx_mode;
+	uint32_t tx_comp_loop_pkt_limit;
+	uint32_t rx_reap_loop_pkt_limit;
+	uint32_t rx_hp_oos_update_limit;
+	uint64_t rx_softirq_max_yield_duration_ns;
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+	/* bandwidth threshold for super high bandwidth */
+	uint32_t bus_bw_super_high_threshold;
+	/* bandwidth threshold for ultra high bandwidth */
+	uint32_t bus_bw_ultra_high_threshold;
+	/* bandwidth threshold for very high bandwidth */
+	uint32_t bus_bw_very_high_threshold;
+	/* bandwidth threshold for DBS mode bandwidth */
+	uint32_t bus_bw_dbs_threshold;
+	/* bandwidth threshold for high bandwidth */
+	uint32_t bus_bw_high_threshold;
+	/* bandwidth threshold for medium bandwidth */
+	uint32_t bus_bw_medium_threshold;
+	/* bandwidth threshold for low bandwidth */
+	uint32_t bus_bw_low_threshold;
+	uint32_t bus_bw_compute_interval;
+	uint32_t enable_tcp_delack;
+	bool     enable_tcp_limit_output;
+	uint32_t enable_tcp_adv_win_scale;
+	uint32_t tcp_delack_thres_high;
+	uint32_t tcp_delack_thres_low;
+	uint32_t tcp_tx_high_tput_thres;
+	uint32_t tcp_delack_timer_count;
+	bool     enable_tcp_param_update;
+	uint32_t bus_low_cnt_threshold;
+	bool enable_latency_crit_clients;
+#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
 
 #ifdef WLAN_FEATURE_MSCS
 	uint32_t mscs_pkt_threshold;
 	uint32_t mscs_voice_interval;
 #endif /* WLAN_FEATURE_MSCS */
 
+#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
+	bool del_ack_enable;
+	uint32_t del_ack_threshold_high;
+	uint32_t del_ack_threshold_low;
+	uint16_t del_ack_timer_value;
+	uint16_t del_ack_pkt_count;
+#endif
+
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 	uint32_t tx_flow_low_watermark;
 	uint32_t tx_flow_hi_watermark_offset;
@@ -166,6 +207,18 @@ struct hdd_config {
 	uint32_t tx_hbw_flow_max_queue_depth;
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 	uint32_t napi_cpu_affinity_mask;
+	/* CPU affinity mask for rx_thread */
+	uint32_t rx_thread_ul_affinity_mask;
+	uint32_t rx_thread_affinity_mask;
+	uint8_t cpu_map_list[CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
+	bool multicast_replay_filter;
+	uint32_t rx_wakelock_timeout;
+	uint8_t num_dp_rx_threads;
+#ifdef CONFIG_DP_TRACE
+	bool enable_dp_trace;
+	uint8_t dp_trace_config[DP_TRACE_CONFIG_STRING_LENGTH];
+#endif
+	uint8_t enable_nud_tracking;
 	uint32_t operating_chan_freq;
 	uint8_t num_vdevs;
 	uint8_t enable_concurrent_sta[CFG_CONCURRENT_IFACE_MAX_LEN];
@@ -187,6 +240,14 @@ struct hdd_config {
 	uint8_t tsf_ptp_options;
 #endif /* WLAN_FEATURE_TSF_PLUS */
 
+#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
+	uint32_t pkt_bundle_threshold_high;
+	uint32_t pkt_bundle_threshold_low;
+	uint16_t pkt_bundle_timer_value;
+	uint16_t pkt_bundle_size;
+#endif
+	uint32_t dp_proto_event_bitmap;
+
 #ifdef SAR_SAFETY_FEATURE
 	uint32_t sar_safety_timeout;
 	uint32_t sar_safety_unsolicited_timeout;
@@ -198,12 +259,20 @@ struct hdd_config {
 	bool config_sar_safety_sleep_index;
 #endif
 	bool get_roam_chan_from_fw;
-
+	uint32_t fisa_enable;
+
+#ifdef WLAN_FEATURE_PERIODIC_STA_STATS
+	/* Periodicity of logging */
+	uint32_t periodic_stats_timer_interval;
+	/* Duration for which periodic logging should be done */
+	uint32_t periodic_stats_timer_duration;
+#endif /* WLAN_FEATURE_PERIODIC_STA_STATS */
 	uint8_t nb_commands_interval;
 
 #ifdef FEATURE_CLUB_LL_STATS_AND_GET_STATION
 	uint32_t sta_stats_cache_expiry_time;
 #endif
+	int icmp_req_to_fw_mark_interval;
 	bool read_mac_addr_from_mac_file;
 };
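For context, the datapath fields restored to struct hdd_config above are normally populated from the converged CFG framework at init time by hdd_dp_cfg_update() (declared in wlan_hdd_tx_rx.h further below); the corresponding CFG items live in the restored core/hdd/inc/hdd_dp_cfg.h whose diff is suppressed above. A minimal sketch of that population step, assuming the standard cfg_get() accessor and illustrative CFG_DP_* item names:

static void hdd_dp_cfg_update_sketch(struct wlan_objmgr_psoc *psoc,
				     struct hdd_context *hdd_ctx)
{
	struct hdd_config *config = hdd_ctx->config;

	/* illustrative CFG_DP_* names; see hdd_dp_cfg.h for the real items */
	config->rx_mode = cfg_get(psoc, CFG_DP_RX_MODE);
	config->bus_bw_compute_interval =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL);
	config->bus_bw_high_threshold =
		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD);
	config->enable_tcp_delack = cfg_get(psoc, CFG_DP_ENABLE_TCP_DELACK);
}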
 

+ 1 - 0
core/hdd/inc/wlan_hdd_ipa.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the

+ 87 - 0
core/hdd/inc/wlan_hdd_lro.h

@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WLAN_HDD_LRO_H__
+#define __WLAN_HDD_LRO_H__
+/**
+ * DOC: wlan_hdd_lro.h
+ *
+ * WLAN LRO interface module headers
+ */
+
+struct hdd_context;
+
+#if defined(FEATURE_LRO)
+/**
+ * hdd_lro_rx() - Handle Rx processing via LRO
+ * @adapter: pointer to adapter context
+ * @skb: pointer to sk_buff
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via LRO, or a non-zero error code otherwise
+ */
+QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter, struct sk_buff *skb);
+
+void hdd_lro_display_stats(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_lro_set_reset() - vendor command to disable/enable LRO
+ * @hdd_ctx: hdd context
+ * @adapter: adapter
+ * @enable_flag: enable or disable LRO.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS hdd_lro_set_reset(struct hdd_context *hdd_ctx,
+			     struct hdd_adapter *adapter,
+			     uint8_t enable_flag);
+
+/**
+ * hdd_is_lro_enabled() - Is LRO enabled
+ * @hdd_ctx: HDD context
+ *
+ * This function checks if LRO is enabled in HDD context.
+ *
+ * Return: 0 - success, < 0 - failure
+ */
+int hdd_is_lro_enabled(struct hdd_context *hdd_ctx);
+
+#else
+static inline QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter,
+				    struct sk_buff *skb)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline void hdd_lro_display_stats(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline QDF_STATUS hdd_lro_set_reset(struct hdd_context *hdd_ctx,
+					   struct hdd_adapter *adapter,
+					   uint8_t enable_flag)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline int hdd_is_lro_enabled(struct hdd_context *hdd_ctx)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* FEATURE_LRO */
+#endif /* __WLAN_HDD_LRO_H__ */
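A hedged usage sketch for the API above (not part of this commit): an rx handler can try LRO first and fall back to plain stack delivery when the build has FEATURE_LRO disabled or the flow is not eligible; netif_rx_ni() is used here purely for illustration.

static QDF_STATUS hdd_deliver_skb_sketch(struct hdd_adapter *adapter,
					 struct sk_buff *skb)
{
	/* hdd_lro_rx() returns QDF_STATUS_E_NOSUPPORT when FEATURE_LRO is off */
	if (QDF_IS_STATUS_SUCCESS(hdd_lro_rx(adapter, skb)))
		return QDF_STATUS_SUCCESS;	/* consumed by LRO */

	/* not handled by LRO: hand the frame to the network stack */
	return (netif_rx_ni(skb) == NET_RX_SUCCESS) ?
		QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE;
}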

+ 712 - 0
core/hdd/inc/wlan_hdd_main.h

@@ -74,6 +74,7 @@
 #include "wlan_hdd_debugfs.h"
 #include <qdf_defer.h>
 #include "sap_api.h"
+#include <wlan_hdd_lro.h>
 #include "cdp_txrx_flow_ctrl_legacy.h"
 #include <cdp_txrx_peer_ops.h>
 #include <cdp_txrx_misc.h>
@@ -100,6 +101,7 @@
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
+#include "wlan_hdd_nud_tracking.h"
 #include "wlan_hdd_twt.h"
 #include "wma_sar_public_structs.h"
 #include "wlan_mlme_ucfg_api.h"
@@ -116,6 +118,7 @@
 #endif
 
 #include "wlan_hdd_sta_info.h"
+#include "wlan_hdd_bus_bandwidth.h"
 #include <wlan_hdd_cm_api.h>
 #include "wlan_hdd_mlo.h"
 #include "wlan_dp_public_struct.h"
@@ -548,9 +551,45 @@ typedef enum {
 	NET_DEV_HOLD_ID_MAX
 } wlan_net_dev_ref_dbgid;
 
+/**
+ * struct hdd_tx_rx_histogram - structure to keep track of tx and rx packets
+ *				transferred over 100ms intervals
+ * @interval_rx:	# of rx packets received in the last 100ms interval
+ * @interval_tx:	# of tx packets sent in the last 100ms interval
+ * @next_vote_level:	pld_bus_width_type voting level (high or low)
+ *			determined on the basis of total tx and rx packets
+ *			received in the last 100ms interval
+ * @next_rx_level:	pld_bus_width_type voting level (high or low)
+ *			determined on the basis of rx packets received in the
+ *			last 100ms interval
+ * @next_tx_level:	pld_bus_width_type voting level (high or low)
+ *			determined on the basis of tx packets sent in the
+ *			last 100ms interval
+ * @is_rx_pm_qos_high: Capture rx_pm_qos voting
+ * @is_tx_pm_qos_high: Capture tx_pm_qos voting
+ * @qtime: timestamp when the record is added
+ *
+ * The structure keeps track of throughput requirements of wlan driver.
+ * An entry is added if either of next_vote_level, next_rx_level or
+ * next_tx_level changes. An entry is not added for every 100ms interval.
+ */
+struct hdd_tx_rx_histogram {
+	uint64_t interval_rx;
+	uint64_t interval_tx;
+	uint32_t next_vote_level;
+	uint32_t next_rx_level;
+	uint32_t next_tx_level;
+	bool is_rx_pm_qos_high;
+	bool is_tx_pm_qos_high;
+	uint64_t qtime;
+};
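As the comment above notes, an entry is recorded only when one of the vote levels changes. A condensed sketch of how the ring buffer in hdd_context (hdd_txrx_hist / hdd_txrx_hist_idx, added further below) might be filled; NUM_TX_RX_HISTOGRAM_MASK is assumed to be the power-of-two mask used by the original driver:

static void hdd_bus_bw_record_sketch(struct hdd_context *hdd_ctx,
				     uint64_t rx, uint64_t tx,
				     uint32_t vote, uint32_t rx_lvl,
				     uint32_t tx_lvl, bool rx_qos, bool tx_qos)
{
	struct hdd_tx_rx_histogram *entry;

	/* no level change in this 100ms window: nothing to record */
	if (vote == hdd_ctx->cur_vote_level &&
	    rx_lvl == hdd_ctx->cur_rx_level &&
	    tx_lvl == hdd_ctx->cur_tx_level)
		return;

	entry = &hdd_ctx->hdd_txrx_hist[hdd_ctx->hdd_txrx_hist_idx];
	entry->interval_rx = rx;
	entry->interval_tx = tx;
	entry->next_vote_level = vote;
	entry->next_rx_level = rx_lvl;
	entry->next_tx_level = tx_lvl;
	entry->is_rx_pm_qos_high = rx_qos;
	entry->is_tx_pm_qos_high = tx_qos;
	entry->qtime = qdf_get_log_timestamp();

	/* wrap the write index; mask name is an assumption */
	hdd_ctx->hdd_txrx_hist_idx = (hdd_ctx->hdd_txrx_hist_idx + 1) &
				     NUM_TX_RX_HISTOGRAM_MASK;
}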
+
 struct hdd_tx_rx_stats {
 	struct {
 		/* start_xmit stats */
+		__u32    tx_called;
+		__u32    tx_dropped;
+		__u32    tx_orphaned;
 		__u32    tx_classified_ac[WLAN_MAX_AC];
 		__u32    tx_dropped_ac[WLAN_MAX_AC];
 #ifdef TX_MULTIQ_PER_AC
@@ -563,14 +602,32 @@ struct hdd_tx_rx_stats {
 		/* skb->hash calculated in select queue */
 		uint32_t qselect_skb_hash_calc;
 #endif
+		/* rx stats */
+		__u32 rx_packets;
+		__u32 rx_dropped;
+		__u32 rx_delivered;
+		__u32 rx_refused;
 	} per_cpu[NUM_CPUS];
 
+	qdf_atomic_t rx_usolict_arp_n_mcast_drp;
+
+	/* rx gro */
+	__u32 rx_aggregated;
+	__u32 rx_gro_dropped;
+	__u32 rx_non_aggregated;
+	__u32 rx_gro_flush_skip;
+	__u32 rx_gro_low_tput_flush;
+
 	/* txflow stats */
 	bool     is_txflow_paused;
 	__u32    txflow_pause_cnt;
 	__u32    txflow_unpause_cnt;
 	__u32    txflow_timer_cnt;
 
+	/*tx timeout stats*/
+	__u32 tx_timeout_cnt;
+	__u32 cont_txtimeout_cnt;
+	u64 jiffies_last_txtimeout;
 };
 
 /**
@@ -583,6 +640,114 @@ struct hdd_pmf_stats {
 	uint8_t num_unprot_disassoc_rx;
 };
 
+/**
+ * struct hdd_arp_stats_s - arp debug stats count
+ * @tx_arp_req_count: no. of arp req received from network stack
+ * @rx_arp_rsp_count: no. of arp res received from FW
+ * @tx_dropped: no. of arp req dropped at hdd layer
+ * @rx_dropped: no. of arp res dropped
+ * @rx_delivered: no. of arp res delivered to network stack
+ * @rx_refused: no of arp rsp refused (not delivered) to network stack
+ * @tx_host_fw_sent: no of arp req sent by FW OTA
+ * @rx_host_drop_reorder: no of arp res dropped by host
+ * @rx_fw_cnt: no of arp res received by FW
+ * @tx_ack_cnt: no of arp req acked by FW
+ */
+struct hdd_arp_stats_s {
+	uint16_t tx_arp_req_count;
+	uint16_t rx_arp_rsp_count;
+	uint16_t tx_dropped;
+	uint16_t rx_dropped;
+	uint16_t rx_delivered;
+	uint16_t rx_refused;
+	uint16_t tx_host_fw_sent;
+	uint16_t rx_host_drop_reorder;
+	uint16_t rx_fw_cnt;
+	uint16_t tx_ack_cnt;
+};
+
+/**
+ * struct hdd_dns_stats_s - dns debug stats count
+ * @tx_dns_req_count: no. of dns query received from network stack
+ * @rx_dns_rsp_count: no. of dns res received from FW
+ * @tx_dropped: no. of dns query dropped at hdd layer
+ * @rx_delivered: no. of dns res delivered to network stack
+ * @rx_refused: no of dns res refused (not delivered) to network stack
+ * @tx_host_fw_sent: no of dns query sent by FW OTA
+ * @rx_host_drop: no of dns res dropped by host
+ * @tx_ack_cnt: no of dns req acked by FW
+ */
+struct hdd_dns_stats_s {
+	uint16_t tx_dns_req_count;
+	uint16_t rx_dns_rsp_count;
+	uint16_t tx_dropped;
+	uint16_t rx_delivered;
+	uint16_t rx_refused;
+	uint16_t tx_host_fw_sent;
+	uint16_t rx_host_drop;
+	uint16_t tx_ack_cnt;
+};
+
+/**
+ * struct hdd_tcp_stats_s - tcp debug stats count
+ * @tx_tcp_syn_count: no. of tcp syn received from network stack
+ * @tx_tcp_ack_count: no. of tcp ack received from network stack
+ * @rx_tcp_syn_ack_count: no. of tcp syn ack received from FW
+ * @tx_tcp_syn_dropped: no. of tcp syn dropped at hdd layer
+ * @tx_tcp_ack_dropped: no. of tcp ack dropped at hdd layer
+ * @rx_delivered: no. of tcp syn ack delivered to network stack
+ * @rx_refused: no of tcp syn ack refused (not delivered) to network stack
+ * @tx_tcp_syn_host_fw_sent: no of tcp syn sent by FW OTA
+ * @tx_tcp_ack_host_fw_sent: no of tcp ack sent by FW OTA
+ * @rx_host_drop: no of tcp syn ack dropped by host
+ * @tx_tcp_syn_ack_cnt: no of tcp syn acked by FW
+ * @tx_tcp_ack_ack_cnt: no of tcp ack acked by FW
+ * @is_tcp_syn_ack_rcv: flag to check tcp syn ack received or not
+ * @is_tcp_ack_sent: flag to check tcp ack sent or not
+ */
+struct hdd_tcp_stats_s {
+	uint16_t tx_tcp_syn_count;
+	uint16_t tx_tcp_ack_count;
+	uint16_t rx_tcp_syn_ack_count;
+	uint16_t tx_tcp_syn_dropped;
+	uint16_t tx_tcp_ack_dropped;
+	uint16_t rx_delivered;
+	uint16_t rx_refused;
+	uint16_t tx_tcp_syn_host_fw_sent;
+	uint16_t tx_tcp_ack_host_fw_sent;
+	uint16_t rx_host_drop;
+	uint16_t rx_fw_cnt;
+	uint16_t tx_tcp_syn_ack_cnt;
+	uint16_t tx_tcp_ack_ack_cnt;
+	bool is_tcp_syn_ack_rcv;
+	bool is_tcp_ack_sent;
+
+};
+
+/**
+ * struct hdd_icmpv4_stats_s - icmpv4 debug stats count
+ * @tx_icmpv4_req_count: no. of icmpv4 req received from network stack
+ * @rx_icmpv4_rsp_count: no. of icmpv4 res received from FW
+ * @tx_dropped: no. of icmpv4 req dropped at hdd layer
+ * @rx_delivered: no. of icmpv4 res delivered to network stack
+ * @rx_refused: no of icmpv4 res refused (not delivered) to network stack
+ * @tx_host_fw_sent: no of icmpv4 req sent by FW OTA
+ * @rx_host_drop: no of icmpv4 res dropped by host
+ * @rx_fw_cnt: no of icmpv4 res received by FW
+ * @tx_ack_cnt: no of icmpv4 req acked by FW
+ */
+struct hdd_icmpv4_stats_s {
+	uint16_t tx_icmpv4_req_count;
+	uint16_t rx_icmpv4_rsp_count;
+	uint16_t tx_dropped;
+	uint16_t rx_delivered;
+	uint16_t rx_refused;
+	uint16_t tx_host_fw_sent;
+	uint16_t rx_host_drop;
+	uint16_t rx_fw_cnt;
+	uint16_t tx_ack_cnt;
+};
+
 /**
  * struct hdd_peer_stats - Peer stats at HDD level
  * @rx_count: RX count
@@ -597,14 +762,64 @@ struct hdd_peer_stats {
 
 #define MAX_SUBTYPES_TRACKED	4
 
+/**
+ * struct hdd_eapol_stats_s - eapol debug stats count
+ * @eapol_m1_count: eapol m1 count
+ * @eapol_m2_count: eapol m2 count
+ * @eapol_m3_count: eapol m3 count
+ * @eapol_m4_count: eapol m4 count
+ * @tx_dropped: no of tx frames dropped by host
+ * @tx_noack_cnt: no of frames for which there is no ack
+ * @rx_delivered: no. of frames delivered to network stack
+ * @rx_refused: no of frames not delivered to network stack
+ */
+struct hdd_eapol_stats_s {
+	uint16_t eapol_m1_count;
+	uint16_t eapol_m2_count;
+	uint16_t eapol_m3_count;
+	uint16_t eapol_m4_count;
+	uint16_t tx_dropped[MAX_SUBTYPES_TRACKED];
+	uint16_t tx_noack_cnt[MAX_SUBTYPES_TRACKED];
+	uint16_t rx_delivered[MAX_SUBTYPES_TRACKED];
+	uint16_t rx_refused[MAX_SUBTYPES_TRACKED];
+};
+
+/**
+ * struct hdd_dhcp_stats_s - dhcp debug stats count
+ * @dhcp_dis_count: dhcp discovery count
+ * @dhcp_off_count: dhcp offer count
+ * @dhcp_req_count: dhcp request count
+ * @dhcp_ack_count: dhcp ack count
+ * @tx_dropped: no of tx frames dropped by host
+ * @tx_noack_cnt: no of frames for which there is no ack
+ * @rx_delivered: no. of frames delivered to network stack
+ * @rx_refused: no of frames not delivered to network stack
+ */
+struct hdd_dhcp_stats_s {
+	uint16_t dhcp_dis_count;
+	uint16_t dhcp_off_count;
+	uint16_t dhcp_req_count;
+	uint16_t dhcp_ack_count;
+	uint16_t tx_dropped[MAX_SUBTYPES_TRACKED];
+	uint16_t tx_noack_cnt[MAX_SUBTYPES_TRACKED];
+	uint16_t rx_delivered[MAX_SUBTYPES_TRACKED];
+	uint16_t rx_refused[MAX_SUBTYPES_TRACKED];
+};
+
 struct hdd_stats {
 	tCsrSummaryStatsInfo summary_stat;
 	tCsrGlobalClassAStatsInfo class_a_stat;
 	tCsrGlobalClassDStatsInfo class_d_stat;
 	struct csr_per_chain_rssi_stats_info  per_chain_rssi_stats;
 	struct hdd_tx_rx_stats tx_rx_stats;
+	struct hdd_arp_stats_s hdd_arp_stats;
+	struct hdd_dns_stats_s hdd_dns_stats;
+	struct hdd_tcp_stats_s hdd_tcp_stats;
+	struct hdd_icmpv4_stats_s hdd_icmpv4_stats;
 	struct hdd_peer_stats peer_stats;
 	struct hdd_pmf_stats hdd_pmf_stats;
+	struct hdd_eapol_stats_s hdd_eapol_stats;
+	struct hdd_dhcp_stats_s hdd_dhcp_stats;
 	struct pmf_bcn_protect_stats bcn_protect_stats;
 
 #ifdef FEATURE_CLUB_LL_STATS_AND_GET_STATION
@@ -776,11 +991,50 @@ struct hdd_rate_info {
 	enum tx_rate_info rate_flags;
 };
 
+/**
+ * struct hdd_mic_error_info - mic error info in HDD
+ * @ta_mac_addr: transmitter mac address
+ * @multicast: Flag for multicast
+ * @key_id: Key ID
+ * @tsc: Sequence number
+ * @vdev_id: vdev id
+ *
+ */
+struct hdd_mic_error_info {
+	struct qdf_mac_addr ta_mac_addr;
+	bool multicast;
+	uint8_t key_id;
+	uint8_t tsc[SIR_CIPHER_SEQ_CTR_SIZE];
+	uint16_t vdev_id;
+};
+
+enum hdd_mic_work_status {
+	MIC_UNINITIALIZED,
+	MIC_INITIALIZED,
+	MIC_SCHEDULED,
+	MIC_DISABLED
+};
+
 enum hdd_work_status {
 	HDD_WORK_UNINITIALIZED,
 	HDD_WORK_INITIALIZED,
 };
 
+/**
+ * struct hdd_mic_work - mic work info in HDD
+ * @work: mic error work
+ * @status: status of mic error work
+ * @info: Pointer to mic error information
+ * @lock: lock to synchronize mic error work
+ *
+ */
+struct hdd_mic_work {
+	qdf_work_t work;
+	enum hdd_mic_work_status status;
+	struct hdd_mic_error_info *info;
+	qdf_spinlock_t lock;
+};
+
 /**
  * struct hdd_fw_txrx_stats - fw txrx status in HDD
  *                            (refer to station_info struct in Kernel)
@@ -990,6 +1244,7 @@ struct wlm_multi_client_info_table {
  * @vdev_lock: lock to protect vdev context access
  * @vdev_id: Unique identifier assigned to the vdev
  * @event_flags: a bitmap of hdd_adapter_flags
+ * @mic_work: mic work information
  * @enable_dynamic_tsf_sync: Enable/Disable TSF sync through NL interface
  * @dynamic_tsf_sync_interval: TSF sync interval configure through NL interface
  * @gpio_tsf_sync_work: work to sync send TSF CAP WMI command
@@ -1008,6 +1263,8 @@ struct wlm_multi_client_info_table {
  *                          as per enum qca_sta_connect_fail_reason_codes
  * @upgrade_udp_qos_threshold: The threshold for user priority upgrade for
 			       any UDP packet.
+ * @gro_disallowed: Flag to check if GRO is enabled or disabled for the adapter
+ * @gro_flushed: Flag to indicate if GRO explicit flush is done or not
  * @handle_feature_update: Handle feature update only if it is triggered
  *			   by hdd_netdev_feature_update
  * @netdev_features_update_work: work for handling the netdev features update
@@ -1064,6 +1321,11 @@ struct hdd_adapter {
 	/* MLD address for adapter */
 	struct qdf_mac_addr mld_addr;
 
+#ifdef WLAN_NUD_TRACKING
+	struct hdd_nud_tracking_info nud_tracking;
+#endif
+
+	struct hdd_mic_work mic_work;
 	unsigned long event_flags;
 
 	/**Device TX/RX statistics*/
@@ -1198,6 +1460,13 @@ struct hdd_adapter {
 	qdf_list_t blocked_scan_request_q;
 	qdf_mutex_t blocked_scan_request_q_lock;
 
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+	unsigned long prev_rx_packets;
+	unsigned long prev_tx_packets;
+	unsigned long prev_tx_bytes;
+	uint64_t prev_fwd_tx_packets;
+	uint64_t prev_fwd_rx_packets;
+#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
 
 #ifdef WLAN_FEATURE_MSCS
 	unsigned long mscs_prev_tx_vo_pkts;
@@ -1247,6 +1516,7 @@ struct hdd_adapter {
 	struct hdd_netif_queue_history
 		 queue_oper_history[WLAN_HDD_MAX_HISTORY_ENTRY];
 	struct hdd_netif_queue_stats queue_oper_stats[WLAN_REASON_TYPE_MAX];
+	ol_txrx_tx_fp tx_fn;
 	/* debugfs entry */
 	struct dentry *debugfs_phy;
 	/*
@@ -1262,7 +1532,16 @@ struct hdd_adapter {
 	 */
 	bool roam_ho_fail;
 	struct lfr_firmware_status lfr_fw_status;
+	bool con_status;
+	bool dad;
 	uint8_t active_ac;
+	uint32_t pkt_type_bitmap;
+	uint32_t track_arp_ip;
+	uint8_t dns_payload[256];
+	uint32_t track_dns_domain_len;
+	uint32_t track_src_port;
+	uint32_t track_dest_port;
+	uint32_t track_dest_ipv4;
 	uint32_t mon_chan_freq;
 	uint32_t mon_bandwidth;
 	uint16_t latency_level;
@@ -1295,15 +1574,27 @@ struct hdd_adapter {
 #endif /* WLAN_FEATURE_MOTION_DETECTION */
 	enum qca_disconnect_reason_codes last_disconnect_reason;
 	enum wlan_status_code connect_req_status;
+
+#ifdef WLAN_FEATURE_PERIODIC_STA_STATS
+	/* Indicate whether to display sta periodic stats */
+	bool is_sta_periodic_stats_enabled;
+	uint16_t periodic_stats_timer_count;
+	uint32_t periodic_stats_timer_counter;
+	qdf_mutex_t sta_periodic_stats_lock;
+#endif /* WLAN_FEATURE_PERIODIC_STA_STATS */
 	qdf_event_t peer_cleanup_done;
 #ifdef FEATURE_OEM_DATA
 	bool oem_data_in_progress;
 	void *cookie;
 	bool response_expected;
 #endif
+	uint8_t gro_disallowed[DP_MAX_RX_THREADS];
+	uint8_t gro_flushed[DP_MAX_RX_THREADS];
 	bool handle_feature_update;
 	/* Indicate if TSO and checksum offload features are enabled or not */
 	bool tso_csum_feature_enabled;
+	bool runtime_disable_rx_thread;
+	ol_txrx_rx_fp rx_stack;
 
 	qdf_work_t netdev_features_update_work;
 	enum hdd_work_status netdev_features_update_work_status;
@@ -1651,6 +1942,35 @@ struct hdd_dual_sta_policy {
 	uint8_t primary_vdev_id;
 };
 
+#if defined(WLAN_FEATURE_DP_BUS_BANDWIDTH) && defined(FEATURE_RUNTIME_PM)
+/**
+ * enum hdd_rtpm_tput_policy_state - states to track runtime_pm tput policy
+ * @RTPM_TPUT_POLICY_STATE_INVALID: invalid state
+ * @RTPM_TPUT_POLICY_STATE_REQUIRED: state indicating runtime_pm is required
+ * @RTPM_TPUT_POLICY_STATE_NOT_REQUIRED: state indicating runtime_pm is NOT
+ * required
+ */
+enum hdd_rtpm_tput_policy_state {
+	RTPM_TPUT_POLICY_STATE_INVALID,
+	RTPM_TPUT_POLICY_STATE_REQUIRED,
+	RTPM_TPUT_POLICY_STATE_NOT_REQUIRED
+};
+
+/**
+ * struct hdd_rtpm_tput_policy_context - RTPM throughput policy context
+ * @curr_state: current state of throughput policy (RTPM require or not)
+ * @wake_lock: wakelock for QDF wake_lock acquire/release APIs
+ * @rtpm_lock: lock used for QDF runtime PM prevent/allow APIs
+ * @high_tput_vote: atomic variable to keep track of voting
+ */
+struct hdd_rtpm_tput_policy_context {
+	enum hdd_rtpm_tput_policy_state curr_state;
+	qdf_wake_lock_t wake_lock;
+	qdf_runtime_lock_t rtpm_lock;
+	qdf_atomic_t high_tput_vote;
+};
+#endif
+
 #ifdef FEATURE_WLAN_DYNAMIC_IFACE_CTRL
 /**
  * hdd_get_wlan_driver_status() - get status of soft driver unload
@@ -1773,6 +2093,7 @@ struct hdd_context {
 	int32_t oem_pid;
 #endif
 
+	qdf_atomic_t num_latency_critical_clients;
 	/** Concurrency Parameters*/
 	uint32_t concurrency_mode;
 
@@ -1782,6 +2103,7 @@ struct hdd_context {
 	/** P2P Device MAC Address for the adapter  */
 	struct qdf_mac_addr p2p_device_address;
 
+	qdf_wake_lock_t rx_wake_lock;
 	qdf_wake_lock_t sap_wake_lock;
 
 	/* Flag keeps track of wiphy suspend/resume */
@@ -1790,6 +2112,26 @@ struct hdd_context {
 	/* Flag keeps track of idle shutdown triggered by suspend */
 	bool shutdown_in_suspend;
 
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+	struct qdf_periodic_work bus_bw_work;
+	int cur_vote_level;
+	qdf_spinlock_t bus_bw_lock;
+	int cur_rx_level;
+	uint64_t prev_no_rx_offload_pkts;
+	uint64_t prev_rx_offload_pkts;
+	/* Count of non TSO packets in previous bus bw delta time */
+	uint64_t prev_no_tx_offload_pkts;
+	/* Count of TSO packets in previous bus bw delta time */
+	uint64_t prev_tx_offload_pkts;
+	int cur_tx_level;
+	uint64_t prev_tx;
+	qdf_atomic_t low_tput_gro_enable;
+	uint32_t bus_low_vote_cnt;
+#ifdef FEATURE_RUNTIME_PM
+	struct hdd_rtpm_tput_policy_context rtpm_tput_policy_ctx;
+#endif
+#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
+
 	struct completion ready_to_suspend;
 	/* defining the solution type */
 	uint32_t target_type;
@@ -1877,11 +2219,18 @@ struct hdd_context {
 	bool connection_in_progress;
 	qdf_spinlock_t connection_status_lock;
 
+	uint16_t hdd_txrx_hist_idx;
+	struct hdd_tx_rx_histogram *hdd_txrx_hist;
+
 	/*
 	 * place to store FTM capab of target. This allows changing of FTM capab
 	 * at runtime and intersecting it with target capab before updating.
 	 */
 	uint32_t fine_time_meas_cap_target;
+	uint32_t rx_high_ind_cnt;
+	/* For Rx thread non GRO/LRO packet accounting */
+	uint64_t no_rx_offload_pkt_cnt;
+	uint64_t no_tx_offload_pkt_cnt;
 	/* Current number of TX X RX chains being used */
 	enum antenna_mode current_antenna_mode;
 
@@ -1890,12 +2239,19 @@ struct hdd_context {
 	qdf_work_t sap_pre_cac_work;
 	bool hbw_requested;
 	bool pm_qos_request;
+	enum RX_OFFLOAD ol_enable;
 #ifdef WLAN_FEATURE_NAN
 	bool nan_datapath_enabled;
 #endif
 	/* Present state of driver cds modules */
 	enum driver_modules_status driver_status;
 	struct qdf_delayed_work psoc_idle_timeout_work;
+	bool rps;
+	bool dynamic_rps;
+	bool enable_rxthread;
+	/* support for DP RX threads */
+	bool enable_dp_rx_threads;
+	bool napi_enable;
 	struct acs_dfs_policy acs_policy;
 	uint16_t wmi_max_len;
 	struct suspend_resume_stats suspend_resume_stats;
@@ -1944,6 +2300,12 @@ struct hdd_context {
 	QDF_STATUS (*receive_offload_cb)(struct hdd_adapter *,
 					 struct sk_buff *);
 	qdf_atomic_t vendor_disable_lro_flag;
+
+	/* disable RX offload (GRO/LRO) in concurrency scenarios */
+	qdf_atomic_t disable_rx_ol_in_concurrency;
+	/* disable RX offload (GRO/LRO) in low throughput scenarios */
+	qdf_atomic_t disable_rx_ol_in_low_tput;
+	bool en_tcp_delack_no_lro;
 	bool force_rsne_override;
 	qdf_wake_lock_t monitor_mode_wakelock;
 	bool lte_coex_ant_share;
@@ -2006,7 +2368,13 @@ struct hdd_context {
 	uint8_t dutycycle_off_percent;
 #endif
 	uint8_t pm_qos_request_flags;
+	uint8_t high_bus_bw_request;
 	qdf_work_t country_change_work;
+	struct {
+		qdf_atomic_t rx_aggregation;
+		uint8_t gro_force_flush[DP_MAX_RX_THREADS];
+		bool force_gro_enable;
+	} dp_agg_param;
 	int current_pcie_gen_speed;
 	qdf_workqueue_t *adapter_ops_wq;
 	struct hdd_adapter_ops_history adapter_ops_history;
@@ -2018,6 +2386,9 @@ struct hdd_context {
 	bool multi_client_thermal_mitigation;
 #endif
 	bool disconnect_for_sta_mon_conc;
+#ifdef FEATURE_BUS_BANDWIDTH_MGR
+	struct bbm_context *bbm_ctx;
+#endif
 	bool is_dual_mac_cfg_updated;
 	bool is_regulatory_update_in_progress;
 	qdf_event_t regulatory_update_event;
@@ -2028,6 +2399,7 @@ struct hdd_context {
 #endif
 	bool is_wifi3_0_target;
 	bool dump_in_progress;
+	uint64_t bw_vote_time;
 	struct hdd_dual_sta_policy dual_sta_policy;
 #if defined(WLAN_FEATURE_11BE_MLO) && defined(CFG80211_11BE_BASIC)
 	struct hdd_mld_mac_info mld_mac_info;
@@ -2035,6 +2407,7 @@ struct hdd_context {
 #ifdef THERMAL_STATS_SUPPORT
 	bool is_therm_stats_in_progress;
 #endif
+	qdf_atomic_t rx_skip_qdisc_chk_conc;
 
 #ifdef WLAN_FEATURE_DYNAMIC_MAC_ADDR_UPDATE
 	bool is_vdev_macaddr_dynamic_update_supported;
@@ -2722,6 +3095,250 @@ QDF_STATUS __wlan_hdd_validate_mac_address(struct qdf_mac_addr *mac_addr,
  */
 bool hdd_is_any_adapter_connected(struct hdd_context *hdd_ctx);
 
+/**
+ * hdd_add_latency_critical_client() - Add latency critical client
+ * @adapter: adapter handle (Should not be NULL)
+ * @phymode: the phymode of the connected adapter
+ *
+ * This function checks if the present connection is latency critical
+ * and adds to the latency critical clients count and informs the
+ * datapath about this connection being latency critical.
+ *
+ * Returns: None
+ */
+static inline void
+hdd_add_latency_critical_client(struct hdd_adapter *adapter,
+				enum qca_wlan_802_11_mode phymode)
+{
+	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
+
+	switch (phymode) {
+	case QCA_WLAN_802_11_MODE_11A:
+	case QCA_WLAN_802_11_MODE_11G:
+		qdf_atomic_inc(&hdd_ctx->num_latency_critical_clients);
+
+		hdd_debug("Adding latency critical connection for vdev %d",
+			  adapter->vdev_id);
+		cdp_vdev_inform_ll_conn(cds_get_context(QDF_MODULE_ID_SOC),
+					adapter->vdev_id,
+					CDP_VDEV_LL_CONN_ADD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * hdd_del_latency_critical_client() - Delete latency critical client
+ * @adapter: adapter handle (Should not be NULL)
+ * @phymode: the phymode of the connected adapter
+ *
+ * This function checks if the present connection was latency critical
+ * and removes from the latency critical clients count and informs the
+ * datapath about the removed connection being latency critical.
+ *
+ * Returns: None
+ */
+static inline void
+hdd_del_latency_critical_client(struct hdd_adapter *adapter,
+				enum qca_wlan_802_11_mode phymode)
+{
+	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
+
+	switch (phymode) {
+	case QCA_WLAN_802_11_MODE_11A:
+	case QCA_WLAN_802_11_MODE_11G:
+		qdf_atomic_dec(&hdd_ctx->num_latency_critical_clients);
+
+		hdd_info("Removing latency critical connection for vdev %d",
+			 adapter->vdev_id);
+		cdp_vdev_inform_ll_conn(cds_get_context(QDF_MODULE_ID_SOC),
+					adapter->vdev_id,
+					CDP_VDEV_LL_CONN_DEL);
+		break;
+	default:
+		break;
+	}
+}
+
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+/**
+ * hdd_bus_bw_compute_prev_txrx_stats() - get tx and rx stats
+ * @adapter: hdd adapter reference
+ *
+ * This function gets the collected tx and rx stats before starting
+ * the bus bandwidth timer.
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_prev_txrx_stats(struct hdd_adapter *adapter);
+
+/**
+ * hdd_bus_bw_compute_reset_prev_txrx_stats() - reset previous tx and rx stats
+ * @adapter: hdd adapter reference
+ *
+ * This function resets the adapter previous tx rx stats.
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_reset_prev_txrx_stats(struct hdd_adapter *adapter);
+
+/**
+ * hdd_bus_bw_compute_timer_start() - start the bandwidth timer
+ * @hdd_ctx: the global hdd context
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_timer_start(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bus_bw_compute_timer_try_start() - try to start the bandwidth timer
+ * @hdd_ctx: the global hdd context
+ *
+ * This function ensures there is at least one adapter in the associated state
+ * before starting the bandwidth timer.
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_timer_try_start(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bus_bw_compute_timer_stop() - stop the bandwidth timer
+ * @hdd_ctx: the global hdd context
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_timer_stop(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bus_bw_compute_timer_try_stop() - try to stop the bandwidth timer
+ * @hdd_ctx: the global hdd context
+ *
+ * This function ensures there are no adapters in the associated state before
+ * stopping the bandwidth timer.
+ *
+ * Return: None
+ */
+void hdd_bus_bw_compute_timer_try_stop(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bus_bandwidth_init() - Initialize bus bandwidth data structures.
+ * @hdd_ctx: HDD context
+ *
+ * Initialize bus bandwidth related data structures like spinlock and timer.
+ *
+ * Return: 0 on success, non-zero value on failure.
+ */
+int hdd_bus_bandwidth_init(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bus_bandwidth_deinit() - De-initialize bus bandwidth data structures.
+ * @hdd_ctx: HDD context
+ *
+ * De-initialize bus bandwidth related data structures like timer.
+ *
+ * Return: None.
+ */
+void hdd_bus_bandwidth_deinit(struct hdd_context *hdd_ctx);
+
+static inline enum pld_bus_width_type
+hdd_get_current_throughput_level(struct hdd_context *hdd_ctx)
+{
+	return hdd_ctx->cur_vote_level;
+}
+
+/**
+ * hdd_set_current_throughput_level() - update the current vote
+ * level
+ * @hdd_ctx: the global hdd context
+ * @next_vote_level: pld_bus_width_type voting level
+ *
+ * This function updates the current vote level to the new level
+ * provided
+ *
+ * Return: None
+ */
+static inline void
+hdd_set_current_throughput_level(struct hdd_context *hdd_ctx,
+				 enum pld_bus_width_type next_vote_level)
+{
+	hdd_ctx->cur_vote_level = next_vote_level;
+}
+
+static inline bool
+hdd_is_low_tput_gro_enable(struct hdd_context *hdd_ctx)
+{
+	return (qdf_atomic_read(&hdd_ctx->low_tput_gro_enable)) ? true : false;
+}
+
+#define GET_CUR_RX_LVL(config) ((config)->cur_rx_level)
+#define GET_BW_COMPUTE_INTV(config) ((config)->bus_bw_compute_interval)
+#else
+
+static inline
+void hdd_bus_bw_compute_prev_txrx_stats(struct hdd_adapter *adapter)
+{
+}
+
+static inline
+void hdd_bus_bw_compute_reset_prev_txrx_stats(struct hdd_adapter *adapter)
+{
+}
+
+static inline
+void hdd_bus_bw_compute_timer_start(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline
+void hdd_bus_bw_compute_timer_try_start(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline
+void hdd_bus_bw_compute_timer_stop(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline
+void hdd_bus_bw_compute_timer_try_stop(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline
+int hdd_bus_bandwidth_init(struct hdd_context *hdd_ctx)
+{
+	return 0;
+}
+
+static inline
+void hdd_bus_bandwidth_deinit(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline enum pld_bus_width_type
+hdd_get_current_throughput_level(struct hdd_context *hdd_ctx)
+{
+	return PLD_BUS_WIDTH_NONE;
+}
+
+static inline void
+hdd_set_current_throughput_level(struct hdd_context *hdd_ctx,
+				 enum pld_bus_width_type next_vote_level)
+{
+}
+
+static inline bool
+hdd_is_low_tput_gro_enable(struct hdd_context *hdd_ctx)
+{
+	return false;
+}
+
+#define GET_CUR_RX_LVL(config) 0
+#define GET_BW_COMPUTE_INTV(config) 0
+
+#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
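The try_start/try_stop pair above is meant to be driven from the connect/disconnect handlers so the periodic bus bandwidth work only runs while at least one adapter is associated. A rough, illustrative call order using only the APIs declared above (the surrounding call sites are simplified):

/* Illustrative ordering only; real callers live in the STA/SAP state handlers */
void bus_bw_lifecycle_sketch(struct hdd_context *hdd_ctx,
			     struct hdd_adapter *adapter)
{
	hdd_bus_bandwidth_init(hdd_ctx);		/* wlan start */

	hdd_bus_bw_compute_prev_txrx_stats(adapter);	/* on connect */
	hdd_bus_bw_compute_timer_try_start(hdd_ctx);

	hdd_bus_bw_compute_timer_try_stop(hdd_ctx);	/* on disconnect */
	hdd_bus_bw_compute_reset_prev_txrx_stats(adapter);

	hdd_bus_bandwidth_deinit(hdd_ctx);		/* wlan stop */
}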
+
 /**
  * hdd_init_adapter_ops_wq() - Init global workqueue for adapter operations.
  * @hdd_ctx: pointer to HDD context
@@ -3202,6 +3819,15 @@ int hdd_wlan_dump_stats(struct hdd_adapter *adapter, int stats_id);
  */
 int hdd_wlan_clear_stats(struct hdd_adapter *adapter, int stats_id);
 
+/**
+ * wlan_hdd_display_tx_rx_histogram() - display tx rx histogram
+ * @hdd_ctx: hdd context
+ *
+ * Return: none
+ */
+void wlan_hdd_display_tx_rx_histogram(struct hdd_context *hdd_ctx);
+void wlan_hdd_clear_tx_rx_histogram(struct hdd_context *hdd_ctx);
+
 /**
  * hdd_cb_handle_to_context() - turn an HDD handle into an HDD context
  * @hdd_handle: HDD handle to be converted
@@ -3950,6 +4576,29 @@ static inline void hdd_dev_setup_destructor(struct net_device *dev)
 }
 #endif /* KERNEL_VERSION(4, 12, 0) */
 
+/**
+ * hdd_dp_trace_init() - initialize DP Trace by calling the QDF API
+ * @config: hdd config
+ *
+ * Return: NONE
+ */
+#ifdef CONFIG_DP_TRACE
+void hdd_dp_trace_init(struct hdd_config *config);
+#else
+static inline
+void hdd_dp_trace_init(struct hdd_config *config) {}
+#endif
+
+/**
+ * hdd_set_rx_mode_rps() - Enable/disable RPS in SAP mode
+ * @enable: Set true to enable RPS in SAP mode
+ *
+ * Callback function registered with datapath
+ *
+ * Return: none
+ */
+void hdd_set_rx_mode_rps(bool enable);
+
 /**
  * hdd_update_score_config - API to update candidate scoring related params
  * configuration parameters
@@ -4132,6 +4781,19 @@ int hdd_set_11ax_rate(struct hdd_adapter *adapter, int value,
  */
 void hdd_update_hw_sw_info(struct hdd_context *hdd_ctx);
 
+/**
+ * hdd_get_nud_stats_cb() - callback api to update the stats received from FW
+ * @data: pointer to hdd context.
+ * @rsp: pointer to data received from FW.
+ * @context: callback context
+ *
+ * This is called when the wlan driver receives the response event for
+ * the get ARP stats request sent to firmware.
+ *
+ * Return: None
+ */
+void hdd_get_nud_stats_cb(void *data, struct rsp_stats *rsp, void *context);
+
 /**
  * hdd_context_get_mac_handle() - get mac handle from hdd context
  * @hdd_ctx: Global HDD context pointer
@@ -4197,6 +4859,56 @@ void hdd_update_dynamic_mac(struct hdd_context *hdd_ctx,
 			    struct qdf_mac_addr *curr_mac_addr,
 			    struct qdf_mac_addr *new_mac_addr);
 
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+/**
+ * wlan_hdd_send_tcp_param_update_event() - Send vendor event to update
+ * TCP parameter through Wi-Fi HAL
+ * @hdd_ctx: Pointer to HDD context
+ * @data: Parameters to update
+ * @dir: Direction(tx/rx) to update
+ *
+ * Return: None
+ */
+void wlan_hdd_send_tcp_param_update_event(struct hdd_context *hdd_ctx,
+					  void *data,
+					  uint8_t dir);
+
+/**
+ * wlan_hdd_update_tcp_rx_param() - update TCP param in RX dir
+ * @hdd_ctx: Pointer to HDD context
+ * @data: Parameters to update
+ *
+ * Return: None
+ */
+void wlan_hdd_update_tcp_rx_param(struct hdd_context *hdd_ctx, void *data);
+
+/**
+ * wlan_hdd_update_tcp_tx_param() - update TCP param in TX dir
+ * @hdd_ctx: Pointer to HDD context
+ * @data: Parameters to update
+ *
+ * Return: None
+ */
+void wlan_hdd_update_tcp_tx_param(struct hdd_context *hdd_ctx, void *data);
+#else
+static inline
+void wlan_hdd_update_tcp_rx_param(struct hdd_context *hdd_ctx, void *data)
+{
+}
+
+static inline
+void wlan_hdd_update_tcp_tx_param(struct hdd_context *hdd_ctx, void *data)
+{
+}
+
+static inline
+void wlan_hdd_send_tcp_param_update_event(struct hdd_context *hdd_ctx,
+					  void *data,
+					  uint8_t dir)
+{
+}
+#endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
+
 #ifdef WLAN_FEATURE_MOTION_DETECTION
 /**
  * hdd_md_host_evt_cb - Callback for Motion Detection Event

+ 115 - 0
core/hdd/inc/wlan_hdd_periodic_sta_stats.h

@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_hdd_periodic_sta_stats.h
+ *
+ * WLAN Host Device Driver periodic STA statistics related implementation
+ *
+ */
+
+#if !defined(WLAN_HDD_PERIODIC_STA_STATS_H)
+#define WLAN_HDD_PERIODIC_STA_STATS_H
+
+#ifdef WLAN_FEATURE_PERIODIC_STA_STATS
+/*
+ * Used to get device name from the adapter
+ */
+#define WLAN_HDD_GET_DEV_NAME(adapter) ((adapter)->dev->name)
+
+/**
+ * hdd_periodic_sta_stats_config() - Initialize periodic stats configuration
+ * @config: Pointer to hdd configuration
+ * @psoc: Pointer to psoc
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_config(struct hdd_config *config,
+				   struct wlan_objmgr_psoc *psoc);
+
+/**
+ * hdd_periodic_sta_stats_init() - Initialize periodic stats display flag
+ * @adapter: Pointer to the station adapter
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_init(struct hdd_adapter *adapter);
+
+/**
+ * hdd_periodic_sta_stats_display() - Display periodic stats at STA
+ * @hdd_ctx: hdd context
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_display(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_periodic_sta_stats_start() - Start displaying periodic stats for STA
+ * @adapter: Pointer to the station adapter
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_start(struct hdd_adapter *adapter);
+
+/**
+ * hdd_periodic_sta_stats_stop() - Stop displaying periodic stats for STA
+ * @adapter: Pointer to the station adapter
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_stop(struct hdd_adapter *adapter);
+
+/**
+ * hdd_periodic_sta_stats_mutex_create() - Create mutex for STA periodic stats
+ * @adapter: Pointer to the station adapter
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_mutex_create(struct hdd_adapter *adapter);
+
+/**
+ * hdd_periodic_sta_stats_mutex_destroy() - Destroy STA periodic stats mutex
+ * @adapter: Pointer to the station adapter
+ *
+ * Return: none
+ */
+void hdd_periodic_sta_stats_mutex_destroy(struct hdd_adapter *adapter);
+
+#else
+static inline void
+hdd_periodic_sta_stats_display(struct hdd_context *hdd_ctx) {}
+
+static inline void
+hdd_periodic_sta_stats_config(struct hdd_config *config,
+			      struct wlan_objmgr_psoc *psoc) {}
+
+static inline void hdd_periodic_sta_stats_start(struct hdd_adapter *adapter) {}
+
+static inline void hdd_periodic_sta_stats_stop(struct hdd_adapter *adapter) {}
+
+static inline void
+hdd_periodic_sta_stats_init(struct hdd_adapter *adapter) {}
+
+static inline void
+hdd_periodic_sta_stats_mutex_create(struct hdd_adapter *adapter) {}
+
+static inline void
+hdd_periodic_sta_stats_mutex_destroy(struct hdd_adapter *adapter) {}
+
+#endif /* end #ifdef WLAN_FEATURE_PERIODIC_STA_STATS */
+
+#endif /* end #if !defined(WLAN_HDD_PERIODIC_STA_STATS_H) */
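The periodic STA stats hooks above are intended to be called in pairs around an adapter's lifetime; a rough sketch of the expected call order, using only the functions declared above (the surrounding call sites are assumptions for illustration):

/* Illustrative call order; the real call sites are in adapter init/deinit and
 * the connect/disconnect paths.
 */
void periodic_sta_stats_lifecycle_sketch(struct hdd_adapter *adapter,
					 struct hdd_context *hdd_ctx)
{
	hdd_periodic_sta_stats_mutex_create(adapter);	/* adapter init */
	hdd_periodic_sta_stats_init(adapter);

	hdd_periodic_sta_stats_start(adapter);		/* on connect */
	hdd_periodic_sta_stats_display(hdd_ctx);	/* from bus bw work */
	hdd_periodic_sta_stats_stop(adapter);		/* on disconnect */

	hdd_periodic_sta_stats_mutex_destroy(adapter);	/* adapter deinit */
}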

+ 151 - 0
core/hdd/inc/wlan_hdd_softap_tx_rx.h

@@ -104,6 +104,22 @@ void hdd_softap_tx_timeout(struct net_device *dev, unsigned int txqueue);
 #else
 void hdd_softap_tx_timeout(struct net_device *dev);
 #endif
+/**
+ * hdd_softap_init_tx_rx() - Initialize Tx/Rx module
+ * @adapter: pointer to adapter context
+ *
+ * Return: None
+ */
+void hdd_softap_init_tx_rx(struct hdd_adapter *adapter);
+
+/**
+ * hdd_softap_deinit_tx_rx() - Deinitialize Tx/Rx module
+ * @adapter: pointer to adapter context
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_softap_deinit_tx_rx(struct hdd_adapter *adapter);
 
 /**
  * hdd_softap_init_tx_rx_sta() - Initialize Tx/Rx for a softap station
@@ -116,6 +132,20 @@ void hdd_softap_tx_timeout(struct net_device *dev);
 QDF_STATUS hdd_softap_init_tx_rx_sta(struct hdd_adapter *adapter,
 				     struct qdf_mac_addr *sta_mac);
 
+/**
+ * hdd_softap_rx_packet_cbk() - Receive packet handler
+ * @adapter_context: pointer to HDD adapter
+ * @rx_buf: pointer to rx qdf_nbuf chain
+ *
+ * Receive callback registered with the Data Path.  The Data Path will
+ * call this to notify the HDD when one or more packets were received
+ * for a registered STA.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf);
+
 /**
  * hdd_softap_deregister_sta() - Deregister a STA with the Data Path
  * @adapter: pointer to adapter context
@@ -223,6 +253,78 @@ void hdd_softap_tx_resume_cb(void *adapter_context, bool tx_resume)
 void hdd_ipa_update_rx_mcbc_stats(struct hdd_adapter *adapter,
 				  struct sk_buff *skb);
 
+#ifdef SAP_DHCP_FW_IND
+/**
+ * hdd_post_dhcp_ind() - Send DHCP START/STOP indication to FW
+ * @adapter: pointer to hdd adapter
+ * @mac_addr: mac address
+ * @type: WMA message type
+ *
+ * Return: error number
+ */
+int hdd_post_dhcp_ind(struct hdd_adapter *adapter,
+		      uint8_t *mac_addr, uint16_t type);
+
+/**
+ * hdd_softap_inspect_dhcp_packet() - Inspect DHCP packet
+ * @adapter: pointer to hdd adapter
+ * @skb: pointer to OS packet (sk_buff)
+ * @dir: direction
+ *
+ * Inspect the Tx/Rx frame, and send DHCP START/STOP notification to the FW
+ * through WMI message, during DHCP based IP address acquisition phase.
+ *
+ * - Send DHCP_START notification to FW when SAP gets DHCP Discovery
+ * - Send DHCP_STOP notification to FW when SAP sends DHCP ACK/NAK
+ *
+ * DHCP subtypes are determined by a status octet in the DHCP Message type
+ * option (option code 53 (0x35)).
+ *
+ * Each peer will be in one of 4 DHCP phases, starting from QDF_DHCP_PHASE_ACK,
+ * and transitioned per DHCP message type as it arrives.
+ *
+ * - QDF_DHCP_PHASE_DISCOVER: upon receiving DHCP_DISCOVER message in ACK phase
+ * - QDF_DHCP_PHASE_OFFER: upon receiving DHCP_OFFER message in DISCOVER phase
+ * - QDF_DHCP_PHASE_REQUEST: upon receiving DHCP_REQUEST message in OFFER phase
+ *	or ACK phase (Renewal process)
+ * - QDF_DHCP_PHASE_ACK: upon receiving DHCP_ACK/NAK message in REQUEST phase
+ *	or DHCP_DECLINE message in OFFER phase
+ *
+ * Return: error number
+ */
+int hdd_softap_inspect_dhcp_packet(struct hdd_adapter *adapter,
+				   struct sk_buff *skb,
+				   enum qdf_proto_dir dir);
+#else
+static inline
+int hdd_post_dhcp_ind(struct hdd_adapter *adapter,
+		      uint8_t *mac_addr, uint16_t type)
+{
+	return 0;
+}
+
+static inline
+int hdd_softap_inspect_dhcp_packet(struct hdd_adapter *adapter,
+				   struct sk_buff *skb,
+				   enum qdf_proto_dir dir)
+{
+	return 0;
+}
+#endif
+
+/**
+ * hdd_softap_check_wait_for_tx_eap_pkt() - Check and wait for eap failure
+ * pkt completion event
+ * @adapter: pointer to hdd adapter
+ * @mac_addr: mac address of peer
+ *
+ * Check and wait for eap failure pkt tx completion.
+ *
+ * Return: void
+ */
+void hdd_softap_check_wait_for_tx_eap_pkt(struct hdd_adapter *adapter,
+					  struct qdf_mac_addr *mac_addr);
+
 #ifdef FEATURE_WDS
 /**
  * hdd_softap_ind_l2_update() - Send L2 update frame to bridge
@@ -249,4 +351,53 @@ QDF_STATUS hdd_softap_ind_l2_update(struct hdd_adapter *adapter,
 	return QDF_STATUS_SUCCESS;
 }
 #endif
+#ifndef QCA_LL_LEGACY_TX_FLOW_CONTROL
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
+/**
+ * hdd_skb_orphan() - skb_unshare a cloned packet, else skb_orphan
+ * @adapter: pointer to HDD adapter
+ * @skb: pointer to skb data packet
+ *
+ * Return: pointer to skb structure
+ */
+static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
+					     struct sk_buff *skb)
+{
+	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	hdd_skb_fill_gso_size(adapter->dev, skb);
+
+	if (skb_cloned(skb)) {
+		++adapter->hdd_stats.tx_rx_stats.
+			per_cpu[qdf_get_smp_processor_id()].tx_orphaned;
+		skb_orphan(skb);
+		return skb;
+	}
+
+	if (unlikely(hdd_ctx->config->tx_orphan_enable)) {
+		/*
+		 * For UDP packets we want to orphan the packet to allow the app
+		 * to send more packets. The flow would ultimately be controlled
+		 * by the limited number of tx descriptors for the vdev.
+		 */
+		++adapter->hdd_stats.tx_rx_stats.
+			per_cpu[qdf_get_smp_processor_id()].tx_orphaned;
+		skb_orphan(skb);
+	}
+
+	return skb;
+}
+#else
+static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
+					     struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	hdd_skb_fill_gso_size(adapter->dev, skb);
+	nskb = skb_unshare(skb, GFP_ATOMIC);
+
+	return nskb;
+}
+#endif
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 #endif /* end #if !defined(WLAN_HDD_SOFTAP_TX_RX_H) */
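The hdd_skb_orphan() helper above is meant to run early in the SAP transmit path, before the skb is classified and handed to the datapath. A trimmed sketch of that usage, with the rest of the xmit handler elided; WLAN_HDD_GET_PRIV_PTR() is the usual netdev-to-adapter accessor:

static netdev_tx_t hdd_softap_xmit_sketch(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);

	skb = hdd_skb_orphan(adapter, skb);
	if (!skb)
		return NETDEV_TX_OK;	/* skb_unshare() failed, packet dropped */

	/* ... inspect DHCP/EAPOL, update per-cpu tx stats, pass skb to DP ... */
	return NETDEV_TX_OK;
}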

+ 326 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -111,6 +111,87 @@ void hdd_tx_timeout(struct net_device *dev, unsigned int txqueue);
 void hdd_tx_timeout(struct net_device *dev);
 #endif
 
+QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter);
+QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter);
+
+/**
+ * hdd_rx_flush_packet_cbk() - flush rx packet handler
+ * @adapter_context: pointer to HDD adapter context
+ * @vdev_id: vdev_id of the packets to be flushed
+ *
+ * Flush rx packet callback registered with data path. DP will call this to
+ * notify HDD when packets for a particular vdev are to be flushed out.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_flush_packet_cbk(void *adapter_context, uint8_t vdev_id);
+
+/**
+ * hdd_rx_packet_cbk() - Receive packet handler
+ * @adapter_context: pointer to HDD adapter context
+ * @rxBuf: pointer to rx qdf_nbuf
+ *
+ * Receive callback registered with data path.  DP will call this to notify
+ * the HDD when one or more packets were received for a registered
+ * STA.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rxBuf);
+
+#if defined(WLAN_SUPPORT_RX_FISA)
+/**
+ * hdd_rx_fisa_cbk() - Entry function to FISA to handle aggregation
+ * @dp_soc: core txrx main context
+ * @dp_vdev: handle to DP vdev
+ * @rxbuf_list: list of nbufs to be aggregated
+ *
+ * Return: Success on aggregation
+ */
+QDF_STATUS hdd_rx_fisa_cbk(void *dp_soc, void *dp_vdev, qdf_nbuf_t rxbuf_list);
+
+/**
+ * hdd_rx_fisa_flush_by_ctx_id() - Flush function to end of context
+ *				   flushing of aggregates
+ * @dp_soc: core txrx main context
+ * @ring_num: REO number to flush the flow Rxed on the REO
+ *
+ * Return: Success on flushing the flows for the REO
+ */
+QDF_STATUS hdd_rx_fisa_flush_by_ctx_id(void *dp_soc, int ring_num);
+
+/**
+ * hdd_rx_fisa_flush_by_vdev_id() - Flush fisa aggregates per vdev id
+ * @dp_soc: core txrx main context
+ * @vdev_id: vdev ID
+ *
+ * Return: Success on flushing the flows for the vdev
+ */
+QDF_STATUS hdd_rx_fisa_flush_by_vdev_id(void *dp_soc, uint8_t vdev_id);
+#else
+static inline QDF_STATUS hdd_rx_fisa_flush_by_vdev_id(void *dp_soc,
+						      uint8_t vdev_id)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * hdd_rx_deliver_to_stack() - HDD helper function to deliver RX pkts to stack
+ * @adapter: pointer to HDD adapter context
+ * @skb: pointer to skb
+ *
+ * The function calls the appropriate stack function depending upon the packet
+ * type and whether GRO/LRO is enabled.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
+				   struct sk_buff *skb);
+
 #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
 /**
  * hdd_tsf_timestamp_rx() - HDD function to set rx packet timestamp
@@ -150,6 +231,41 @@ void hdd_get_tsf_time_cb(uint8_t vdev_id, uint64_t input_time,
 qdf_napi_struct
 *hdd_legacy_gro_get_napi(qdf_nbuf_t nbuf, bool enable_rxthread);
 
+/**
+ * hdd_rx_thread_gro_flush_ind_cbk() - receive handler to flush GRO packets
+ * @adapter: pointer to HDD adapter
+ * @rx_ctx_id: RX CTX Id for which flush should happen
+ *
+ * Receive callback registered with DP layer which flushes GRO packets
+ * for a given RX CTX ID (RX Thread)
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id);
+
+/**
+ * hdd_rx_pkt_thread_enqueue_cbk() - receive pkt handler to enqueue into thread
+ * @adapter_context: pointer to HDD adapter context
+ * @nbuf_list: pointer to qdf_nbuf list
+ *
+ * Receive callback registered with DP layer which enqueues packets into dp rx
+ * thread
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter_context,
+					 qdf_nbuf_t nbuf_list);
+
+/**
+ * hdd_rx_ol_init() - Initialize Rx offload mode (LRO or GRO)
+ * @hdd_ctx: pointer to HDD context
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+int hdd_rx_ol_init(struct hdd_context *hdd_ctx);
+
 /**
  * hdd_rx_handle_concurrency() - Handle concurrency related operations
  *  for rx
@@ -159,6 +275,36 @@ qdf_napi_struct
  */
 void hdd_rx_handle_concurrency(bool is_concurrency);
 
+/**
+ * hdd_disable_rx_ol_for_low_tput() - Disable Rx offload in low TPUT scenario
+ * @hdd_ctx: hdd context
+ * @disable: true/false to disable/enable the Rx offload
+ *
+ * Return: none
+ */
+void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable);
+
+/**
+ * hdd_reset_all_adapters_connectivity_stats() - reset connectivity stats
+ * @hdd_ctx: pointer to HDD context
+ *
+ * Return: None
+ */
+void hdd_reset_all_adapters_connectivity_stats(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
+ * @skb: pointer to skb data
+ * @adapter: pointer to vdev adapter
+ * @action: action done on pkt.
+ * @pkt_type: data pkt type
+ *
+ * Return: None
+ */
+void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
+		void *adapter, enum connectivity_stats_pkt_status action,
+		uint8_t *pkt_type);
+
 /**
  * hdd_tx_queue_cb() - Disable/Enable the Transmit Queues
  * @hdd_handle: HDD handle
@@ -285,12 +431,98 @@ void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
 
 #ifdef FEATURE_MONITOR_MODE_SUPPORT
 int hdd_set_mon_rx_cb(struct net_device *dev);
+/**
+ * hdd_mon_rx_packet_cbk() - Receive callback registered with OL layer.
+ * @context: pointer to qdf context
+ * @rxbuf: pointer to rx qdf_nbuf
+ *
+ * TL will call this to notify the HDD when one or more packets were
+ * received for a registered STA.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf);
 #else
 static inline
 int hdd_set_mon_rx_cb(struct net_device *dev)
 {
 	return 0;
 }
+static inline
+QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+void hdd_send_rps_ind(struct hdd_adapter *adapter);
+void hdd_send_rps_disable_ind(struct hdd_adapter *adapter);
+
+/**
+ * hdd_adapter_set_rps() - Enable/disable RPS for mode specified
+ * @vdev_id: vdev id of adapter for which RPS needs to be enabled
+ * @enable: Set true to enable RPS in SAP mode
+ *
+ * Callback function registered with ipa
+ *
+ * Return: none
+ */
+#ifdef QCA_CONFIG_RPS
+void hdd_adapter_set_rps(uint8_t vdev_id, bool enable);
+#else
+static inline
+void hdd_adapter_set_rps(uint8_t vdev_id, bool enable)
+{
+}
+#endif
+
+/**
+ * wlan_hdd_mark_pkt_type() - Mark packet type in qdf_nbuf_cb
+ * @skb: sk buff
+ *
+ * The function zeros out skb->cb and marks packet type in it.
+ *
+ * Return: none
+ */
+void wlan_hdd_mark_pkt_type(struct sk_buff *skb);
+
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_reset_tcp_adv_win_scale() - Reset tcp adv window scale value to default
+ * @hdd_ctx: Handle to hdd context
+ *
+ * Function used to reset TCP advance window scale value to its default value
+ *
+ * Return: None
+ */
+void hdd_reset_tcp_adv_win_scale(struct hdd_context *hdd_ctx);
+#ifdef RX_PERFORMANCE
+bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx);
+#else
+static inline bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
+{
+	return false;
+}
+#endif
+#define HDD_MSM_CFG(msm_cfg)	msm_cfg
+#else
+static inline void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx) {}
+static inline void hdd_reset_tcp_adv_win_scale(struct hdd_context *hdd_ctx) {}
+static inline bool hdd_is_current_high_throughput(struct hdd_context *hdd_ctx)
+{
+	return false;
+}
+#define HDD_MSM_CFG(msm_cfg)	0
+#endif
+
+#ifdef FEATURE_WLAN_DIAG_SUPPORT
+void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir);
+#else
+static inline
+void hdd_event_eapol_log(struct sk_buff *skb, enum qdf_proto_dir dir)
+{}
 #endif
 
 /**
@@ -325,6 +557,18 @@ static inline void netif_trans_update(struct net_device *dev)
 	__func__, jiffies)
 #endif
 
+static inline void
+hdd_skb_fill_gso_size(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_cloned(skb) && skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->gso_size == 0 &&
+	    ip_hdr(skb)->protocol == IPPROTO_TCP) {
+		skb_shinfo(skb)->gso_size = dev->mtu -
+			((skb_transport_header(skb) - skb_network_header(skb))
+				+ tcp_hdrlen(skb));
+	}
+}
+
 /**
  * hdd_txrx_get_tx_ack_count() - get tx acked count
  * @adapter: Pointer to adapter
@@ -333,6 +577,24 @@ static inline void netif_trans_update(struct net_device *dev)
  */
 uint32_t hdd_txrx_get_tx_ack_count(struct hdd_adapter *adapter);
 
+#ifdef CONFIG_HL_SUPPORT
+static inline QDF_STATUS
+hdd_skb_nontso_linearize(struct sk_buff *skb)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS
+hdd_skb_nontso_linearize(struct sk_buff *skb)
+{
+	if (qdf_nbuf_is_nonlinear(skb) && qdf_nbuf_is_tso(skb) == false) {
+		if (qdf_unlikely(skb_linearize(skb)))
+			return QDF_STATUS_E_NOMEM;
+	}
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
 /**
  * hdd_dp_cfg_update() - update hdd config for HDD DP INIs
  * @psoc: Pointer to psoc obj
@@ -376,4 +638,68 @@ wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *q_hist,
  */
 bool wlan_hdd_rx_rpm_mark_last_busy(struct hdd_context *hdd_ctx,
 				    void *hif_ctx);
+
+/**
+ * hdd_sta_notify_tx_comp_cb() - notify tx comp callback registered with dp
+ * @skb: pointer to skb
+ * @ctx: osif context
+ * @flag: tx status flag
+ *
+ * Return: None
+ */
+void hdd_sta_notify_tx_comp_cb(qdf_nbuf_t skb, void *ctx, uint16_t flag);
+
+/**
+ * hdd_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
+ *
+ * Return: True if any rx pkt tracepoint is enabled else false
+ */
+static inline bool hdd_rx_pkt_tracepoints_enabled(void)
+{
+	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
+		qdf_trace_dp_rx_udp_pkt_enabled() ||
+		qdf_trace_dp_rx_pkt_enabled());
+}
+
+#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
+
+/**
+ * hdd_pkt_add_timestamp() - add timestamp in data payload
+ * @adapter: adapter context
+ * @index: timestamp index which decides offset in payload
+ * @time: time to update in payload
+ * @skb: socket buffer
+ *
+ * Return: none
+ */
+void hdd_pkt_add_timestamp(struct hdd_adapter *adapter,
+			   enum qdf_pkt_timestamp_index index, uint64_t time,
+			   struct sk_buff *skb);
+#else
+static inline
+void hdd_pkt_add_timestamp(struct hdd_adapter *adapter,
+			   enum qdf_pkt_timestamp_index index, uint64_t time,
+			   struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * hdd_debug_pkt_dump() - Dump EAPOL/ARP/DHCP packet payloads
+ * @skb: pointer to socket buffer
+ * @size: number of bytes to dump
+ * @dump_level: requested dump level bitmap; it is masked down to the bit
+ *	matching the detected packet type and the payload is dumped only if
+ *	that bit is set
+ *
+ * Return: None
+ */
+static inline
+void hdd_debug_pkt_dump(struct sk_buff *skb, int size,
+			uint16_t *dump_level)
+{
+	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(skb)))
+		*dump_level &= DEBUG_PKTLOG_TYPE_EAPOL;
+	else if (qdf_unlikely(qdf_nbuf_is_ipv4_arp_pkt(skb)))
+		*dump_level &= DEBUG_PKTLOG_TYPE_ARP;
+	else if (qdf_unlikely(qdf_nbuf_is_ipv4_dhcp_pkt(skb)))
+		*dump_level &= DEBUG_PKTLOG_TYPE_DHCP;
+	else
+		return;
+
+	if (*dump_level)
+		qdf_trace_hex_dump(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG,
+				   qdf_nbuf_data(skb), size);
+}
 #endif /* end #if !defined(WLAN_HDD_TX_RX_H) */

+ 24 - 0
core/hdd/src/wlan_hdd_assoc.c

@@ -68,11 +68,13 @@
 #include "wlan_hdd_bcn_recv.h"
 #include "wlan_mlme_twt_ucfg_api.h"
 
+#include "wlan_hdd_nud_tracking.h"
 #include <wlan_cfg80211_crypto.h>
 #include <wlan_crypto_global_api.h>
 #include "wlan_dlm_ucfg_api.h"
 #include "wlan_hdd_sta_info.h"
 #include "wlan_hdd_ftm_time_sync.h"
+#include "wlan_hdd_periodic_sta_stats.h"
 #include "wlan_cm_roam_api.h"
 
 #include <ol_defines.h>
@@ -1209,6 +1211,28 @@ QDF_STATUS hdd_update_dp_vdev_flags(void *cbk_data,
 	return status;
 }
 
+#if defined(WLAN_SUPPORT_RX_FISA)
+/**
+ * hdd_rx_register_fisa_ops() - Register FISA callback functions
+ * @txrx_ops: operations handle in which the FISA callbacks are registered
+ *
+ * Registers hdd_rx_fisa_cbk() as the FISA aggregation handler and
+ * hdd_rx_fisa_flush_by_ctx_id() as the FISA flush handler.
+ *
+ * Return: None
+ */
+static inline void
+hdd_rx_register_fisa_ops(struct ol_txrx_ops *txrx_ops)
+{
+	txrx_ops->rx.osif_fisa_rx = hdd_rx_fisa_cbk;
+	txrx_ops->rx.osif_fisa_flush = hdd_rx_fisa_flush_by_ctx_id;
+}
+#else
+static inline void
+hdd_rx_register_fisa_ops(struct ol_txrx_ops *txrx_ops)
+{
+}
+#endif
+
 QDF_STATUS hdd_roam_register_sta(struct hdd_adapter *adapter,
 				 struct qdf_mac_addr *bssid,
 				 bool is_auth_required)

+ 471 - 0
core/hdd/src/wlan_hdd_bus_bandwidth.c

@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_hdd_bus_bandwidth.c
+ *
+ * Bus Bandwidth Manager implementation
+ */
+
+#include <wlan_hdd_includes.h>
+#include "qca_vendor.h"
+#include "wlan_hdd_bus_bandwidth.h"
+#include "wlan_hdd_main.h"
+
+/**
+ * bus_bw_table_default - default table which provides bus bandwidth level
+ *  corresponding to a given connection mode and throughput level.
+ */
+static bus_bw_table_type bus_bw_table_default = {
+	[QCA_WLAN_802_11_MODE_11B] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
+				      BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
+				      BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
+				      BUS_BW_LEVEL_7, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11G] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5},
+	[QCA_WLAN_802_11_MODE_11A] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5,
+				      BUS_BW_LEVEL_5, BUS_BW_LEVEL_5},
+	[QCA_WLAN_802_11_MODE_11N] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
+				      BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
+				      BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
+				      BUS_BW_LEVEL_7, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11AC] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
+				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
+				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
+				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11AX] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
+				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
+				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
+				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11BE] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_1,
+				       BUS_BW_LEVEL_2, BUS_BW_LEVEL_3,
+				       BUS_BW_LEVEL_4, BUS_BW_LEVEL_6,
+				       BUS_BW_LEVEL_7, BUS_BW_LEVEL_8},
+};
+
+/**
+ * bus_bw_table_low_latency - table which provides bus bandwidth level
+ *  corresponding to a given connection mode and throughput level in low
+ *  latency setting.
+ */
+static bus_bw_table_type bus_bw_table_low_latency = {
+	[QCA_WLAN_802_11_MODE_11B] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11G] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11A] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11N] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				      BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11AC] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11AX] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+	[QCA_WLAN_802_11_MODE_11BE] = {BUS_BW_LEVEL_NONE, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8,
+				       BUS_BW_LEVEL_8, BUS_BW_LEVEL_8},
+};
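+
+/*
+ * Example of how these tables are read: rows are indexed by
+ * enum qca_wlan_802_11_mode and columns by enum tput_level, so an 11AX STA
+ * at TPUT_LEVEL_HIGH resolves to BUS_BW_LEVEL_4 in the default table, which
+ * bbm_convert_to_pld_bus_lvl() below maps to PLD_BUS_WIDTH_HIGH.
+ */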
+
+/**
+ * bbm_convert_to_pld_bus_lvl() - Convert from internal bus vote level to
+ *  PLD bus vote level
+ * @vote_lvl: internal bus bw vote level
+ *
+ * Returns: PLD bus vote level
+ */
+static enum pld_bus_width_type
+bbm_convert_to_pld_bus_lvl(enum bus_bw_level vote_lvl)
+{
+	switch (vote_lvl) {
+	case BUS_BW_LEVEL_1:
+		return PLD_BUS_WIDTH_IDLE;
+	case BUS_BW_LEVEL_2:
+		return PLD_BUS_WIDTH_LOW;
+	case BUS_BW_LEVEL_3:
+		return PLD_BUS_WIDTH_MEDIUM;
+	case BUS_BW_LEVEL_4:
+		return PLD_BUS_WIDTH_HIGH;
+	case BUS_BW_LEVEL_5:
+		return PLD_BUS_WIDTH_LOW_LATENCY;
+	case BUS_BW_LEVEL_6:
+		return PLD_BUS_WIDTH_VERY_HIGH;
+	case BUS_BW_LEVEL_7:
+		return PLD_BUS_WIDTH_ULTRA_HIGH;
+	case BUS_BW_LEVEL_8:
+		return PLD_BUS_WIDTH_MAX;
+	case BUS_BW_LEVEL_NONE:
+	default:
+		return PLD_BUS_WIDTH_NONE;
+	}
+}
+
+/**
+ * bbm_get_bus_bw_level_vote() - Select bus bw vote level per adapter based
+ *  on connection mode and throughput level
+ * @adapter: HDD adapter. Caller ensures that adapter is valid.
+ * @tput_level: throughput level
+ *
+ * Returns: Bus bw level
+ */
+static enum bus_bw_level
+bbm_get_bus_bw_level_vote(struct hdd_adapter *adapter,
+			  enum tput_level tput_level)
+{
+	struct hdd_station_ctx *sta_ctx;
+	struct hdd_ap_ctx *ap_ctx;
+	enum qca_wlan_802_11_mode i;
+	enum qca_wlan_802_11_mode dot11_mode;
+	enum bus_bw_level vote_lvl = BUS_BW_LEVEL_NONE;
+	struct bbm_context *bbm_ctx = adapter->hdd_ctx->bbm_ctx;
+	bus_bw_table_type *lkp_table = bbm_ctx->curr_bus_bw_lookup_table;
+
+	if (tput_level >= TPUT_LEVEL_MAX) {
+		hdd_err("invalid tput level %d", tput_level);
+		return BUS_BW_LEVEL_NONE;
+	}
+
+	switch (adapter->device_mode) {
+	case QDF_STA_MODE:
+	case QDF_P2P_CLIENT_MODE:
+		sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+		if (!hdd_cm_is_vdev_associated(adapter))
+			break;
+
+		dot11_mode = hdd_convert_cfgdot11mode_to_80211mode(sta_ctx->
+							   conn_info.dot11mode);
+		if (dot11_mode >= QCA_WLAN_802_11_MODE_INVALID) {
+			hdd_err("invalid STA/P2P-CLI dot11 mode %d",
+				dot11_mode);
+			break;
+		}
+
+		return (*lkp_table)[dot11_mode][tput_level];
+	case QDF_SAP_MODE:
+	case QDF_P2P_GO_MODE:
+		ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter);
+		if (!ap_ctx->ap_active)
+			break;
+
+		for (i = QCA_WLAN_802_11_MODE_11B;
+		     i < QCA_WLAN_802_11_MODE_INVALID; i++) {
+			if (ap_ctx->client_count[i] &&
+			    (*lkp_table)[i][tput_level] > vote_lvl)
+				vote_lvl = (*lkp_table)[i][tput_level];
+		}
+
+		return vote_lvl;
+	case QDF_NDI_MODE:
+		if (WLAN_HDD_GET_STATION_CTX_PTR(adapter)->
+		    conn_info.conn_state != eConnectionState_NdiConnected)
+			break;
+
+		/*
+		 * NDI dot11mode is currently hardcoded to 11AC in driver and
+		 * since the bus bw levels in table do not differ between 11AC
+		 * and 11AX, using max supported mode instead. Dot11mode of the
+		 * peers are not saved in driver and legacy modes are not
+		 * supported in NAN.
+		 */
+		return (*lkp_table)[QCA_WLAN_802_11_MODE_11AX][tput_level];
+	default:
+		break;
+	}
+
+	return vote_lvl;
+}
+
+/**
+ * bbm_apply_tput_policy() - Apply tput BBM policy by considering
+ *  throughput level and connection modes across adapters
+ * @hdd_ctx: HDD context
+ * @tput_level: throughput level
+ *
+ * Returns: None
+ */
+static void
+bbm_apply_tput_policy(struct hdd_context *hdd_ctx, enum tput_level tput_level)
+{
+	struct hdd_adapter *adapter;
+	struct hdd_adapter *next_adapter;
+	enum bus_bw_level next_vote = BUS_BW_LEVEL_NONE;
+	enum bus_bw_level tmp_vote;
+	struct bbm_context *bbm_ctx = hdd_ctx->bbm_ctx;
+
+	if (tput_level == TPUT_LEVEL_NONE) {
+		/*
+		 * This is to handle the scenario where bus bw periodic work
+		 * is force cancelled
+		 */
+		if (!hdd_is_any_adapter_connected(hdd_ctx))
+			bbm_ctx->per_policy_vote[BBM_TPUT_POLICY] = next_vote;
+		return;
+	}
+
+	hdd_for_each_adapter_dev_held_safe(hdd_ctx, adapter, next_adapter,
+					   NET_DEV_HOLD_BUS_BW_MGR) {
+		tmp_vote = bbm_get_bus_bw_level_vote(adapter, tput_level);
+		if (tmp_vote > next_vote)
+			next_vote = tmp_vote;
+		hdd_adapter_dev_put_debug(adapter, NET_DEV_HOLD_BUS_BW_MGR);
+	}
+
+	bbm_ctx->per_policy_vote[BBM_TPUT_POLICY] = next_vote;
+}
+
+/**
+ * bbm_apply_driver_mode_policy() - Apply driver mode BBM policy
+ * @bbm_ctx: bus bw mgr context
+ * @driver_mode: global driver mode
+ *
+ * Returns: None
+ */
+static void
+bbm_apply_driver_mode_policy(struct bbm_context *bbm_ctx,
+			     enum QDF_GLOBAL_MODE driver_mode)
+{
+	switch (driver_mode) {
+	case QDF_GLOBAL_MONITOR_MODE:
+	case QDF_GLOBAL_FTM_MODE:
+		bbm_ctx->per_policy_vote[BBM_DRIVER_MODE_POLICY] =
+							    BUS_BW_LEVEL_6;
+		return;
+	default:
+		bbm_ctx->per_policy_vote[BBM_DRIVER_MODE_POLICY] =
+							 BUS_BW_LEVEL_NONE;
+		return;
+	}
+}
+
+/**
+ * bbm_apply_non_persistent_policy() - Apply non persistent policy and set
+ *  the bus bandwidth
+ * @hdd_ctx: HDD context
+ * @flag: non persistent policy flag (BBM_APPS_RESUME or BBM_APPS_SUSPEND)
+ *
+ * Returns: None
+ */
+static void
+bbm_apply_non_persistent_policy(struct hdd_context *hdd_ctx,
+				enum bbm_non_per_flag flag)
+{
+	switch (flag) {
+	case BBM_APPS_RESUME:
+		if (hdd_is_any_adapter_connected(hdd_ctx)) {
+			hdd_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_RESUME;
+			pld_request_bus_bandwidth(hdd_ctx->parent_dev,
+			       bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_RESUME));
+		} else {
+			hdd_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_NONE;
+			pld_request_bus_bandwidth(hdd_ctx->parent_dev,
+				 bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_NONE));
+		}
+		return;
+	case BBM_APPS_SUSPEND:
+		hdd_ctx->bbm_ctx->curr_vote_level = BUS_BW_LEVEL_NONE;
+		pld_request_bus_bandwidth(hdd_ctx->parent_dev,
+			    bbm_convert_to_pld_bus_lvl(BUS_BW_LEVEL_NONE));
+		return;
+	default:
+		hdd_debug("flag %d not handled in res/sus BBM policy", flag);
+		return;
+	}
+}
+
+/**
+ * bbm_apply_wlm_policy() - Apply WLM based BBM policy by selecting
+ *  lookup tables based on the latency level
+ * @bbm_ctx: Bus BW mgr context
+ * @wlm_level: WLM latency level
+ *
+ * Returns: None
+ */
+static void
+bbm_apply_wlm_policy(struct bbm_context *bbm_ctx, enum wlm_ll_level wlm_level)
+{
+	switch (wlm_level) {
+	case WLM_LL_NORMAL:
+		bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_default;
+		break;
+	case WLM_LL_LOW:
+		bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_low_latency;
+		break;
+	default:
+		hdd_debug("wlm level %d not handled in BBM WLM policy",
+			  wlm_level);
+		break;
+	}
+}
+
+/**
+ * bbm_apply_user_policy() - Apply user specified bus voting
+ *  level
+ * @bbm_ctx: Bus BW mgr context
+ * @set: set or reset flag
+ * @user_level: user bus vote level
+ *
+ * Returns: qdf status
+ */
+static QDF_STATUS
+bbm_apply_user_policy(struct bbm_context *bbm_ctx, bool set,
+		      enum bus_bw_level user_level)
+{
+	if (user_level >= BUS_BW_LEVEL_MAX) {
+		hdd_err("Invalid user vote level %d", user_level);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (set)
+		bbm_ctx->per_policy_vote[BBM_USER_POLICY] = user_level;
+	else
+		bbm_ctx->per_policy_vote[BBM_USER_POLICY] = BUS_BW_LEVEL_NONE;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * bbm_request_bus_bandwidth() - Set bus bandwidth level
+ * @hdd_ctx: HDD context
+ *
+ * Returns: None
+ */
+static void
+bbm_request_bus_bandwidth(struct hdd_context *hdd_ctx)
+{
+	enum bbm_policy i;
+	enum bus_bw_level next_vote = BUS_BW_LEVEL_NONE;
+	enum pld_bus_width_type pld_vote;
+	struct bbm_context *bbm_ctx = hdd_ctx->bbm_ctx;
+
+	for (i = BBM_DRIVER_MODE_POLICY; i < BBM_MAX_POLICY; i++) {
+		if (bbm_ctx->per_policy_vote[i] > next_vote)
+			next_vote = bbm_ctx->per_policy_vote[i];
+	}
+
+	if (next_vote != bbm_ctx->curr_vote_level) {
+		pld_vote = bbm_convert_to_pld_bus_lvl(next_vote);
+		hdd_debug("Bus bandwidth vote level change from %d to %d pld_vote: %d",
+			  bbm_ctx->curr_vote_level, next_vote, pld_vote);
+		bbm_ctx->curr_vote_level = next_vote;
+		pld_request_bus_bandwidth(hdd_ctx->parent_dev, pld_vote);
+	}
+}
+
+void hdd_bbm_apply_independent_policy(struct hdd_context *hdd_ctx,
+				      struct bbm_params *params)
+{
+	struct bbm_context *bbm_ctx;
+	QDF_STATUS status;
+
+	if (!hdd_ctx || !params)
+		return;
+
+	bbm_ctx = hdd_ctx->bbm_ctx;
+
+	qdf_mutex_acquire(&bbm_ctx->bbm_lock);
+
+	switch (params->policy) {
+	case BBM_TPUT_POLICY:
+		bbm_apply_tput_policy(hdd_ctx, params->policy_info.tput_level);
+		break;
+	case BBM_NON_PERSISTENT_POLICY:
+		bbm_apply_non_persistent_policy(hdd_ctx,
+						params->policy_info.flag);
+		goto done;
+	case BBM_DRIVER_MODE_POLICY:
+		bbm_apply_driver_mode_policy(bbm_ctx,
+					     params->policy_info.driver_mode);
+		break;
+	case BBM_SELECT_TABLE_POLICY:
+		bbm_apply_wlm_policy(bbm_ctx, params->policy_info.wlm_level);
+		goto done;
+	case BBM_USER_POLICY:
+		/*
+		 * This policy is not used currently.
+		 */
+		status = bbm_apply_user_policy(bbm_ctx,
+					    params->policy_info.usr.set,
+					    params->policy_info.usr.user_level);
+		if (QDF_IS_STATUS_ERROR(status))
+			goto done;
+		break;
+	default:
+		hdd_debug("BBM policy %d not handled", params->policy);
+		goto done;
+	}
+
+	bbm_request_bus_bandwidth(hdd_ctx);
+
+done:
+	qdf_mutex_release(&bbm_ctx->bbm_lock);
+}
+
+int hdd_bbm_context_init(struct hdd_context *hdd_ctx)
+{
+	struct bbm_context *bbm_ctx;
+	QDF_STATUS status;
+
+	bbm_ctx = qdf_mem_malloc(sizeof(*bbm_ctx));
+	if (!bbm_ctx)
+		return -ENOMEM;
+
+	bbm_ctx->curr_bus_bw_lookup_table = &bus_bw_table_default;
+
+	status = qdf_mutex_create(&bbm_ctx->bbm_lock);
+	if (QDF_IS_STATUS_ERROR(status))
+		goto free_ctx;
+
+	hdd_ctx->bbm_ctx = bbm_ctx;
+
+	return 0;
+
+free_ctx:
+	qdf_mem_free(bbm_ctx);
+
+	return qdf_status_to_os_return(status);
+}
+
+void hdd_bbm_context_deinit(struct hdd_context *hdd_ctx)
+{
+	struct bbm_context *bbm_ctx = hdd_ctx->bbm_ctx;
+
+	if (!bbm_ctx)
+		return;
+
+	hdd_ctx->bbm_ctx = NULL;
+	qdf_mutex_destroy(&bbm_ctx->bbm_lock);
+
+	qdf_mem_free(bbm_ctx);
+}

+ 224 - 0
core/hdd/src/wlan_hdd_bus_bandwidth.h

@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(WLAN_HDD_BUS_BANDWIDTH_H)
+#define WLAN_HDD_BUS_BANDWIDTH_H
+/**
+ * DOC: wlan_hdd_bus_bandwidth.h
+ *
+ * Bus Bandwidth Manager implementation
+ */
+
+#include "qca_vendor.h"
+
+/**
+ * enum bus_bw_level - bus bandwidth vote levels
+ *
+ * @BUS_BW_LEVEL_NONE: No vote for bus bandwidth
+ * @BUS_BW_LEVEL_1: vote for level-1 bus bandwidth
+ * @BUS_BW_LEVEL_2: vote for level-2 bus bandwidth
+ * @BUS_BW_LEVEL_3: vote for level-3 bus bandwidth
+ * @BUS_BW_LEVEL_4: vote for level-4 bus bandwidth
+ * @BUS_BW_LEVEL_5: vote for level-5 bus bandwidth
+ * @BUS_BW_LEVEL_6: vote for level-6 bus bandwidth
+ * @BUS_BW_LEVEL_7: vote for level-7 bus bandwidth
+ * @BUS_BW_LEVEL_8: vote for level-8 bus bandwidth
+ * @BUS_BW_LEVEL_MAX: number of bus bandwidth vote levels
+ */
+enum bus_bw_level {
+	BUS_BW_LEVEL_NONE,
+	BUS_BW_LEVEL_1,
+	BUS_BW_LEVEL_2,
+	BUS_BW_LEVEL_3,
+	BUS_BW_LEVEL_4,
+	BUS_BW_LEVEL_5,
+	BUS_BW_LEVEL_6,
+	BUS_BW_LEVEL_7,
+	BUS_BW_LEVEL_8,
+	BUS_BW_LEVEL_MAX,
+};
+
+#define BUS_BW_LEVEL_RESUME BUS_BW_LEVEL_3
+
+/**
+ * enum tput_level - throughput levels
+ *
+ * @TPUT_LEVEL_NONE: No throughput
+ * @TPUT_LEVEL_IDLE: idle throughput level
+ * @TPUT_LEVEL_LOW: low throughput level
+ * @TPUT_LEVEL_MEDIUM: medium throughput level
+ * @TPUT_LEVEL_HIGH: high throughput level
+ * @TPUT_LEVEL_VERY_HIGH: very high throughput level
+ * @TPUT_LEVEL_ULTRA_HIGH: ultra high throughput level
+ * @TPUT_LEVEL_SUPER_HIGH: super high throughput level
+ * @TPUT_LEVEL_MAX: number of throughput levels
+ */
+enum tput_level {
+	TPUT_LEVEL_NONE,
+	TPUT_LEVEL_IDLE,
+	TPUT_LEVEL_LOW,
+	TPUT_LEVEL_MEDIUM,
+	TPUT_LEVEL_HIGH,
+	TPUT_LEVEL_VERY_HIGH,
+	TPUT_LEVEL_ULTRA_HIGH,
+	TPUT_LEVEL_SUPER_HIGH,
+	TPUT_LEVEL_MAX,
+};
+
+/**
+ * enum bbm_non_per_flag - Non persistent policy related flag
+ *
+ * @BBM_APPS_RESUME: system resume flag
+ * @BBM_APPS_SUSPEND: system suspend flag
+ * @BBM_FLAG_MAX: number of non persistent policy flags
+ */
+enum bbm_non_per_flag {
+	BBM_APPS_RESUME,
+	BBM_APPS_SUSPEND,
+	BBM_FLAG_MAX,
+};
+
+/**
+ * enum bbm_policy - BBM policy
+ *
+ * @BBM_DRIVER_MODE_POLICY: driver mode policy
+ * @BBM_TPUT_POLICY: throughput policy
+ * @BBM_USER_POLICY: user policy
+ * @BBM_NON_PERSISTENT_POLICY: non persistent policy. For example, bus resume
+ *  sets the bus bw level to LEVEL_3 if any adapter is connected but
+ *  this is only a one time setting and is not persistent. This bus bw level
+ *  is set without taking other policy vote levels into consideration.
+ * @BBM_SELECT_TABLE_POLICY: policy where bus bw table is selected based on
+ *  the latency level.
+ * @BBM_MAX_POLICY: number of BBM policies
+ */
+enum bbm_policy {
+	BBM_DRIVER_MODE_POLICY,
+	BBM_TPUT_POLICY,
+	BBM_USER_POLICY,
+	BBM_NON_PERSISTENT_POLICY,
+	BBM_SELECT_TABLE_POLICY,
+	BBM_MAX_POLICY,
+};
+
+/**
+ * enum wlm_ll_level - WLM latency levels
+ *
+ * @WLM_LL_NORMAL: normal latency level
+ * @WLM_LL_LOW: low latency level
+ * @WLM_LL_MAX: number of WLM latency levels
+ */
+enum wlm_ll_level {
+	WLM_LL_NORMAL,
+	WLM_LL_LOW,
+	WLM_LL_MAX,
+};
+
+/**
+ * union bbm_policy_info - BBM policy specific info. Only one of the members
+ *  is valid based on the BBM policy.
+ *
+ * @driver_mode: global driver mode. valid for BBM_DRIVER_MODE_POLICY.
+ * @flag: BBM non persistent flag. valid for BBM_NON_PERSISTENT_POLICY.
+ * @tput_level: throughput level. valid for BBM_TPUT_POLICY.
+ * @wlm_level: latency level. valid for BBM_SELECT_TABLE_POLICY.
+ * @usr.user_level: user bus bandwidth vote. valid for BBM_USER_POLICY.
+ * @usr.set: set or reset user level. valid for BBM_USER_POLICY.
+ */
+union bbm_policy_info {
+	enum QDF_GLOBAL_MODE driver_mode;
+	enum bbm_non_per_flag flag;
+	enum tput_level tput_level;
+	enum wlm_ll_level wlm_level;
+	struct {
+		enum bus_bw_level user_level;
+		bool set;
+	} usr;
+};
+
+/**
+ * struct bbm_params - BBM params
+ *
+ * @policy: BBM policy
+ * @policy_info: policy related info
+ */
+struct bbm_params {
+	enum bbm_policy policy;
+	union bbm_policy_info policy_info;
+};
+
+typedef const enum bus_bw_level
+	bus_bw_table_type[QCA_WLAN_802_11_MODE_INVALID][TPUT_LEVEL_MAX];
+
+/**
+ * struct bbm_context - Bus Bandwidth Manager context
+ *
+ * @curr_bus_bw_lookup_table: current bus bw lookup table
+ * @curr_vote_level: current vote level
+ * @per_policy_vote: per BBM policy related vote
+ * @bbm_lock: BBM API lock
+ */
+struct bbm_context {
+	bus_bw_table_type *curr_bus_bw_lookup_table;
+	enum bus_bw_level curr_vote_level;
+	enum bus_bw_level per_policy_vote[BBM_MAX_POLICY];
+	qdf_mutex_t bbm_lock;
+};
+
+#ifdef FEATURE_BUS_BANDWIDTH_MGR
+/**
+ * hdd_bbm_context_init() - Initialize BBM context
+ * @hdd_ctx: HDD context
+ *
+ * Returns: 0 on success, negative errno on failure
+ */
+int hdd_bbm_context_init(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bbm_context_deinit() - De-initialize BBM context
+ * @hdd_ctx: HDD context
+ *
+ * Returns: None
+ */
+void hdd_bbm_context_deinit(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_bbm_apply_independent_policy() - Function to apply independent policies
+ *  to set the bus bw level
+ * @hdd_ctx: HDD context
+ * @params: BBM policy related params
+ *
+ * The function applies BBM related policies and appropriately sets the bus
+ * bandwidth level.
+ *
+ * Returns: None
+ */
+void hdd_bbm_apply_independent_policy(struct hdd_context *hdd_ctx,
+				      struct bbm_params *params);
+#else
+static inline int hdd_bbm_context_init(struct hdd_context *hdd_ctx)
+{
+	return 0;
+}
+
+static inline void hdd_bbm_context_deinit(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline
+void hdd_bbm_apply_independent_policy(struct hdd_context *hdd_ctx,
+				      struct bbm_params *params)
+{
+}
+#endif
+#endif
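
A minimal usage sketch for the interface above (hypothetical call site, assuming
a valid hdd_ctx; not part of this change): populate a struct bbm_params for the
policy being applied and hand it to hdd_bbm_apply_independent_policy(), which
aggregates the per-policy votes and issues the PLD bus bandwidth request.

	static void example_vote_high_tput(struct hdd_context *hdd_ctx)
	{
		struct bbm_params param = {0};

		/* vote the throughput policy at the high level */
		param.policy = BBM_TPUT_POLICY;
		param.policy_info.tput_level = TPUT_LEVEL_HIGH;
		hdd_bbm_apply_independent_policy(hdd_ctx, &param);
	}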

+ 500 - 0
core/hdd/src/wlan_hdd_cfg80211.c

@@ -14965,6 +14965,240 @@ qca_wlan_vendor_set_connectivity_check_stats[CONNECTIVITY_STATS_SET_MAX + 1] = {
 					.len = ICMPv6_ADDR_LEN },
 };
 
+/**
+ * hdd_dns_unmake_name_query() - Convert an uncompressed DNS name to a
+ *			     NUL-terminated string
+ * @name: DNS name in wire format; converted to dotted form in place
+ *
+ * Return: Pointer to the printable version of the DNS name
+ */
+static inline uint8_t *hdd_dns_unmake_name_query(uint8_t *name)
+{
+	uint8_t *p;
+	unsigned int len;
+
+	p = name;
+	while ((len = *p)) {
+		*(p++) = '.';
+		p += len;
+	}
+
+	return name + 1;
+}
+
+/**
+ * hdd_dns_make_name_query() - Convert a standard NUL-terminated string
+ *				to DNS name
+ * @string: Name as a NUL-terminated string
+ * @buf: Buffer in which to place DNS name
+ * @len: Length of @string, including the NUL terminator
+ *
+ * DNS names consist of "<length>element" pairs.
+ *
+ * Return: Byte following the constructed DNS name, or NULL if @string is
+ *	   not NUL-terminated
+ */
+static uint8_t *hdd_dns_make_name_query(const uint8_t *string,
+					uint8_t *buf, uint8_t len)
+{
+	uint8_t *length_byte = buf++;
+	uint8_t c;
+
+	if (string[len - 1]) {
+		hdd_debug("DNS name is not null terminated");
+		return NULL;
+	}
+
+	while ((c = *(string++))) {
+		if (c == '.') {
+			*length_byte = buf - length_byte - 1;
+			length_byte = buf;
+		}
+		*(buf++) = c;
+	}
+	*length_byte = buf - length_byte - 1;
+	*(buf++) = '\0';
+	return buf;
+}
+
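+/*
+ * Example of the conversions above: hdd_dns_make_name_query() turns the
+ * NUL-terminated string "www.example.com" into the DNS wire form
+ * "\x03www\x07example\x03com\x00", and hdd_dns_unmake_name_query() converts
+ * that buffer back to dotted form in place, returning a pointer to
+ * "www.example.com" (one byte past the leading dot).
+ */
+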
+/**
+ * hdd_set_clear_connectivity_check_stats_info() - set/clear stats info
+ * @adapter: Pointer to hdd adapter
+ * @arp_stats_params: arp stats structure to be sent to FW
+ * @tb: nl attribute
+ * @is_set_stats: set/clear stats
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int hdd_set_clear_connectivity_check_stats_info(
+		struct hdd_adapter *adapter,
+		struct set_arp_stats_params *arp_stats_params,
+		struct nlattr **tb, bool is_set_stats)
+{
+	struct nlattr *tb2[CONNECTIVITY_STATS_SET_MAX + 1];
+	struct nlattr *curr_attr = NULL;
+	int err = 0;
+	uint32_t pkt_bitmap;
+	int rem;
+
+	/* A set NUD command to start tracking has been received. */
+	nla_for_each_nested(curr_attr,
+			    tb[STATS_SET_DATA_PKT_INFO],
+			    rem) {
+
+		if (wlan_cfg80211_nla_parse(tb2,
+				CONNECTIVITY_STATS_SET_MAX,
+				nla_data(curr_attr), nla_len(curr_attr),
+				qca_wlan_vendor_set_connectivity_check_stats)) {
+			hdd_err("nla_parse failed");
+			err = -EINVAL;
+			goto end;
+		}
+
+		if (tb2[STATS_PKT_INFO_TYPE]) {
+			pkt_bitmap = nla_get_u32(tb2[STATS_PKT_INFO_TYPE]);
+			if (!pkt_bitmap) {
+				hdd_err("pkt tracking bitmap is empty");
+				err = -EINVAL;
+				goto end;
+			}
+
+			if (is_set_stats) {
+				arp_stats_params->pkt_type_bitmap = pkt_bitmap;
+				arp_stats_params->flag = true;
+				adapter->pkt_type_bitmap |=
+					arp_stats_params->pkt_type_bitmap;
+
+				if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ARP) {
+					if (!tb[STATS_GW_IPV4]) {
+						hdd_err("GW ipv4 address is not present");
+						err = -EINVAL;
+						goto end;
+					}
+					arp_stats_params->ip_addr =
+						nla_get_u32(tb[STATS_GW_IPV4]);
+					arp_stats_params->pkt_type =
+						WLAN_NUD_STATS_ARP_PKT_TYPE;
+					adapter->track_arp_ip =
+						arp_stats_params->ip_addr;
+				}
+
+				if (pkt_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
+					uint8_t *domain_name;
+
+					if (!tb2[STATS_DNS_DOMAIN_NAME]) {
+						hdd_err("DNS domain id is not present");
+						err = -EINVAL;
+						goto end;
+					}
+					domain_name = nla_data(
+						tb2[STATS_DNS_DOMAIN_NAME]);
+					adapter->track_dns_domain_len =
+						nla_len(tb2[
+							STATS_DNS_DOMAIN_NAME]);
+					if (!hdd_dns_make_name_query(
+						domain_name,
+						adapter->dns_payload,
+						adapter->track_dns_domain_len))
+						adapter->track_dns_domain_len =
+							0;
+					/* DNS tracking isn't supported in FW. */
+					arp_stats_params->pkt_type_bitmap &=
+						~CONNECTIVITY_CHECK_SET_DNS;
+				}
+
+				if (pkt_bitmap &
+				    CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
+					if (!tb2[STATS_SRC_PORT] ||
+					    !tb2[STATS_DEST_PORT]) {
+						hdd_err("Source/Dest port is not present");
+						err = -EINVAL;
+						goto end;
+					}
+					arp_stats_params->tcp_src_port =
+						nla_get_u32(
+							tb2[STATS_SRC_PORT]);
+					arp_stats_params->tcp_dst_port =
+						nla_get_u32(
+							tb2[STATS_DEST_PORT]);
+					adapter->track_src_port =
+						arp_stats_params->tcp_src_port;
+					adapter->track_dest_port =
+						arp_stats_params->tcp_dst_port;
+				}
+
+				if (pkt_bitmap &
+				    CONNECTIVITY_CHECK_SET_ICMPV4) {
+					if (!tb2[STATS_DEST_IPV4]) {
+						hdd_err("destination ipv4 address to track ping packets is not present");
+						err = -EINVAL;
+						goto end;
+					}
+					arp_stats_params->icmp_ipv4 =
+						nla_get_u32(
+							tb2[STATS_DEST_IPV4]);
+					adapter->track_dest_ipv4 =
+						arp_stats_params->icmp_ipv4;
+				}
+			} else {
+				/* clear stats command received */
+				arp_stats_params->pkt_type_bitmap = pkt_bitmap;
+				arp_stats_params->flag = false;
+				adapter->pkt_type_bitmap &=
+					(~arp_stats_params->pkt_type_bitmap);
+
+				if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ARP) {
+					arp_stats_params->pkt_type =
+						WLAN_NUD_STATS_ARP_PKT_TYPE;
+					qdf_mem_zero(&adapter->hdd_stats.
+								hdd_arp_stats,
+						     sizeof(adapter->hdd_stats.
+								hdd_arp_stats));
+					adapter->track_arp_ip = 0;
+				}
+
+				if (pkt_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
+					/* DNS tracking isn't supported in FW. */
+					arp_stats_params->pkt_type_bitmap &=
+						~CONNECTIVITY_CHECK_SET_DNS;
+					qdf_mem_zero(&adapter->hdd_stats.
+								hdd_dns_stats,
+						     sizeof(adapter->hdd_stats.
+								hdd_dns_stats));
+					qdf_mem_zero(adapter->dns_payload,
+						adapter->track_dns_domain_len);
+					adapter->track_dns_domain_len = 0;
+				}
+
+				if (pkt_bitmap &
+				    CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
+					qdf_mem_zero(&adapter->hdd_stats.
+								hdd_tcp_stats,
+						     sizeof(adapter->hdd_stats.
+								hdd_tcp_stats));
+					adapter->track_src_port = 0;
+					adapter->track_dest_port = 0;
+				}
+
+				if (pkt_bitmap &
+				    CONNECTIVITY_CHECK_SET_ICMPV4) {
+					qdf_mem_zero(&adapter->hdd_stats.
+							hdd_icmpv4_stats,
+						     sizeof(adapter->hdd_stats.
+							hdd_icmpv4_stats));
+					adapter->track_dest_ipv4 = 0;
+				}
+			}
+		} else {
+			hdd_err("stats list empty");
+			err = -EINVAL;
+			goto end;
+		}
+	}
+
+end:
+	return err;
+}
+
 const struct nla_policy qca_wlan_vendor_set_trace_level_policy[
 		QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MAX + 1] = {
 	[QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM] =
@@ -15275,6 +15509,272 @@ qca_wlan_vendor_get_nud_stats[STATS_GET_MAX + 1] = {
 	[DATA_PKT_STATS] = {.type = NLA_U16 },
 };
 
+/**
+ * hdd_populate_dns_stats_info() - send dns stats info to network stack
+ * @adapter: pointer to adapter context
+ * @skb: pointer to skb
+ *
+ * Return: An error code or 0 on success.
+ */
+static int hdd_populate_dns_stats_info(struct hdd_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	uint8_t *dns_query;
+
+	dns_query = qdf_mem_malloc(adapter->track_dns_domain_len + 1);
+	if (!dns_query)
+		return -EINVAL;
+
+	qdf_mem_copy(dns_query, adapter->dns_payload,
+		     adapter->track_dns_domain_len);
+
+	if (nla_put_u16(skb, CHECK_STATS_PKT_TYPE,
+		CONNECTIVITY_CHECK_SET_DNS) ||
+	    nla_put(skb, CHECK_STATS_PKT_DNS_DOMAIN_NAME,
+		adapter->track_dns_domain_len,
+		hdd_dns_unmake_name_query(dns_query)) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV,
+		adapter->hdd_stats.hdd_dns_stats.tx_dns_req_count) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC,
+		adapter->hdd_stats.hdd_dns_stats.tx_host_fw_sent) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC,
+		adapter->hdd_stats.hdd_dns_stats.tx_host_fw_sent) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS,
+		adapter->hdd_stats.hdd_dns_stats.tx_ack_cnt) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_RX_COUNT_BY_UPPER_MAC,
+		adapter->hdd_stats.hdd_dns_stats.rx_dns_rsp_count) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_COUNT_TO_NETDEV,
+		adapter->hdd_stats.hdd_dns_stats.rx_delivered) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_COUNT_OUT_OF_ORDER_DROP,
+		adapter->hdd_stats.hdd_dns_stats.rx_host_drop)) {
+		hdd_err("nla put fail");
+		qdf_mem_free(dns_query);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	qdf_mem_free(dns_query);
+	return 0;
+}
+
+/**
+ * hdd_populate_tcp_stats_info() - send tcp stats info to network stack
+ * @adapter: pointer to adapter context
+ * @skb: pointer to skb
+ * @pkt_type: tcp pkt type
+ *
+ * Return: An error code or 0 on success.
+ */
+static int hdd_populate_tcp_stats_info(struct hdd_adapter *adapter,
+				       struct sk_buff *skb,
+				       uint8_t pkt_type)
+{
+	switch (pkt_type) {
+	case CONNECTIVITY_CHECK_SET_TCP_SYN:
+		/* Fill info for tcp syn packets (tx packet) */
+		if (nla_put_u16(skb, CHECK_STATS_PKT_TYPE,
+			CONNECTIVITY_CHECK_SET_TCP_SYN) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_SRC_PORT,
+			adapter->track_src_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_DEST_PORT,
+			adapter->track_dest_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV,
+			adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_count) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.
+						tx_tcp_syn_host_fw_sent) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.
+						tx_tcp_syn_host_fw_sent) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS,
+			adapter->hdd_stats.hdd_tcp_stats.tx_tcp_syn_ack_cnt)) {
+			hdd_err("nla put fail");
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+		break;
+	case CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
+		/* Fill info for tcp syn-ack packets (rx packet) */
+		if (nla_put_u16(skb, CHECK_STATS_PKT_TYPE,
+			CONNECTIVITY_CHECK_SET_TCP_SYN_ACK) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_SRC_PORT,
+			adapter->track_src_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_DEST_PORT,
+			adapter->track_dest_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_RSP_RX_COUNT_BY_LOWER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.rx_fw_cnt) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_RSP_RX_COUNT_BY_UPPER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.
+							rx_tcp_syn_ack_count) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_RSP_COUNT_TO_NETDEV,
+			adapter->hdd_stats.hdd_tcp_stats.rx_delivered) ||
+		    nla_put_u16(skb,
+			CHECK_STATS_PKT_RSP_COUNT_OUT_OF_ORDER_DROP,
+			adapter->hdd_stats.hdd_tcp_stats.rx_host_drop)) {
+			hdd_err("nla put fail");
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+		break;
+	case CONNECTIVITY_CHECK_SET_TCP_ACK:
+		/* Fill info for tcp ack packets (tx packet) */
+		if (nla_put_u16(skb, CHECK_STATS_PKT_TYPE,
+			CONNECTIVITY_CHECK_SET_TCP_ACK) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_SRC_PORT,
+			adapter->track_src_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_DEST_PORT,
+			adapter->track_dest_port) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV,
+			adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_count) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.
+						tx_tcp_ack_host_fw_sent) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC,
+			adapter->hdd_stats.hdd_tcp_stats.
+						tx_tcp_ack_host_fw_sent) ||
+		    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS,
+			adapter->hdd_stats.hdd_tcp_stats.tx_tcp_ack_ack_cnt)) {
+			hdd_err("nla put fail");
+			kfree_skb(skb);
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * hdd_populate_icmpv4_stats_info() - send icmpv4 stats info to network stack
+ * @adapter: pointer to adapter context
+ * @skb: pointer to skb
+ *
+ * Return: An error code or 0 on success.
+ */
+static int hdd_populate_icmpv4_stats_info(struct hdd_adapter *adapter,
+					  struct sk_buff *skb)
+{
+	if (nla_put_u16(skb, CHECK_STATS_PKT_TYPE,
+		CONNECTIVITY_CHECK_SET_ICMPV4) ||
+	    nla_put_u32(skb, CHECK_STATS_PKT_DEST_IPV4,
+		adapter->track_dest_ipv4) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV,
+		adapter->hdd_stats.hdd_icmpv4_stats.tx_icmpv4_req_count) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC,
+		adapter->hdd_stats.hdd_icmpv4_stats.tx_host_fw_sent) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC,
+		adapter->hdd_stats.hdd_icmpv4_stats.tx_host_fw_sent) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS,
+		adapter->hdd_stats.hdd_icmpv4_stats.tx_ack_cnt) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_RX_COUNT_BY_LOWER_MAC,
+		adapter->hdd_stats.hdd_icmpv4_stats.rx_fw_cnt) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_RX_COUNT_BY_UPPER_MAC,
+		adapter->hdd_stats.hdd_icmpv4_stats.rx_icmpv4_rsp_count) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_COUNT_TO_NETDEV,
+		adapter->hdd_stats.hdd_icmpv4_stats.rx_delivered) ||
+	    nla_put_u16(skb, CHECK_STATS_PKT_RSP_COUNT_OUT_OF_ORDER_DROP,
+		adapter->hdd_stats.hdd_icmpv4_stats.rx_host_drop)) {
+		hdd_err("nla put fail");
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * hdd_populate_connectivity_check_stats_info() - send connectivity stats info
+ *						  to network stack
+ * @adapter: pointer to adapter context
+ * @skb: pointer to skb
+ *
+ * Return: An error code or 0 on success.
+ */
+static int hdd_populate_connectivity_check_stats_info(
+	struct hdd_adapter *adapter, struct sk_buff *skb)
+{
+	struct nlattr *connect_stats, *connect_info;
+	uint32_t count = 0;
+
+	connect_stats = nla_nest_start(skb, DATA_PKT_STATS);
+	if (!connect_stats) {
+		hdd_err("nla_nest_start failed");
+		return -EINVAL;
+	}
+
+	if (adapter->pkt_type_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
+		connect_info = nla_nest_start(skb, count);
+		if (!connect_info) {
+			hdd_err("nla_nest_start failed count %u", count);
+			return -EINVAL;
+		}
+
+		if (hdd_populate_dns_stats_info(adapter, skb))
+			goto put_attr_fail;
+		nla_nest_end(skb, connect_info);
+		count++;
+	}
+
+	if (adapter->pkt_type_bitmap & CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
+		connect_info = nla_nest_start(skb, count);
+		if (!connect_info) {
+			hdd_err("nla_nest_start failed count %u", count);
+			return -EINVAL;
+		}
+		if (hdd_populate_tcp_stats_info(adapter, skb,
+					CONNECTIVITY_CHECK_SET_TCP_SYN))
+			goto put_attr_fail;
+		nla_nest_end(skb, connect_info);
+		count++;
+
+		connect_info = nla_nest_start(skb, count);
+		if (!connect_info) {
+			hdd_err("nla_nest_start failed count %u", count);
+			return -EINVAL;
+		}
+		if (hdd_populate_tcp_stats_info(adapter, skb,
+					CONNECTIVITY_CHECK_SET_TCP_SYN_ACK))
+			goto put_attr_fail;
+		nla_nest_end(skb, connect_info);
+		count++;
+
+		connect_info = nla_nest_start(skb, count);
+		if (!connect_info) {
+			hdd_err("nla_nest_start failed count %u", count);
+			return -EINVAL;
+		}
+		if (hdd_populate_tcp_stats_info(adapter, skb,
+					CONNECTIVITY_CHECK_SET_TCP_ACK))
+			goto put_attr_fail;
+		nla_nest_end(skb, connect_info);
+		count++;
+	}
+
+	if (adapter->pkt_type_bitmap & CONNECTIVITY_CHECK_SET_ICMPV4) {
+		connect_info = nla_nest_start(skb, count);
+		if (!connect_info) {
+			hdd_err("nla_nest_start failed count %u", count);
+			return -EINVAL;
+		}
+
+		if (hdd_populate_icmpv4_stats_info(adapter, skb))
+			goto put_attr_fail;
+		nla_nest_end(skb, connect_info);
+		count++;
+	}
+
+	nla_nest_end(skb, connect_stats);
+	return 0;
+
+put_attr_fail:
+	hdd_err("QCA_WLAN_VENDOR_ATTR put fail. count %u", count);
+	return -EINVAL;
+}
+
 /**
  * __wlan_hdd_cfg80211_get_nud_stats() - get arp stats command to firmware
  * @wiphy: pointer to wireless wiphy structure.

+ 2 - 1
core/hdd/src/wlan_hdd_cm_connect.c

@@ -30,6 +30,7 @@
 #include "wlan_hdd_connectivity_logging.h"
 #include <osif_cm_req.h>
 #include <wlan_logging_sock_svc.h>
+#include <wlan_hdd_periodic_sta_stats.h>
 #include <wlan_hdd_green_ap.h>
 #include <wlan_hdd_p2p.h>
 #include <wlan_p2p_ucfg_api.h>
@@ -51,7 +52,7 @@
 #include "wlan_hdd_hostapd.h"
 #include <wlan_twt_ucfg_ext_api.h>
 #include <osif_twt_internal.h>
-#include "wlan_dp_ucfg_api.h"
+#include <wlan_dp_ucfg_api.h>
 
 bool hdd_cm_is_vdev_associated(struct hdd_adapter *adapter)
 {

+ 1 - 0
core/hdd/src/wlan_hdd_cm_disconnect.c

@@ -28,6 +28,7 @@
 #include <osif_cm_req.h>
 #include "wlan_hdd_cm_api.h"
 #include "wlan_ipa_ucfg_api.h"
+#include "wlan_hdd_periodic_sta_stats.h"
 #include "wlan_hdd_stats.h"
 #include "wlan_hdd_scan.h"
 #include "sme_power_save_api.h"

+ 166 - 0
core/hdd/src/wlan_hdd_lro.c

@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_hdd_lro.c
+ *
+ * WLAN HDD LRO interface implementation
+ */
+
+#include <wlan_hdd_includes.h>
+#include <qdf_types.h>
+#include <qdf_lro.h>
+#include <wlan_hdd_lro.h>
+#include <wlan_hdd_napi.h>
+#include <wma_api.h>
+
+#include <linux/inet_lro.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <net/tcp.h>
+
+#define LRO_VALID_FIELDS \
+	(LRO_DESC | LRO_ELIGIBILITY_CHECKED | LRO_TCP_ACK_NUM | \
+	 LRO_TCP_DATA_CSUM | LRO_TCP_SEQ_NUM | LRO_TCP_WIN)
+
+#if defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
+    defined(QCA_WIFI_QCA6490) || defined(QCA_WIFI_QCA6750) || \
+    defined(QCA_WIFI_KIWI)
+#ifdef WLAN_FEATURE_LRO_CTX_IN_CB
+static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
+{
+	return (qdf_lro_ctx_t)QDF_NBUF_CB_RX_LRO_CTX(skb);
+}
+#else
+static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
+{
+	struct hif_opaque_softc *hif_hdl =
+		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
+	if (!hif_hdl)
+		return NULL;
+
+	return hif_get_lro_info(QDF_NBUF_CB_RX_CTX_ID(skb), hif_hdl);
+}
+#endif
+
+/**
+ * hdd_lro_rx() - LRO receive function
+ * @adapter: HDD adapter
+ * @skb: network buffer
+ *
+ * Delivers LRO eligible frames to the LRO manager
+ *
+ * Return: QDF_STATUS_SUCCESS - frame delivered to LRO manager
+ * QDF_STATUS_E_FAILURE - frame not delivered
+ * QDF_STATUS_E_NOSUPPORT - LRO is not enabled on the net device
+ */
+QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
+{
+	qdf_lro_ctx_t ctx;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	struct qdf_lro_info info;
+	struct net_lro_desc *lro_desc = NULL;
+
+	if ((adapter->dev->features & NETIF_F_LRO) != NETIF_F_LRO)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	ctx = wlan_hdd_get_lro_ctx(skb);
+	if (!ctx) {
+		hdd_err("LRO mgr is NULL");
+		return status;
+	}
+
+	info.iph = skb->data;
+	info.tcph = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
+	ctx->lro_mgr->dev = adapter->dev;
+	if (qdf_lro_get_info(ctx, skb, &info, (void **)&lro_desc)) {
+		struct net_lro_info hdd_lro_info;
+
+		hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
+
+		hdd_lro_info.lro_desc = lro_desc;
+		hdd_lro_info.lro_eligible = 1;
+		hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
+		hdd_lro_info.tcp_data_csum =
+			 csum_unfold(htons(QDF_NBUF_CB_RX_TCP_CHKSUM(skb)));
+		hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
+		hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
+
+		lro_receive_skb_ext(ctx->lro_mgr, skb, (void *)adapter,
+				    &hdd_lro_info);
+
+		if (!hdd_lro_info.lro_desc->active)
+			qdf_lro_desc_free(ctx, lro_desc);
+
+		status = QDF_STATUS_SUCCESS;
+	} else {
+		qdf_lro_flush_pkt(ctx, &info);
+	}
+	return status;
+}
+
+/**
+ * hdd_lro_display_stats() - display LRO statistics
+ * @hdd_ctx: hdd context
+ *
+ * Return: none
+ */
+void hdd_lro_display_stats(struct hdd_context *hdd_ctx)
+{
+	hdd_debug("LRO stats is broken, will fix it");
+}
+
+QDF_STATUS
+hdd_lro_set_reset(struct hdd_context *hdd_ctx, struct hdd_adapter *adapter,
+			       uint8_t enable_flag)
+{
+	if ((hdd_ctx->ol_enable != CFG_LRO_ENABLED) ||
+	    (adapter->device_mode != QDF_STA_MODE)) {
+		hdd_debug("LRO is already Disabled");
+		return 0;
+	}
+
+	if (enable_flag) {
+		qdf_atomic_set(&hdd_ctx->vendor_disable_lro_flag, 0);
+		adapter->dev->features |= NETIF_F_LRO;
+	} else {
+		/* Disable LRO, Enable tcpdelack*/
+		qdf_atomic_set(&hdd_ctx->vendor_disable_lro_flag, 1);
+		adapter->dev->features &= ~NETIF_F_LRO;
+		hdd_debug("LRO Disabled");
+
+		if (hdd_ctx->config->enable_tcp_delack) {
+			struct wlan_rx_tp_data rx_tp_data;
+
+			hdd_debug("Enable TCP delack as LRO is disabled");
+			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
+			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
+			wlan_hdd_update_tcp_rx_param(hdd_ctx, &rx_tp_data);
+			hdd_ctx->en_tcp_delack_no_lro = 1;
+		}
+	}
+	return 0;
+}
+
+int hdd_is_lro_enabled(struct hdd_context *hdd_ctx)
+{
+	if (hdd_ctx->ol_enable != CFG_LRO_ENABLED)
+		return -EOPNOTSUPP;
+
+	return 0;
+}

File diff suppressed because it is too large
+ 1281 - 3
core/hdd/src/wlan_hdd_main.c


+ 452 - 1
core/hdd/src/wlan_hdd_nud_tracking.c

@@ -28,7 +28,211 @@
 #include "hdd_dp_cfg.h"
 #include <cdp_txrx_misc.h>
 #include "wlan_cm_roam_ucfg_api.h"
-#include "wlan_hdd_nud_tracking.h"
+
+void hdd_nud_set_gateway_addr(struct hdd_adapter *adapter,
+			      struct qdf_mac_addr gw_mac_addr)
+{
+	qdf_mem_copy(adapter->nud_tracking.gw_mac_addr.bytes,
+		     gw_mac_addr.bytes,
+		     sizeof(struct qdf_mac_addr));
+	adapter->nud_tracking.is_gw_updated = true;
+}
+
+void hdd_nud_incr_gw_rx_pkt_cnt(struct hdd_adapter *adapter,
+				struct qdf_mac_addr *mac_addr)
+{
+	if (!adapter->nud_tracking.is_gw_rx_pkt_track_enabled)
+		return;
+
+	if (!adapter->nud_tracking.is_gw_updated)
+		return;
+
+	if (qdf_is_macaddr_equal(&adapter->nud_tracking.gw_mac_addr,
+				 mac_addr))
+		qdf_atomic_inc(&adapter
+			       ->nud_tracking.tx_rx_stats.gw_rx_packets);
+}
+
+void hdd_nud_flush_work(struct hdd_adapter *adapter)
+{
+	struct hdd_context *hdd_ctx;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	if (hdd_adapter_is_link_adapter(adapter))
+		return;
+
+	if (adapter->device_mode == QDF_STA_MODE &&
+	    hdd_ctx->config->enable_nud_tracking) {
+		hdd_debug("Flush the NUD work");
+		qdf_disable_work(&adapter->nud_tracking.nud_event_work);
+	}
+}
+
+void hdd_nud_deinit_tracking(struct hdd_adapter *adapter)
+{
+	struct hdd_context *hdd_ctx;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	if (adapter->device_mode == QDF_STA_MODE &&
+	    hdd_ctx->config->enable_nud_tracking) {
+		hdd_debug("DeInitialize the NUD tracking");
+		qdf_destroy_work(NULL, &adapter->nud_tracking.nud_event_work);
+	}
+}
+
+void hdd_nud_ignore_tracking(struct hdd_adapter *adapter, bool ignoring)
+{
+	struct hdd_context *hdd_ctx;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	if (adapter->device_mode == QDF_STA_MODE &&
+	    hdd_ctx->config->enable_nud_tracking)
+		adapter->nud_tracking.ignore_nud_tracking = ignoring;
+}
+
+void hdd_nud_reset_tracking(struct hdd_adapter *adapter)
+{
+	struct hdd_context *hdd_ctx;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	if (adapter->device_mode == QDF_STA_MODE &&
+	    hdd_ctx->config->enable_nud_tracking) {
+		hdd_debug("Reset the NUD tracking");
+
+		qdf_zero_macaddr(&adapter->nud_tracking.gw_mac_addr);
+		adapter->nud_tracking.is_gw_updated = false;
+		qdf_mem_zero(&adapter->nud_tracking.tx_rx_stats,
+			     sizeof(struct hdd_nud_tx_rx_stats));
+
+		adapter->nud_tracking.curr_state = NUD_NONE;
+		qdf_atomic_set(&adapter
+			       ->nud_tracking.tx_rx_stats.gw_rx_packets, 0);
+	}
+}
+
+/**
+ * hdd_nud_stats_info() - display wlan NUD stats info
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+static void hdd_nud_stats_info(struct hdd_adapter *adapter)
+{
+	struct netdev_queue *txq;
+	int i = 0;
+
+	hdd_debug("**** NUD STATS: ****");
+	hdd_debug("NUD Probe Tx  : %d",
+		  adapter->nud_tracking.tx_rx_stats.pre_tx_packets);
+	hdd_debug("NUD Probe Ack : %d",
+		  adapter->nud_tracking.tx_rx_stats.pre_tx_acked);
+	hdd_debug("NUD Probe Rx  : %d",
+		  adapter->nud_tracking.tx_rx_stats.pre_rx_packets);
+	hdd_debug("NUD Failure Tx  : %d",
+		  adapter->nud_tracking.tx_rx_stats.post_tx_packets);
+	hdd_debug("NUD Failure Ack : %d",
+		  adapter->nud_tracking.tx_rx_stats.post_tx_acked);
+	hdd_debug("NUD Failure Rx  : %d",
+		  adapter->nud_tracking.tx_rx_stats.post_rx_packets);
+	hdd_debug("NUD Gateway Rx  : %d",
+		  qdf_atomic_read(&adapter
+				  ->nud_tracking.tx_rx_stats.gw_rx_packets));
+
+	hdd_debug("carrier state: %d", netif_carrier_ok(adapter->dev));
+
+	for (i = 0; i < NUM_TX_QUEUES; i++) {
+		txq = netdev_get_tx_queue(adapter->dev, i);
+		hdd_debug("Queue: %d status: %d txq->trans_start: %lu",
+			  i, netif_tx_queue_stopped(txq), txq->trans_start);
+	}
+
+	hdd_debug("Current pause_map value %x", adapter->pause_map);
+}
+
+/**
+ * hdd_nud_capture_stats() - capture wlan NUD stats
+ * @adapter: Pointer to hdd adapter
+ * @nud_state: NUD state for which stats to capture
+ *
+ * Return: None
+ */
+static void hdd_nud_capture_stats(struct hdd_adapter *adapter,
+				  uint8_t nud_state)
+{
+	switch (nud_state) {
+	case NUD_INCOMPLETE:
+	case NUD_PROBE:
+		adapter->nud_tracking.tx_rx_stats.pre_tx_packets =
+				adapter->stats.tx_packets;
+		adapter->nud_tracking.tx_rx_stats.pre_rx_packets =
+				adapter->stats.rx_packets;
+		adapter->nud_tracking.tx_rx_stats.pre_tx_acked =
+				hdd_txrx_get_tx_ack_count(adapter);
+		break;
+	case NUD_FAILED:
+		adapter->nud_tracking.tx_rx_stats.post_tx_packets =
+				adapter->stats.tx_packets;
+		adapter->nud_tracking.tx_rx_stats.post_rx_packets =
+				adapter->stats.rx_packets;
+		adapter->nud_tracking.tx_rx_stats.post_tx_acked =
+				hdd_txrx_get_tx_ack_count(adapter);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * hdd_nud_honour_failure() - check if the NUD failure is to be honoured
+ * @adapter: Pointer to hdd_adapter
+ *
+ * Return: true if the NUD failure is to be honoured, else false.
+ */
+static bool hdd_nud_honour_failure(struct hdd_adapter *adapter)
+{
+	uint32_t tx_transmitted;
+	uint32_t tx_acked;
+	uint32_t gw_rx_pkt;
+
+	tx_transmitted = adapter->nud_tracking.tx_rx_stats.post_tx_packets -
+		adapter->nud_tracking.tx_rx_stats.pre_tx_packets;
+	tx_acked = adapter->nud_tracking.tx_rx_stats.post_tx_acked -
+		adapter->nud_tracking.tx_rx_stats.pre_tx_acked;
+	gw_rx_pkt = qdf_atomic_read(&adapter
+			->nud_tracking.tx_rx_stats.gw_rx_packets);
+
+	if (!tx_transmitted || !tx_acked || !gw_rx_pkt) {
+		hdd_debug("NUD_FAILURE_HONORED [mac:"QDF_MAC_ADDR_FMT"]",
+			  QDF_MAC_ADDR_REF(adapter->nud_tracking.gw_mac_addr.bytes));
+		hdd_nud_stats_info(adapter);
+		return true;
+	}
+	hdd_debug("NUD_FAILURE_NOT_HONORED [mac:"QDF_MAC_ADDR_FMT"]",
+		  QDF_MAC_ADDR_REF(adapter->nud_tracking.gw_mac_addr.bytes));
+	hdd_nud_stats_info(adapter);
+	return false;
+}
+
+/**
+ * hdd_nud_set_tracking() - set the NUD tracking info
+ * @adapter: Pointer to hdd_adapter
+ * @nud_state: Current NUD state to set
+ * @capture_enabled: whether GW Rx packets are to be captured
+ *
+ * Return: None
+ */
+static void hdd_nud_set_tracking(struct hdd_adapter *adapter,
+				 uint8_t nud_state,
+				 bool capture_enabled)
+{
+	adapter->nud_tracking.curr_state = nud_state;
+	qdf_atomic_set(&adapter->nud_tracking.tx_rx_stats.gw_rx_packets, 0);
+	adapter->nud_tracking.is_gw_rx_pkt_track_enabled = capture_enabled;
+}
 
 static void
 hdd_handle_nud_fail_sta(struct hdd_context *hdd_ctx,
@@ -69,6 +273,24 @@ hdd_handle_nud_fail_non_sta(struct hdd_adapter *adapter)
 				     false);
 }
 
+#ifdef WLAN_NUD_TRACKING
+static bool
+hdd_is_roam_after_nud_enabled(struct hdd_config *config)
+{
+	if (config->enable_nud_tracking == ROAM_AFTER_NUD_FAIL ||
+	    config->enable_nud_tracking == DISCONNECT_AFTER_ROAM_FAIL)
+		return true;
+
+	return false;
+}
+#else
+static bool
+hdd_is_roam_after_nud_enabled(struct hdd_config *config)
+{
+	return false;
+}
+#endif
+
 /**
  * __hdd_nud_failure_work() - work for nud event
  * @adapter: Pointer to hdd_adapter
@@ -96,6 +318,10 @@ static void __hdd_nud_failure_work(struct hdd_adapter *adapter)
 		hdd_debug("Not in Connected State");
 		return;
 	}
+	if (adapter->nud_tracking.curr_state != NUD_FAILED) {
+		hdd_debug("Not in NUD_FAILED state");
+		return;
+	}
 
 	if (hdd_ctx->hdd_wlan_suspended) {
 		hdd_debug("wlan is suspended, ignore NUD failure event");
@@ -146,3 +372,228 @@ void hdd_nud_failure_work(hdd_cb_handle context, uint8_t vdev_id)
 
 	osif_vdev_sync_op_stop(vdev_sync);
 }
+
+void hdd_nud_init_tracking(struct hdd_adapter *adapter)
+{
+	struct hdd_context *hdd_ctx;
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+
+	if (adapter->device_mode == QDF_STA_MODE &&
+	    hdd_ctx->config->enable_nud_tracking) {
+		hdd_debug("Initialize the NUD tracking");
+
+		qdf_zero_macaddr(&adapter->nud_tracking.gw_mac_addr);
+		qdf_mem_zero(&adapter->nud_tracking.tx_rx_stats,
+			     sizeof(struct hdd_nud_tx_rx_stats));
+
+		adapter->nud_tracking.curr_state = NUD_NONE;
+		adapter->nud_tracking.ignore_nud_tracking = false;
+		adapter->nud_tracking.is_gw_updated = false;
+
+		qdf_atomic_init(&adapter
+				->nud_tracking.tx_rx_stats.gw_rx_packets);
+		qdf_create_work(0, &adapter->nud_tracking.nud_event_work,
+				hdd_nud_failure_work, adapter);
+	}
+}
+
+/**
+ * hdd_nud_process_failure_event() - processing NUD_FAILED event
+ * @adapter: Pointer to hdd_adapter
+ *
+ * Return: None
+ */
+static void hdd_nud_process_failure_event(struct hdd_adapter *adapter)
+{
+	uint8_t curr_state;
+
+	curr_state = adapter->nud_tracking.curr_state;
+	if (curr_state == NUD_PROBE || curr_state == NUD_INCOMPLETE) {
+		hdd_nud_capture_stats(adapter, NUD_FAILED);
+		if (hdd_nud_honour_failure(adapter)) {
+			adapter->nud_tracking.curr_state = NUD_FAILED;
+			qdf_sched_work(0, &adapter
+					->nud_tracking.nud_event_work);
+		} else {
+			hdd_debug("NUD_START [0x%x]", NUD_INCOMPLETE);
+			hdd_nud_capture_stats(adapter, NUD_INCOMPLETE);
+			hdd_nud_set_tracking(adapter, NUD_INCOMPLETE, true);
+		}
+	} else {
+		hdd_debug("NUD FAILED -> Current State [0x%x]", curr_state);
+	}
+}
+
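+/*
+ * Summary of the NUD tracking flow handled by the netevent filter below:
+ * - NUD_PROBE/NUD_INCOMPLETE: capture pre-failure tx/rx counters and start
+ *   counting rx packets from the gateway.
+ * - NUD_REACHABLE: reset tracking back to NUD_NONE.
+ * - NUD_FAILED while in NUD_PROBE/NUD_INCOMPLETE: capture post-failure
+ *   counters; the failure is honoured and nud_event_work is scheduled only
+ *   if no packets were transmitted, no tx acks were seen, or no gateway rx
+ *   packets arrived in between.
+ * - NUD_FAILED while in NUD_NONE (typically right after a roam): move to
+ *   NUD_PROBE so that a subsequent NUD_FAILED can be honoured.
+ */
+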
+/**
+ * hdd_nud_filter_netevent() - filter netevents for STA interface
+ * @neigh: Pointer to neighbour
+ *
+ * Return: None
+ */
+static void hdd_nud_filter_netevent(struct neighbour *neigh)
+{
+	int status;
+	struct hdd_adapter *adapter;
+	struct hdd_context *hdd_ctx;
+	const struct net_device *netdev = neigh->dev;
+
+	hdd_enter();
+
+	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+	status = wlan_hdd_validate_context(hdd_ctx);
+	if (0 != status)
+		return;
+
+	adapter = hdd_get_adapter_by_macaddr(hdd_ctx, netdev->dev_addr);
+
+	if (!adapter)
+		return;
+
+	status = hdd_validate_adapter(adapter);
+	if (status)
+		return;
+
+	if (adapter->nud_tracking.ignore_nud_tracking) {
+		hdd_debug("NUD Tracking is Disabled");
+		return;
+	}
+
+	if (!adapter->nud_tracking.is_gw_updated)
+		return;
+
+	if (adapter->device_mode != QDF_STA_MODE)
+		return;
+
+	if (!hdd_cm_is_vdev_associated(adapter)) {
+		hdd_debug("Not in Connected State");
+		return;
+	}
+
+	if (!qdf_is_macaddr_equal(&adapter->nud_tracking.gw_mac_addr,
+				  (struct qdf_mac_addr *)&neigh->ha[0]))
+		return;
+
+	if (hdd_ctx->hdd_wlan_suspended) {
+		hdd_debug("wlan is suspended, ignore NUD event");
+		return;
+	}
+
+	switch (neigh->nud_state) {
+	case NUD_PROBE:
+	case NUD_INCOMPLETE:
+		hdd_debug("NUD_START [0x%x]", neigh->nud_state);
+		hdd_nud_capture_stats(adapter, neigh->nud_state);
+		hdd_nud_set_tracking(adapter,
+				     neigh->nud_state,
+				     true);
+		break;
+
+	case NUD_REACHABLE:
+		hdd_debug("NUD_REACHABLE [0x%x]", neigh->nud_state);
+		hdd_nud_set_tracking(adapter, NUD_NONE, false);
+		break;
+
+	case NUD_FAILED:
+		hdd_debug("NUD_FAILED [0x%x]", neigh->nud_state);
+		/*
+		 * This condition is to handle the scenario where NUD_FAILED
+		 * events are received without any NUD_PROBE/INCOMPLETE event
+		 * post roaming. Nud state is set to NONE as part of roaming.
+		 * NUD_FAILED is not honored when the curr state is any state
+		 * other than NUD_PROBE/INCOMPLETE so post roaming, nud state
+		 * is moved to NUD_PROBE to honor future NUD_FAILED events.
+		 */
+		if (adapter->nud_tracking.curr_state == NUD_NONE) {
+			hdd_nud_capture_stats(adapter, NUD_PROBE);
+			hdd_nud_set_tracking(adapter, NUD_PROBE, true);
+		} else {
+			hdd_nud_process_failure_event(adapter);
+		}
+		break;
+	default:
+		hdd_debug("NUD Event For Other State [0x%x]",
+			  neigh->nud_state);
+		break;
+	}
+	hdd_exit();
+}
+
+/**
+ * __hdd_nud_netevent_cb() - netevent callback
+ * @neighbor: neighbor used in the nud event
+ *
+ * Return: None
+ */
+static void __hdd_nud_netevent_cb(struct neighbour *neighbor)
+{
+	hdd_enter();
+	hdd_nud_filter_netevent(neighbor);
+	hdd_exit();
+}
+
+/**
+ * hdd_nud_netevent_cb() - netevent callback
+ * @nb: Pointer to notifier block
+ * @event: Net Event triggered
+ * @data: Pointer to neighbour struct
+ *
+ * Callback for netevent
+ *
+ * Return: 0 on success
+ */
+static int hdd_nud_netevent_cb(struct notifier_block *nb, unsigned long event,
+			       void *data)
+{
+	struct neighbour *neighbor = data;
+	struct osif_vdev_sync *vdev_sync;
+	int errno;
+
+	errno = osif_vdev_sync_op_start(neighbor->dev, &vdev_sync);
+	if (errno)
+		return errno;
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		__hdd_nud_netevent_cb(neighbor);
+		break;
+	default:
+		break;
+	}
+
+	osif_vdev_sync_op_stop(vdev_sync);
+
+	return 0;
+}
+
+static struct notifier_block wlan_netevent_nb = {
+	.notifier_call = hdd_nud_netevent_cb
+};
+
+int hdd_nud_register_netevent_notifier(struct hdd_context *hdd_ctx)
+{
+	int ret = 0;
+
+	if (hdd_ctx->config->enable_nud_tracking) {
+		ret = register_netevent_notifier(&wlan_netevent_nb);
+		if (!ret)
+			hdd_debug("Registered netevent notifier");
+	}
+	return ret;
+}
+
+void hdd_nud_unregister_netevent_notifier(struct hdd_context *hdd_ctx)
+{
+	int ret;
+
+	if (hdd_ctx->config->enable_nud_tracking) {
+		ret = unregister_netevent_notifier(&wlan_netevent_nb);
+		if (!ret)
+			hdd_debug("Unregistered netevent notifier");
+	}
+}
+
+void hdd_nud_indicate_roam(struct hdd_adapter *adapter)
+{
+	hdd_nud_set_tracking(adapter, NUD_NONE, false);
+}
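
For reference, the NUD handling above is driven by the kernel's netevent notifier chain. Below is a minimal, self-contained sketch of that registration pattern, assuming a standalone kernel module; the module name, callback, and log message are illustrative only and are not part of this driver or this change.

#include <linux/module.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>

/* Illustrative callback: invoked for every neighbour-table update. */
static int demo_netevent_cb(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct neighbour *neigh = data;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	/* neigh->nud_state carries NUD_PROBE/NUD_REACHABLE/NUD_FAILED/... */
	pr_debug("neigh update on %s, nud_state 0x%x\n",
		 neigh->dev->name, neigh->nud_state);
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netevent_cb,
};

static int __init demo_init(void)
{
	/* Same API used by hdd_nud_register_netevent_notifier() above */
	return register_netevent_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netevent_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");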

+ 180 - 0
core/hdd/src/wlan_hdd_nud_tracking.h

@@ -26,6 +26,138 @@
 
 #ifdef WLAN_NUD_TRACKING
 
+/**
+ * struct hdd_nud_tx_rx_stats - Capture tx and rx count during NUD tracking
+ * @pre_tx_packets: Number of tx packets at NUD_PROBE event
+ * @pre_tx_acked: Number of tx acked at NUD_PROBE event
+ * @pre_rx_packets: Number of rx packets at NUD_PROBE event
+ * @post_tx_packets: Number of tx packets at NUD_FAILED event
+ * @post_tx_acked: Number of tx acked at NUD_FAILED event
+ * @post_rx_packets: Number of rx packets at NUD_FAILED event
+ * @gw_rx_packets: Number of rx packets from the registered gateway
+ *                 during the period from NUD_PROBE to NUD_FAILED
+ */
+struct hdd_nud_tx_rx_stats {
+	uint32_t pre_tx_packets;
+	uint32_t pre_tx_acked;
+	uint32_t pre_rx_packets;
+	uint32_t post_tx_packets;
+	uint32_t post_tx_acked;
+	uint32_t post_rx_packets;
+	qdf_atomic_t gw_rx_packets;
+};
+
+/**
+ * struct hdd_nud_tracking_info - structure to keep track of NUD information
+ * @curr_state: current state of NUD machine
+ * @ignore_nud_tracking: true if NUD tracking is not required, else false
+ * @tx_rx_stats: Number of packets during NUD tracking
+ * @gw_mac_addr: gateway MAC address for which NUD events are tracked
+ * @nud_event_work: work to be scheduled during NUD_FAILED
+ * @is_gw_rx_pkt_track_enabled: true if rx pkt capturing is enabled for GW,
+ *                              else false
+ * @is_gw_updated: true if GW is updated for NUD tracking
+ */
+struct hdd_nud_tracking_info {
+	uint8_t curr_state;
+	bool ignore_nud_tracking;
+	struct hdd_nud_tx_rx_stats tx_rx_stats;
+	struct qdf_mac_addr gw_mac_addr;
+	qdf_work_t nud_event_work;
+	bool is_gw_rx_pkt_track_enabled;
+	bool is_gw_updated;
+};
+
+/**
+ * hdd_nud_set_gateway_addr() - set gateway mac address
+ * @adapter: Pointer to adapter
+ * @gw_mac_addr: mac address to be set
+ *
+ * Return: none
+ */
+void hdd_nud_set_gateway_addr(struct hdd_adapter *adapter,
+			      struct qdf_mac_addr gw_mac_addr);
+
+/**
+ * hdd_nud_incr_gw_rx_pkt_cnt() - Increment rx count for gateway
+ * @adapter: Pointer to adapter
+ * @mac_addr: Gateway mac address
+ *
+ * Return: None
+ */
+void hdd_nud_incr_gw_rx_pkt_cnt(struct hdd_adapter *adapter,
+				struct qdf_mac_addr *mac_addr);
+
+/**
+ * hdd_nud_init_tracking() - initialize NUD tracking
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+void hdd_nud_init_tracking(struct hdd_adapter *adapter);
+
+/**
+ * hdd_nud_reset_tracking() - reset NUD tracking
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+void hdd_nud_reset_tracking(struct hdd_adapter *adapter);
+
+/**
+ * hdd_nud_deinit_tracking() - deinitialize NUD tracking
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+void hdd_nud_deinit_tracking(struct hdd_adapter *adapter);
+
+/**
+ * hdd_nud_ignore_tracking() - set/reset NUD tracking status
+ * @adapter: Pointer to hdd_adapter
+ * @ignoring: Ignore status to set
+ *
+ * Return: None
+ */
+void hdd_nud_ignore_tracking(struct hdd_adapter *adapter,
+			     bool ignoring);
+
+/**
+ * hdd_nud_register_netevent_notifier() - Register netevent notifiers.
+ * @hdd_ctx: HDD context
+ *
+ * Register netevent notifiers.
+ *
+ * Return: 0 on success and errno on failure
+ */
+int hdd_nud_register_netevent_notifier(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_nud_unregister_netevent_notifier() - Unregister netevent notifiers.
+ * @hdd_ctx: HDD context
+ *
+ * Unregister netevent notifiers.
+ *
+ * Return: None
+ */
+void hdd_nud_unregister_netevent_notifier(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_nud_flush_work() - flush pending nud work
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+void hdd_nud_flush_work(struct hdd_adapter *adapter);
+
+/**
+ * hdd_nud_indicate_roam() - reset NUD when roaming happens
+ * @adapter: Pointer to hdd adapter
+ *
+ * Return: None
+ */
+void hdd_nud_indicate_roam(struct hdd_adapter *adapter);
+
 /**
 * hdd_nud_failure_work() - Handle NUD failure work
  * @context: HDD context pointer
@@ -35,6 +167,54 @@
  */
 void hdd_nud_failure_work(hdd_cb_handle context, uint8_t vdev_id);
 #else
+static inline void hdd_nud_set_gateway_addr(struct hdd_adapter *adapter,
+					    struct qdf_mac_addr gw_mac_addr)
+{
+}
+
+static inline void hdd_nud_incr_gw_rx_pkt_cnt(struct hdd_adapter *adapter,
+					      struct qdf_mac_addr *mac_addr)
+{
+}
+
+static inline void hdd_nud_init_tracking(struct hdd_adapter *adapter)
+{
+}
+
+static inline void hdd_nud_reset_tracking(struct hdd_adapter *adapter)
+{
+}
+
+static inline void hdd_nud_deinit_tracking(struct hdd_adapter *adapter)
+{
+}
+
+static inline void hdd_nud_ignore_tracking(struct hdd_adapter *adapter,
+					   bool status)
+{
+}
+
+static inline int
+hdd_nud_register_netevent_notifier(struct hdd_context *hdd_ctx)
+{
+	return 0;
+}
+
+static inline void
+hdd_nud_unregister_netevent_notifier(struct hdd_context *hdd_ctx)
+{
+}
+
+static inline void
+hdd_nud_flush_work(struct hdd_adapter *adapter)
+{
+}
+
+static inline void
+hdd_nud_indicate_roam(struct hdd_adapter *adapter)
+{
+}
+
 static inline void
 hdd_nud_failure_work(hdd_cb_handle context, uint8_t vdev_id)
 {

+ 146 - 0
core/hdd/src/wlan_hdd_periodic_sta_stats.c

@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_hdd_periodic_sta_stats.c
+ *
+ * WLAN Host Device Driver periodic STA statistics related implementation
+ *
+ */
+
+#include "wlan_hdd_main.h"
+#include "cfg_ucfg_api.h"
+#include "wlan_hdd_periodic_sta_stats.h"
+
+void hdd_periodic_sta_stats_config(struct hdd_config *config,
+				   struct wlan_objmgr_psoc *psoc)
+{
+	config->periodic_stats_timer_interval =
+		cfg_get(psoc, CFG_PERIODIC_STATS_TIMER_INTERVAL);
+	config->periodic_stats_timer_duration =
+		cfg_get(psoc, CFG_PERIODIC_STATS_TIMER_DURATION);
+}
+
+void hdd_periodic_sta_stats_init(struct hdd_adapter *adapter)
+{
+	adapter->is_sta_periodic_stats_enabled = false;
+}
+
+void hdd_periodic_sta_stats_display(struct hdd_context *hdd_ctx)
+{
+	struct hdd_adapter *adapter, *next_adapter = NULL;
+	struct hdd_stats sta_stats;
+	struct hdd_config *hdd_cfg;
+	char *dev_name;
+	bool should_log;
+	wlan_net_dev_ref_dbgid dbgid = NET_DEV_HOLD_PERIODIC_STA_STATS_DISPLAY;
+
+	if (!hdd_ctx)
+		return;
+
+	hdd_for_each_adapter_dev_held_safe(hdd_ctx, adapter, next_adapter,
+					   dbgid) {
+		should_log = false;
+
+		if (adapter->device_mode != QDF_STA_MODE) {
+			hdd_adapter_dev_put_debug(adapter, dbgid);
+			continue;
+		}
+
+		hdd_cfg = hdd_ctx->config;
+		qdf_mutex_acquire(&adapter->sta_periodic_stats_lock);
+
+		if (!adapter->is_sta_periodic_stats_enabled) {
+			qdf_mutex_release(&adapter->sta_periodic_stats_lock);
+			hdd_adapter_dev_put_debug(adapter, dbgid);
+			continue;
+		}
+
+		adapter->periodic_stats_timer_counter++;
+		if ((adapter->periodic_stats_timer_counter *
+		    GET_BW_COMPUTE_INTV(hdd_cfg)) >=
+				hdd_cfg->periodic_stats_timer_interval) {
+			should_log = true;
+
+			adapter->periodic_stats_timer_count--;
+			if (adapter->periodic_stats_timer_count == 0)
+				adapter->is_sta_periodic_stats_enabled = false;
+			adapter->periodic_stats_timer_counter = 0;
+		}
+		qdf_mutex_release(&adapter->sta_periodic_stats_lock);
+
+		if (should_log) {
+			dev_name = WLAN_HDD_GET_DEV_NAME(adapter);
+			sta_stats = adapter->hdd_stats;
+			hdd_nofl_info("%s: Tx ARP requests: %d", dev_name,
+				      sta_stats.hdd_arp_stats.tx_arp_req_count);
+			hdd_nofl_info("%s: Rx ARP responses: %d", dev_name,
+				      sta_stats.hdd_arp_stats.rx_arp_rsp_count);
+			hdd_nofl_info("%s: Tx DNS requests: %d", dev_name,
+				      sta_stats.hdd_dns_stats.tx_dns_req_count);
+			hdd_nofl_info("%s: Rx DNS responses: %d", dev_name,
+				      sta_stats.hdd_dns_stats.rx_dns_rsp_count);
+		}
+		hdd_adapter_dev_put_debug(adapter, dbgid);
+	}
+}
+
+void hdd_periodic_sta_stats_start(struct hdd_adapter *adapter)
+{
+	struct hdd_config *hdd_cfg = adapter->hdd_ctx->config;
+
+	if ((adapter->device_mode == QDF_STA_MODE) &&
+	    (hdd_cfg->periodic_stats_timer_interval > 0)) {
+		qdf_mutex_acquire(&adapter->sta_periodic_stats_lock);
+
+		adapter->periodic_stats_timer_count =
+			hdd_cfg->periodic_stats_timer_duration /
+			hdd_cfg->periodic_stats_timer_interval;
+		adapter->periodic_stats_timer_counter = 0;
+		if (adapter->periodic_stats_timer_count > 0)
+			adapter->is_sta_periodic_stats_enabled = true;
+
+		qdf_mutex_release(&adapter->sta_periodic_stats_lock);
+	}
+}
+
+void hdd_periodic_sta_stats_stop(struct hdd_adapter *adapter)
+{
+	struct hdd_config *hdd_cfg = adapter->hdd_ctx->config;
+
+	if ((adapter->device_mode == QDF_STA_MODE) &&
+	    (hdd_cfg->periodic_stats_timer_interval > 0)) {
+		qdf_mutex_acquire(&adapter->sta_periodic_stats_lock);
+
+		/* Stop the periodic ARP and DNS stats timer */
+		adapter->periodic_stats_timer_count = 0;
+		adapter->is_sta_periodic_stats_enabled = false;
+
+		qdf_mutex_release(&adapter->sta_periodic_stats_lock);
+	}
+}
+
+void hdd_periodic_sta_stats_mutex_create(struct hdd_adapter *adapter)
+{
+	qdf_mutex_create(&adapter->sta_periodic_stats_lock);
+}
+
+void hdd_periodic_sta_stats_mutex_destroy(struct hdd_adapter *adapter)
+{
+	qdf_mutex_destroy(&adapter->sta_periodic_stats_lock);
+}
+
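
The display routine above piggybacks on the periodic bus-bandwidth work: each invocation bumps periodic_stats_timer_counter, stats are logged once the accumulated time (counter multiplied by the bus-bandwidth compute interval) reaches periodic_stats_timer_interval, and logging stops after periodic_stats_timer_count expirations. The stand-alone sketch below walks through that arithmetic; the three interval values are placeholder assumptions, not the driver defaults.

#include <stdbool.h>
#include <stdio.h>

/* Assumed values: bus-bandwidth work runs every 100 ms, stats are
 * requested every 500 ms for a total duration of 5000 ms.
 */
#define BW_COMPUTE_INTERVAL_MS   100
#define STATS_INTERVAL_MS        500
#define STATS_DURATION_MS        5000

int main(void)
{
	unsigned int counter = 0;      /* ticks since the last log */
	unsigned int count = STATS_DURATION_MS / STATS_INTERVAL_MS;
	bool enabled = count > 0;      /* mirrors is_sta_periodic_stats_enabled */
	unsigned int tick;

	for (tick = 1; enabled; tick++) {
		counter++;
		if (counter * BW_COMPUTE_INTERVAL_MS >= STATS_INTERVAL_MS) {
			printf("tick %u: log periodic STA stats\n", tick);
			counter = 0;
			if (--count == 0)
				enabled = false;   /* duration exhausted */
		}
	}
	return 0;
}

With these values, stats are logged on every fifth tick of the bus-bandwidth work, ten times in total, matching how hdd_periodic_sta_stats_start() and hdd_periodic_sta_stats_display() cooperate.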

+ 16 - 0
core/hdd/src/wlan_hdd_power.c

@@ -1857,6 +1857,20 @@ QDF_STATUS hdd_wlan_shutdown(void)
 		}
 	}
 
+	/*
+	 * After SSR, FW clears its txrx stats. In the host,
+	 * the adapter is intact, so those counts are still
+	 * available. If a Set stats command comes again, the
+	 * host will increment its counts starting from the
+	 * last saved value, i.e., the count before SSR, while
+	 * FW will increment its count from 0. This finally
+	 * reports a mismatch of packet counts between host and
+	 * FW to the framework, creating ambiguity. Therefore,
+	 * reset the host counts here so that after SSR both FW
+	 * and host start incrementing their counts from 0.
+	 */
+	hdd_reset_all_adapters_connectivity_stats(hdd_ctx);
+
 	hdd_reset_all_adapters(hdd_ctx);
 
 	ucfg_ipa_uc_ssr_cleanup(hdd_ctx->pdev);
@@ -2053,6 +2067,8 @@ QDF_STATUS hdd_wlan_re_init(void)
 	if (!adapter)
 		hdd_err("Failed to get adapter");
 
+	hdd_dp_trace_init(hdd_ctx->config);
+
 	ret = hdd_wlan_start_modules(hdd_ctx, true);
 	if (ret) {
 		hdd_err("Failed to start wlan after error");

+ 68 - 0
core/hdd/src/wlan_hdd_rx_monitor.c

@@ -28,6 +28,74 @@
 #include "ol_txrx.h"
 #include "cdp_txrx_mon.h"
 
+void hdd_rx_monitor_callback(ol_osif_vdev_handle context,
+				qdf_nbuf_t rxbuf,
+				void *rx_status)
+{
+	struct hdd_adapter *adapter;
+	int rxstat;
+	struct sk_buff *skb;
+	struct sk_buff *skb_next;
+	unsigned int cpu_index;
+	struct hdd_tx_rx_stats *stats;
+
+	qdf_assert(context);
+	qdf_assert(rxbuf);
+
+	adapter = (struct hdd_adapter *)context;
+	if (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			"invalid adapter %pK", adapter);
+		return;
+	}
+
+	cpu_index = wlan_hdd_get_cpu();
+	stats = &adapter->hdd_stats.tx_rx_stats;
+
+	/* walk the chain until all are processed */
+	skb = (struct sk_buff *)rxbuf;
+	while (skb) {
+		skb_next = skb->next;
+		skb->dev = adapter->dev;
+
+		++stats->per_cpu[cpu_index].rx_packets;
+		++adapter->stats.rx_packets;
+		adapter->stats.rx_bytes += skb->len;
+
+		/* Remove SKB from internal tracking table before submitting
+		 * it to stack
+		 */
+		qdf_net_buf_debug_release_skb(skb);
+
+		/*
+		 * If this is not the last packet on the chain,
+		 * just put it into the backlog queue without scheduling
+		 * the RX softirq.
+		 */
+		if (skb->next) {
+			rxstat = netif_rx(skb);
+		} else {
+			/*
+			 * This is the last packet on the chain,
+			 * so schedule the RX softirq.
+			 */
+			rxstat = netif_rx_ni(skb);
+		}
+
+		if (NET_RX_SUCCESS == rxstat)
+			++stats->per_cpu[cpu_index].rx_delivered;
+		else
+			++stats->per_cpu[cpu_index].rx_refused;
+
+		skb = skb_next;
+	}
+}
+
+void hdd_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+				ol_txrx_rx_mon_fp rx_monitor_cb)
+{
+	txrx->rx.mon = rx_monitor_cb;
+}
+
 int hdd_enable_monitor_mode(struct net_device *dev)
 {
 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

+ 36 - 0
core/hdd/src/wlan_hdd_rx_monitor.h

@@ -23,6 +23,37 @@
 struct ol_txrx_ops;
 
 #ifdef FEATURE_MONITOR_MODE_SUPPORT
+/**
+ * hdd_monitor_set_rx_monitor_cb(): Set rx monitor mode callback function
+ * @txrx: pointer to txrx ops
+ * @rx_monitor_cb: pointer to callback function
+ *
+ * Returns: None
+ */
+void hdd_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+				ol_txrx_rx_mon_fp rx_monitor_cb);
+
+/**
+ * hdd_rx_monitor_callback(): Callback function for receive monitor mode
+ * @vdev: Handle to vdev object
+ * @mpdu: pointer to mpdu to be delivered to os
+ * @rx_status: receive status
+ *
+ * Returns: None
+ */
+void hdd_rx_monitor_callback(ol_osif_vdev_handle vdev,
+				qdf_nbuf_t mpdu,
+				void *rx_status);
+/**
+ * hdd_enable_monitor_mode() - Enable monitor mode
+ * @dev: Pointer to the net_device structure
+ *
+ * This function invokes cdp interface API to enable
+ * monitor mode configuration on the hardware. In this
+ * case sends HTT messages to FW to setup hardware rings
+ *
+ * Return: 0 for success; non-zero for failure
+ */
 int hdd_enable_monitor_mode(struct net_device *dev);
 
 /**
@@ -36,6 +67,11 @@ int hdd_enable_monitor_mode(struct net_device *dev);
  */
 int hdd_disable_monitor_mode(void);
 #else
+static inline void hdd_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+					ol_txrx_rx_mon_fp rx_monitor_cb){ }
+static inline void hdd_rx_monitor_callback(ol_osif_vdev_handle vdev,
+				qdf_nbuf_t mpdu,
+				void *rx_status){ }
 static inline int hdd_enable_monitor_mode(struct net_device *dev)
 {
 	return 0;

+ 723 - 0
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -33,6 +33,7 @@
 #include <qdf_types.h>
 #include <net/ieee80211_radiotap.h>
 #include <cds_sched.h>
+#include <wlan_hdd_napi.h>
 #include <cdp_txrx_cmn.h>
 #include <cdp_txrx_peer_ops.h>
 #include <cds_utils.h>
@@ -75,6 +76,44 @@ struct l2_update_frame {
 } qdf_packed;
 #endif
 
+/* Function definitions and documentation */
+#ifdef QCA_HDD_SAP_DUMP_SK_BUFF
+/**
+ * hdd_softap_dump_sk_buff() - Dump an skb
+ * @skb: skb to dump
+ *
+ * Return: None
+ */
+static void hdd_softap_dump_sk_buff(struct sk_buff *skb)
+{
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "%s: head = %pK ", __func__, skb->head);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_INFO,
+		  "%s: tail = %pK ", __func__, skb->tail);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "%s: end = %pK ", __func__, skb->end);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "%s: len = %d ", __func__, skb->len);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "%s: data_len = %d ", __func__, skb->data_len);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "%s: mac_len = %d", __func__, skb->mac_len);
+
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x ", skb->data[0],
+		  skb->data[1], skb->data[2], skb->data[3], skb->data[4],
+		  skb->data[5], skb->data[6], skb->data[7]);
+	QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+		  "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", skb->data[8],
+		  skb->data[9], skb->data[10], skb->data[11], skb->data[12],
+		  skb->data[13], skb->data[14], skb->data[15]);
+}
+#else
+static void hdd_softap_dump_sk_buff(struct sk_buff *skb)
+{
+}
+#endif
+
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 void hdd_softap_tx_resume_timer_expired_handler(void *adapter_context)
 {
@@ -147,8 +186,159 @@ void hdd_softap_tx_resume_cb(void *adapter_context, bool tx_resume)
 	}
 	hdd_softap_tx_resume_false(adapter, tx_resume);
 }
+
+static inline struct sk_buff *hdd_skb_orphan(struct hdd_adapter *adapter,
+		struct sk_buff *skb)
+{
+	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	int need_orphan = 0;
+	int cpu;
+
+	if (adapter->tx_flow_low_watermark > 0) {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
+		/*
+		 * The TCP TX throttling logic is changed a little after
+		 * 3.19-rc1 kernel, the TCP sending limit will be smaller,
+		 * which will throttle the TCP packets to the host driver.
+		 * The TCP UP LINK throughput will drop heavily. In order to
+		 * fix this issue, need to orphan the socket buffer asap, which
+		 * will call skb's destructor to notify the TCP stack that the
+		 * SKB buffer is unowned. And then the TCP stack will pump more
+		 * packets to host driver.
+		 *
+		 * The TX packets might be dropped for UDP case in the iperf
+		 * testing, so this needs to be protected by flow control.
+		 */
+		need_orphan = 1;
+#else
+		if (hdd_ctx->config->tx_orphan_enable)
+			need_orphan = 1;
+#endif
+	} else if (hdd_ctx->config->tx_orphan_enable) {
+		if (qdf_nbuf_is_ipv4_tcp_pkt(skb) ||
+		    qdf_nbuf_is_ipv6_tcp_pkt(skb))
+			need_orphan = 1;
+	}
+
+	if (need_orphan) {
+		skb_orphan(skb);
+		cpu = qdf_get_smp_processor_id();
+		++adapter->hdd_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
+	} else
+		skb = skb_unshare(skb, GFP_ATOMIC);
+
+	return skb;
+}
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
+#define IEEE8021X_AUTH_TYPE_EAP 0
+#define EAP_CODE_OFFSET 18
+#define EAP_CODE_FAILURE 4
+
+/* Wait EAP Failure frame timeout in (MS) */
+#define EAP_FRM_TIME_OUT 80
+
+/**
+ * hdd_softap_inspect_tx_eap_pkt() - Inspect eap pkt tx/tx-completion
+ * @adapter: pointer to hdd adapter
+ * @skb: sk_buff
+ * @tx_comp: tx sending or tx completion
+ *
+ * Inspect the EAP-Failure pkt tx sending and tx completion.
+ *
+ * Return: void
+ */
+static void hdd_softap_inspect_tx_eap_pkt(struct hdd_adapter *adapter,
+					  struct sk_buff *skb,
+					  bool tx_comp)
+{
+	struct qdf_mac_addr *mac_addr;
+	uint8_t *data;
+	uint8_t auth_type, eap_code;
+	struct hdd_station_info *sta_info;
+	struct hdd_hostapd_state *hapd_state;
+
+	if (qdf_likely(QDF_NBUF_CB_GET_PACKET_TYPE(skb) !=
+	    QDF_NBUF_CB_PACKET_TYPE_EAPOL) || skb->len < (EAP_CODE_OFFSET + 1))
+		return;
+
+	if (cds_is_driver_recovering() || cds_is_driver_in_bad_state() ||
+	    cds_is_load_or_unload_in_progress()) {
+		hdd_debug("Recovery/(Un)load in Progress. Ignore!!!");
+		return;
+	}
+	if (adapter->device_mode != QDF_P2P_GO_MODE)
+		return;
+	hapd_state = WLAN_HDD_GET_HOSTAP_STATE_PTR(adapter);
+	if (!hapd_state || hapd_state->bss_state != BSS_START) {
+		hdd_debug("Hostapd State is not START");
+		return;
+	}
+	data = skb->data;
+	auth_type = *(uint8_t *)(data + EAPOL_PACKET_TYPE_OFFSET);
+	if (auth_type != IEEE8021X_AUTH_TYPE_EAP)
+		return;
+	eap_code = *(uint8_t *)(data + EAP_CODE_OFFSET);
+	if (eap_code != EAP_CODE_FAILURE)
+		return;
+	mac_addr = (struct qdf_mac_addr *)skb->data;
+	sta_info = hdd_get_sta_info_by_mac(&adapter->sta_info_list,
+					   mac_addr->bytes,
+					   STA_INFO_SOFTAP_INSPECT_TX_EAP_PKT);
+	if (!sta_info)
+		return;
+	if (tx_comp) {
+		hdd_debug("eap_failure frm tx done "QDF_MAC_ADDR_FMT,
+			  QDF_MAC_ADDR_REF(mac_addr->bytes));
+		qdf_atomic_clear_bit(PENDING_TYPE_EAP_FAILURE,
+				     &sta_info->pending_eap_frm_type);
+		qdf_event_set(&hapd_state->qdf_sta_eap_frm_done_event);
+	} else {
+		hdd_debug("eap_failure frm tx pending "QDF_MAC_ADDR_FMT,
+			  QDF_MAC_ADDR_REF(mac_addr->bytes));
+		qdf_event_reset(&hapd_state->qdf_sta_eap_frm_done_event);
+		qdf_atomic_set_bit(PENDING_TYPE_EAP_FAILURE,
+				   &sta_info->pending_eap_frm_type);
+		QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) = 1;
+	}
+	hdd_put_sta_info_ref(&adapter->sta_info_list, &sta_info, true,
+			     STA_INFO_SOFTAP_INSPECT_TX_EAP_PKT);
+}
+
+void hdd_softap_check_wait_for_tx_eap_pkt(struct hdd_adapter *adapter,
+					  struct qdf_mac_addr *mac_addr)
+{
+	struct hdd_station_info *sta_info;
+	QDF_STATUS qdf_status;
+	struct hdd_hostapd_state *hapd_state;
+
+	if (adapter->device_mode != QDF_P2P_GO_MODE)
+		return;
+	hapd_state = WLAN_HDD_GET_HOSTAP_STATE_PTR(adapter);
+	if (!hapd_state || hapd_state->bss_state != BSS_START) {
+		hdd_err("Hostapd State is not START");
+		return;
+	}
+	sta_info = hdd_get_sta_info_by_mac(
+				&adapter->sta_info_list,
+				mac_addr->bytes,
+				STA_INFO_SOFTAP_CHECK_WAIT_FOR_TX_EAP_PKT);
+	if (!sta_info)
+		return;
+	if (qdf_atomic_test_bit(PENDING_TYPE_EAP_FAILURE,
+				&sta_info->pending_eap_frm_type)) {
+		hdd_debug("eap_failure frm pending "QDF_MAC_ADDR_FMT,
+			  QDF_MAC_ADDR_REF(mac_addr->bytes));
+		qdf_status = qdf_wait_for_event_completion(
+				&hapd_state->qdf_sta_eap_frm_done_event,
+				EAP_FRM_TIME_OUT);
+		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+			hdd_debug("eap_failure tx timeout");
+	}
+	hdd_put_sta_info_ref(&adapter->sta_info_list, &sta_info, true,
+			     STA_INFO_SOFTAP_CHECK_WAIT_FOR_TX_EAP_PKT);
+}
+
 #ifndef MDM_PLATFORM
 void hdd_ipa_update_rx_mcbc_stats(struct hdd_adapter *adapter,
 				  struct sk_buff *skb)
@@ -184,6 +374,313 @@ void hdd_ipa_update_rx_mcbc_stats(struct hdd_adapter *adapter,
 }
 #endif
 
+#ifdef SAP_DHCP_FW_IND
+/**
+ * hdd_post_dhcp_ind() - Send DHCP START/STOP indication to FW
+ * @adapter: pointer to hdd adapter
+ * @mac_addr: peer MAC address
+ * @type: WMA message type
+ *
+ * Return: error number
+ */
+int hdd_post_dhcp_ind(struct hdd_adapter *adapter, uint8_t *mac_addr,
+		      uint16_t type)
+{
+	tAniDHCPInd pmsg;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	hdd_debug("Post DHCP indication,sta_mac=" QDF_MAC_ADDR_FMT
+		  " ,  type=%d", QDF_MAC_ADDR_REF(mac_addr), type);
+
+	if (!adapter) {
+		hdd_err("NULL adapter");
+		return -EINVAL;
+	}
+
+	pmsg.msgType = type;
+	pmsg.msgLen = (uint16_t) sizeof(tAniDHCPInd);
+	pmsg.device_mode = adapter->device_mode;
+	qdf_mem_copy(pmsg.adapterMacAddr.bytes,
+		     adapter->mac_addr.bytes,
+		     QDF_MAC_ADDR_SIZE);
+	qdf_mem_copy(pmsg.peerMacAddr.bytes,
+		     mac_addr,
+		     QDF_MAC_ADDR_SIZE);
+
+	status = wma_process_dhcp_ind(cds_get_context(QDF_MODULE_ID_WMA),
+				      &pmsg);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Post DHCP Ind MSG fail", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+#define DHCP_CLIENT_MAC_ADDR_OFFSET 0x46
+
+/**
+ * hdd_softap_notify_dhcp_ind() - Notify SAP for DHCP indication for tx desc
+ * @context: pointer to the HDD adapter
+ * @netbuf: pointer to OS packet (sk_buff)
+ *
+ * Return: None
+ */
+static void hdd_softap_notify_dhcp_ind(void *context, struct sk_buff *netbuf)
+{
+	struct hdd_ap_ctx *hdd_ap_ctx;
+	uint8_t *dest_mac_addr;
+	struct hdd_adapter *adapter = context;
+
+	if (hdd_validate_adapter(adapter))
+		return;
+
+	hdd_ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter);
+	if (!hdd_ap_ctx) {
+		hdd_err("HDD sap context is NULL");
+		return;
+	}
+
+	dest_mac_addr = netbuf->data + DHCP_CLIENT_MAC_ADDR_OFFSET;
+
+	hdd_post_dhcp_ind(adapter, dest_mac_addr, WMA_DHCP_STOP_IND);
+}
+
+int hdd_softap_inspect_dhcp_packet(struct hdd_adapter *adapter,
+				   struct sk_buff *skb,
+				   enum qdf_proto_dir dir)
+{
+	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
+	struct hdd_station_info *hdd_sta_info;
+	int errno = 0;
+	struct qdf_mac_addr *src_mac;
+
+	if (((adapter->device_mode == QDF_SAP_MODE) ||
+	     (adapter->device_mode == QDF_P2P_GO_MODE)) &&
+	    ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_DHCP ==
+				QDF_NBUF_CB_GET_PACKET_TYPE(skb)) ||
+	     (dir == QDF_RX && qdf_nbuf_is_ipv4_dhcp_pkt(skb) == true))) {
+
+		src_mac = (struct qdf_mac_addr *)(skb->data +
+						  DHCP_CLIENT_MAC_ADDR_OFFSET);
+
+		subtype = qdf_nbuf_get_dhcp_subtype(skb);
+		hdd_sta_info = hdd_get_sta_info_by_mac(
+					&adapter->sta_info_list,
+					src_mac->bytes,
+					STA_INFO_SOFTAP_INSPECT_DHCP_PACKET);
+		if (!hdd_sta_info) {
+			hdd_debug("Station not found");
+			return -EINVAL;
+		}
+
+		hdd_debug("ENTER: type=%d, phase=%d, nego_status=%d",
+			  subtype,
+			  hdd_sta_info->dhcp_phase,
+			  hdd_sta_info->dhcp_nego_status);
+
+		switch (subtype) {
+		case QDF_PROTO_DHCP_DISCOVER:
+			if (dir != QDF_RX)
+				break;
+			if (hdd_sta_info->dhcp_nego_status == DHCP_NEGO_STOP)
+				errno =	hdd_post_dhcp_ind(
+						adapter,
+						hdd_sta_info->sta_mac.bytes,
+						WMA_DHCP_START_IND);
+			hdd_sta_info->dhcp_phase = DHCP_PHASE_DISCOVER;
+			hdd_sta_info->dhcp_nego_status = DHCP_NEGO_IN_PROGRESS;
+			break;
+		case QDF_PROTO_DHCP_OFFER:
+			hdd_sta_info->dhcp_phase = DHCP_PHASE_OFFER;
+			break;
+		case QDF_PROTO_DHCP_REQUEST:
+			if (dir != QDF_RX)
+				break;
+			if (hdd_sta_info->dhcp_nego_status == DHCP_NEGO_STOP)
+				errno = hdd_post_dhcp_ind(
+						adapter,
+						hdd_sta_info->sta_mac.bytes,
+						WMA_DHCP_START_IND);
+			hdd_sta_info->dhcp_nego_status = DHCP_NEGO_IN_PROGRESS;
+			/* fallthrough */
+		case QDF_PROTO_DHCP_DECLINE:
+			if (dir == QDF_RX)
+				hdd_sta_info->dhcp_phase = DHCP_PHASE_REQUEST;
+			break;
+		case QDF_PROTO_DHCP_ACK:
+		case QDF_PROTO_DHCP_NACK:
+			hdd_sta_info->dhcp_phase = DHCP_PHASE_ACK;
+			if (hdd_sta_info->dhcp_nego_status ==
+				DHCP_NEGO_IN_PROGRESS) {
+				hdd_debug("Setting NOTIFY_COMP Flag");
+				QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb)
+									= 1;
+			}
+			hdd_sta_info->dhcp_nego_status = DHCP_NEGO_STOP;
+			break;
+		default:
+			break;
+		}
+
+		hdd_debug("EXIT: phase=%d, nego_status=%d",
+			  hdd_sta_info->dhcp_phase,
+			  hdd_sta_info->dhcp_nego_status);
+		hdd_put_sta_info_ref(&adapter->sta_info_list, &hdd_sta_info,
+				     true, STA_INFO_SOFTAP_INSPECT_DHCP_PACKET);
+	}
+
+	return errno;
+}
+#else
+static void hdd_softap_notify_dhcp_ind(void *context, struct sk_buff *netbuf)
+{
+}
+#endif /* SAP_DHCP_FW_IND */
+
+#if defined(IPA_OFFLOAD)
+static
+struct sk_buff *hdd_sap_skb_orphan(struct hdd_adapter *adapter,
+				   struct sk_buff *skb)
+{
+	if (!qdf_nbuf_ipa_owned_get(skb)) {
+		skb = hdd_skb_orphan(adapter, skb);
+	} else {
+		/*
+		 * Clear the IPA ownership after check it to avoid ipa_free_skb
+		 * is called when Tx completed for intra-BSS Tx packets
+		 */
+		qdf_nbuf_ipa_owned_clear(skb);
+	}
+	return skb;
+}
+#else
+static inline
+struct sk_buff *hdd_sap_skb_orphan(struct hdd_adapter *adapter,
+				   struct sk_buff *skb)
+{
+	return hdd_skb_orphan(adapter, skb);
+}
+#endif /* IPA_OFFLOAD */
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+static
+void hdd_softap_get_tx_resource(struct hdd_adapter *adapter,
+				struct sk_buff *skb)
+{
+	if (QDF_NBUF_CB_GET_IS_BCAST(skb) || QDF_NBUF_CB_GET_IS_MCAST(skb))
+		hdd_get_tx_resource(adapter, &adapter->mac_addr,
+				    WLAN_SAP_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
+	else
+		hdd_get_tx_resource(adapter, (struct qdf_mac_addr *)skb->data,
+				    WLAN_SAP_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
+}
+#else
+#define hdd_softap_get_tx_resource(adapter, skb)
+#endif
+
+static QDF_STATUS hdd_softap_validate_peer_state(struct hdd_adapter *adapter,
+						 struct sk_buff *skb)
+{
+	struct qdf_mac_addr *dest_mac_addr, *mac_addr;
+	static struct qdf_mac_addr bcast_mac_addr = QDF_MAC_ADDR_BCAST_INIT;
+
+	dest_mac_addr = (struct qdf_mac_addr *)skb->data;
+
+	if (QDF_NBUF_CB_GET_IS_MCAST(skb))
+		mac_addr = &bcast_mac_addr;
+	else
+		mac_addr = dest_mac_addr;
+
+	if (!QDF_NBUF_CB_GET_IS_BCAST(skb) && !QDF_NBUF_CB_GET_IS_MCAST(skb)) {
+		/* for a unicast frame */
+		enum ol_txrx_peer_state peer_state;
+		void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+
+		QDF_BUG(soc);
+		hdd_wds_replace_peer_mac(soc, adapter, mac_addr->bytes);
+		peer_state = cdp_peer_state_get(soc, adapter->vdev_id,
+						mac_addr->bytes);
+
+		if (peer_state == OL_TXRX_PEER_STATE_INVALID) {
+			hdd_sapd_debug_rl("Failed to find right station");
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		if (peer_state != OL_TXRX_PEER_STATE_CONN &&
+		    peer_state != OL_TXRX_PEER_STATE_AUTH) {
+			hdd_sapd_debug_rl("Station not connected yet");
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		if (peer_state == OL_TXRX_PEER_STATE_CONN) {
+			if (ntohs(skb->protocol) != HDD_ETHERTYPE_802_1_X) {
+				hdd_sapd_debug_rl("NON-EAPOL packet in non-Authenticated state");
+				return QDF_STATUS_E_FAILURE;
+			}
+		}
+	}
+	return QDF_STATUS_SUCCESS;
+}
+
+static
+QDF_STATUS hdd_softap_validate_driver_state(struct hdd_adapter *adapter)
+{
+	struct hdd_ap_ctx *ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter);
+
+	if (qdf_unlikely(cds_is_driver_transitioning())) {
+		hdd_err_rl("driver is transitioning, drop pkt");
+		return QDF_STATUS_E_ABORTED;
+	}
+
+	if (qdf_unlikely(adapter->hdd_ctx->hdd_wlan_suspended)) {
+		hdd_err_rl("Device is system suspended, drop pkt");
+		return QDF_STATUS_E_ABORTED;
+	}
+
+	/*
+	 * If the device is operating on a DFS Channel
+	 * then check if SAP is in CAC WAIT state and
+	 * drop the packets. In CAC WAIT state device
+	 * is expected not to transmit any frames.
+	 * SAP starts Tx only after the BSS START is
+	 * done.
+	 */
+	if (qdf_unlikely(ap_ctx->dfs_cac_block_tx)) {
+		hdd_sapd_debug_rl("In CAC WAIT state, drop pkt");
+		return QDF_STATUS_E_ABORTED;
+	}
+
+	if (qdf_unlikely(ap_ctx->hostapd_state.bss_state != BSS_START)) {
+		hdd_sapd_debug_rl("SAP is not in START state (%d). Ignore!!!",
+				  ap_ctx->hostapd_state.bss_state);
+		return QDF_STATUS_E_ABORTED;
+	}
+
+	if (qdf_unlikely(!adapter->tx_fn)) {
+		hdd_sapd_debug_rl("TX function not registered by the data path");
+		return QDF_STATUS_E_ABORTED;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static void hdd_softap_config_tx_pkt_tracing(struct hdd_adapter *adapter,
+					     struct sk_buff *skb)
+{
+	if (hdd_is_current_high_throughput(adapter->hdd_ctx))
+		return;
+
+	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
+	QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
+	qdf_dp_trace_set_track(skb, QDF_TX);
+	DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
+			     QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(skb),
+			     sizeof(qdf_nbuf_data(skb)),
+			     QDF_TX));
+}
+
 /**
  * __hdd_softap_hard_start_xmit() - Transmit a frame
  * @skb: pointer to OS packet (sk_buff)
@@ -310,6 +807,22 @@ void hdd_softap_tx_timeout(struct net_device *net_dev)
 	osif_vdev_sync_op_stop(vdev_sync);
 }
 
+void hdd_softap_init_tx_rx(struct hdd_adapter *adapter)
+{
+	qdf_mem_zero(&adapter->stats, sizeof(struct net_device_stats));
+}
+
+QDF_STATUS hdd_softap_deinit_tx_rx(struct hdd_adapter *adapter)
+{
+	QDF_BUG(adapter);
+	if (!adapter)
+		return QDF_STATUS_E_FAILURE;
+
+	adapter->tx_fn = NULL;
+
+	return QDF_STATUS_SUCCESS;
+}
+
 static void
 hdd_reset_sta_info_during_reattach(struct hdd_station_info *sta_info)
 {
@@ -446,6 +959,216 @@ QDF_STATUS hdd_softap_init_tx_rx_sta(struct hdd_adapter *adapter,
 	return status;
 }
 
+/**
+ * hdd_softap_tsf_timestamp_rx() - time stamp Rx netbuf
+ * @hdd_ctx: pointer to HDD context
+ * @netbuf: pointer to a Rx netbuf
+ *
+ * Return: None
+ */
+#ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
+static inline void hdd_softap_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
+					       qdf_nbuf_t netbuf)
+{
+	uint64_t target_time;
+
+	if (!hdd_tsf_is_rx_set(hdd_ctx))
+		return;
+
+	target_time = ktime_to_us(netbuf->tstamp);
+	hdd_rx_timestamp(netbuf, target_time);
+}
+#else
+static inline void hdd_softap_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
+					       qdf_nbuf_t netbuf)
+{
+}
+#endif
+
+/**
+ * hdd_softap_notify_tx_compl_cbk() - callback to notify tx completion
+ * @skb: pointer to skb data
+ * @context: pointer to the vdev adapter
+ * @flag: tx status flag
+ *
+ * Return: None
+ */
+static void hdd_softap_notify_tx_compl_cbk(struct sk_buff *skb,
+					   void *context, uint16_t flag)
+{
+	int errno;
+	struct hdd_adapter *adapter = context;
+
+	errno = hdd_validate_adapter(adapter);
+	if (errno)
+		return;
+
+	if (QDF_NBUF_CB_PACKET_TYPE_DHCP == QDF_NBUF_CB_GET_PACKET_TYPE(skb)) {
+		hdd_debug("sending DHCP indication");
+		hdd_softap_notify_dhcp_ind(context, skb);
+	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
+						QDF_NBUF_CB_PACKET_TYPE_EAPOL) {
+		hdd_softap_inspect_tx_eap_pkt(adapter, skb, true);
+	}
+}
+
+QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
+{
+	struct hdd_adapter *adapter = NULL;
+	QDF_STATUS qdf_status;
+	unsigned int cpu_index;
+	struct sk_buff *skb = NULL;
+	struct sk_buff *next = NULL;
+	struct hdd_context *hdd_ctx = NULL;
+	struct qdf_mac_addr *src_mac;
+	struct hdd_station_info *sta_info;
+	bool is_eapol = false;
+	struct hdd_tx_rx_stats *stats;
+	uint16_t dump_level;
+
+	/* Sanity check on inputs */
+	if (unlikely((!adapter_context) || (!rx_buf))) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	adapter = (struct hdd_adapter *)adapter_context;
+	if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "Magic cookie(%x) for adapter sanity verification is invalid",
+			  adapter->magic);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	if (unlikely(!hdd_ctx)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HDD context is Null", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dump_level = cfg_get(hdd_ctx->psoc, CFG_ENABLE_DEBUG_PACKET_LOG);
+
+	stats = &adapter->hdd_stats.tx_rx_stats;
+	/* walk the chain until all are processed */
+	next = (struct sk_buff *)rx_buf;
+
+	while (next) {
+		skb = next;
+		next = skb->next;
+		skb->next = NULL;
+
+		hdd_softap_dump_sk_buff(skb);
+
+		skb->dev = adapter->dev;
+
+		if (unlikely(!skb->dev)) {
+			QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA,
+				  QDF_TRACE_LEVEL_ERROR,
+				  "%s: ERROR!!Invalid netdevice", __func__);
+			qdf_nbuf_free(skb);
+			continue;
+		}
+		cpu_index = wlan_hdd_get_cpu();
+		++stats->per_cpu[cpu_index].rx_packets;
+		++adapter->stats.rx_packets;
+		/* count aggregated RX frame into stats */
+		adapter->stats.rx_packets += qdf_nbuf_get_gso_segs(skb);
+		adapter->stats.rx_bytes += skb->len;
+
+		/* Send DHCP Indication to FW */
+		src_mac = (struct qdf_mac_addr *)(skb->data +
+						  QDF_NBUF_SRC_MAC_OFFSET);
+		sta_info = hdd_get_sta_info_by_mac(
+					&adapter->sta_info_list,
+					(uint8_t *)src_mac,
+					STA_INFO_SOFTAP_RX_PACKET_CBK);
+
+		if (sta_info) {
+			sta_info->rx_packets++;
+			sta_info->rx_bytes += skb->len;
+			hdd_softap_inspect_dhcp_packet(adapter, skb, QDF_RX);
+			hdd_put_sta_info_ref(&adapter->sta_info_list, &sta_info,
+					     true,
+					     STA_INFO_SOFTAP_RX_PACKET_CBK);
+		}
+
+		if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
+			is_eapol = true;
+
+		if (qdf_unlikely(dump_level >= DEBUG_PKTLOG_TYPE_EAPOL))
+			hdd_debug_pkt_dump(skb, skb->len - skb->data_len,
+					   &dump_level);
+
+		if (qdf_unlikely(is_eapol &&
+		    !(hdd_nbuf_dst_addr_is_self_addr(adapter, skb) ||
+		    hdd_nbuf_dst_addr_is_mld_addr(adapter, skb)))) {
+			qdf_nbuf_free(skb);
+			continue;
+		}
+
+		hdd_pkt_add_timestamp(adapter, QDF_PKT_RX_DRIVER_EXIT,
+				      qdf_get_log_timestamp(), skb);
+
+		hdd_event_eapol_log(skb, QDF_RX);
+		qdf_dp_trace_log_pkt(adapter->vdev_id,
+				     skb, QDF_RX, QDF_TRACE_DEFAULT_PDEV_ID);
+		DPTRACE(qdf_dp_trace(skb,
+			QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
+			QDF_TRACE_DEFAULT_PDEV_ID,
+			qdf_nbuf_data_addr(skb),
+			sizeof(qdf_nbuf_data(skb)), QDF_RX));
+		DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
+				QDF_DP_TRACE_RX_PACKET_RECORD, 0, QDF_RX));
+
+		if (hdd_rx_pkt_tracepoints_enabled())
+			qdf_trace_dp_packet(skb, QDF_RX, NULL, 0);
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		/* hold configurable wakelock for unicast traffic */
+		if (!hdd_is_current_high_throughput(hdd_ctx) &&
+		    hdd_ctx->config->rx_wakelock_timeout &&
+		    skb->pkt_type != PACKET_BROADCAST &&
+		    skb->pkt_type != PACKET_MULTICAST) {
+			cds_host_diag_log_work(&hdd_ctx->rx_wake_lock,
+						   hdd_ctx->config->rx_wakelock_timeout,
+						   WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
+			qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock,
+							  hdd_ctx->config->
+								  rx_wakelock_timeout);
+		}
+
+		/* Remove SKB from internal tracking table before submitting
+		 * it to stack
+		 */
+		qdf_net_buf_debug_release_skb(skb);
+
+		hdd_softap_tsf_timestamp_rx(hdd_ctx, skb);
+
+		if (is_eapol && SEND_EAPOL_OVER_NL) {
+			if (wlan_hdd_cfg80211_rx_control_port(
+							adapter->dev,
+							adapter->mac_addr.bytes,
+							skb, false))
+				qdf_status = QDF_STATUS_SUCCESS;
+			else
+				qdf_status = QDF_STATUS_E_INVAL;
+			dev_kfree_skb(skb);
+		} else {
+			qdf_status = hdd_rx_deliver_to_stack(adapter, skb);
+		}
+
+		if (QDF_IS_STATUS_SUCCESS(qdf_status))
+			++stats->per_cpu[cpu_index].rx_delivered;
+		else
+			++stats->per_cpu[cpu_index].rx_refused;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
 QDF_STATUS hdd_softap_deregister_sta(struct hdd_adapter *adapter,
 				     struct hdd_station_info **sta_info)
 {

+ 81 - 0
core/hdd/src/wlan_hdd_stats.c

@@ -7406,6 +7406,87 @@ void wlan_hdd_display_tx_multiq_stats(hdd_cb_handle context, uint8_t vdev_id)
 }
 #endif
 
+void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
+{
+	struct hdd_adapter *adapter = NULL, *next_adapter = NULL;
+	struct hdd_tx_rx_stats *stats;
+	int i = 0;
+	uint32_t total_rx_pkt, total_rx_dropped,
+		 total_rx_delv, total_rx_refused;
+	wlan_net_dev_ref_dbgid dbgid = NET_DEV_HOLD_CACHE_STATION_STATS_CB;
+	uint32_t total_tx_pkt;
+	uint32_t total_tx_dropped;
+	uint32_t total_tx_orphaned;
+
+	hdd_for_each_adapter_dev_held_safe(ctx, adapter, next_adapter,
+					   dbgid) {
+		total_rx_pkt = 0;
+		total_rx_dropped = 0;
+		total_rx_delv = 0;
+		total_rx_refused = 0;
+		total_tx_pkt = 0;
+		total_tx_dropped = 0;
+		total_tx_orphaned = 0;
+		stats = &adapter->hdd_stats.tx_rx_stats;
+
+		if (adapter->vdev_id == WLAN_INVALID_VDEV_ID) {
+			hdd_adapter_dev_put_debug(adapter, dbgid);
+			continue;
+		}
+
+		hdd_debug("adapter: %u", adapter->vdev_id);
+		for (i = 0; i < NUM_CPUS; i++) {
+			total_rx_pkt += stats->per_cpu[i].rx_packets;
+			total_rx_dropped += stats->per_cpu[i].rx_dropped;
+			total_rx_delv += stats->per_cpu[i].rx_delivered;
+			total_rx_refused += stats->per_cpu[i].rx_refused;
+			total_tx_pkt += stats->per_cpu[i].tx_called;
+			total_tx_dropped += stats->per_cpu[i].tx_dropped;
+			total_tx_orphaned += stats->per_cpu[i].tx_orphaned;
+		}
+
+		/* dev_put has to be done here */
+		hdd_adapter_dev_put_debug(adapter, dbgid);
+
+		for (i = 0; i < NUM_CPUS; i++) {
+			if (!stats->per_cpu[i].tx_called)
+				continue;
+
+			hdd_debug("Tx CPU[%d]: called %u, dropped %u, orphaned %u",
+				  i, stats->per_cpu[i].tx_called,
+				  stats->per_cpu[i].tx_dropped,
+				  stats->per_cpu[i].tx_orphaned);
+		}
+
+		hdd_debug("TX - called %u, dropped %u orphan %u",
+			  total_tx_pkt, total_tx_dropped,
+			  total_tx_orphaned);
+
+		wlan_hdd_display_tx_multiq_stats(stats);
+
+		for (i = 0; i < NUM_CPUS; i++) {
+			if (stats->per_cpu[i].rx_packets == 0)
+				continue;
+			hdd_debug("Rx CPU[%d]: packets %u, dropped %u, delivered %u, refused %u",
+				  i, stats->per_cpu[i].rx_packets,
+				  stats->per_cpu[i].rx_dropped,
+				  stats->per_cpu[i].rx_delivered,
+				  stats->per_cpu[i].rx_refused);
+		}
+
+		hdd_debug("RX - packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u GRO - agg %u drop %u non-agg %u flush_skip %u low_tput_flush %u disabled(conc %u low-tput %u)",
+			  total_rx_pkt, total_rx_dropped,
+			  qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),
+			  total_rx_delv,
+			  total_rx_refused, stats->rx_aggregated,
+			  stats->rx_gro_dropped, stats->rx_non_aggregated,
+			  stats->rx_gro_flush_skip,
+			  stats->rx_gro_low_tput_flush,
+			  qdf_atomic_read(&ctx->disable_rx_ol_in_concurrency),
+			  qdf_atomic_read(&ctx->disable_rx_ol_in_low_tput));
+	}
+}
+
 #ifdef QCA_SUPPORT_CP_STATS
 /**
  * hdd_lost_link_cp_stats_info_cb() - callback function to get lost

File diff suppressed because it is too large
+ 1974 - 30
core/hdd/src/wlan_hdd_tx_rx.c


+ 1 - 0
core/hdd/src/wlan_hdd_wext.c

@@ -87,6 +87,7 @@
 #include "hif.h"
 #endif
 #include "pld_common.h"
+#include "wlan_hdd_lro.h"
 #include "cds_utils.h"
 #include "wlan_osif_request_manager.h"
 #include "os_if_wifi_pos.h"

+ 7 - 0
core/wma/src/wma_main.c

@@ -3335,6 +3335,13 @@ QDF_STATUS wma_open(struct wlan_objmgr_psoc *psoc,
 					   wmi_peer_sta_kickout_event_id,
 					   wma_peer_sta_kickout_event_handler,
 					   WMA_RX_SERIALIZER_CTX);
+
+	/* register for ARP stats response event */
+	wmi_unified_register_event_handler(wma_handle->wmi_handle,
+					   wmi_get_arp_stats_req_id,
+					   wma_get_arp_stats_handler,
+					   WMA_RX_SERIALIZER_CTX);
+
 	/* register for fw state response event */
 	wma_register_fw_state_events(wma_handle->wmi_handle);
 

Some files were not shown because too many files changed in this diff