Selaa lähdekoodia

qcacld-3.0: Implement DP component per packet TX/RX support

Implement per packet TX/RX support in DP componentization model.
This change will help to process per packet TX/RX path in STA interface.

Change-Id: I3fbd8dbe26ccd24fc50c6e67cd1213c72837cf9b
CRs-Fixed: 3173449
Karthik Kantamneni 3 vuotta sitten
vanhempi
sitoutus
5dc87dece5

+ 48 - 3
components/dp/core/inc/wlan_dp_main.h

@@ -134,6 +134,31 @@ struct wlan_dp_intf*
 dp_get_intf_by_macaddr(struct wlan_dp_psoc_context *dp_ctx,
 dp_get_intf_by_macaddr(struct wlan_dp_psoc_context *dp_ctx,
 		       struct qdf_mac_addr *addr);
 		       struct qdf_mac_addr *addr);
 
 
+/* MAX iteration count to wait for dp packet process to complete */
+#define DP_TASK_MAX_WAIT_CNT  100
+/* Milli seconds to wait when packet is getting processed */
+#define DP_TASK_WAIT_TIME 200
+
+#define DP_TX_FN_CLR (1 << 0)
+#define DP_TX_SAP_STOP (1 << 1)
+#define DP_TX_DFS_CAC_BLOCK (1 << 2)
+#define WLAN_DP_SUSPEND (1 << 3)
+
+/**
+ * dp_wait_complete_tasks() - Wait for DP tasks to complete
+ * @dp_ctx: DP context pointer
+ *
+ * This function waits for dp tasks like TX to be completed
+ *
+ * Return: None
+ */
+void dp_wait_complete_tasks(struct wlan_dp_psoc_context *dp_ctx);
+
+#define NUM_RX_QUEUES 5
+
+#define dp_enter() QDF_TRACE_ENTER(QDF_MODULE_ID_DP, "enter")
+#define dp_exit() QDF_TRACE_EXIT(QDF_MODULE_ID_DP, "exit")
+
 /**
 /**
  * dp_peer_obj_create_notification(): dp peer create handler
  * dp_peer_obj_create_notification(): dp peer create handler
  * @peer: peer which is going to created by objmgr
  * @peer: peer which is going to created by objmgr
@@ -332,10 +357,12 @@ dp_del_latency_critical_client(struct wlan_objmgr_vdev *vdev,
 }
 }
 
 
 /**
 /**
- * is_dp_intf_valid() - Check if interface is valid
- * @dp_intf: DP interface
+ * is_dp_intf_valid() - to check DP interface valid
+ * @dp_intf: DP interface pointer
  *
  *
- * Return: 0 if interface is valid, else error code
+ * API to check whether DP interface is valid
+ *
+ * Return: 0 if interface is valid, else error code
  */
  */
 int is_dp_intf_valid(struct wlan_dp_intf *dp_intf);
 int is_dp_intf_valid(struct wlan_dp_intf *dp_intf);
 
 
@@ -551,4 +578,22 @@ void dp_trace_init(struct wlan_objmgr_psoc *psoc);
  * Return: None
  * Return: None
  */
  */
 void dp_set_dump_dp_trace(uint16_t cmd_type, uint16_t count);
 void dp_set_dump_dp_trace(uint16_t cmd_type, uint16_t count);
+
+#ifdef WLAN_FEATURE_DP_BUS_BANDWIDTH
+#define DP_BUS_BW_CFG(bus_bw_cfg)	bus_bw_cfg
+#define DP_BUS_BW_GET_RX_LVL(dp_ctx)	(dp_ctx)->cur_rx_level
+static inline bool
+dp_is_low_tput_gro_enable(struct wlan_dp_psoc_context *dp_ctx)
+{
+	return (qdf_atomic_read(&dp_ctx->low_tput_gro_enable)) ? true : false;
+}
+#else
+#define DP_BUS_BW_CFG(bus_bw_cfg)	0
+#define DP_BUS_BW_GET_RX_LVL(dp_ctx)	0
+static inline bool
+dp_is_low_tput_gro_enable(struct wlan_dp_psoc_context *dp_ctx)
+{
+	return false;
+}
+#endif
 #endif
 #endif

+ 112 - 14
components/dp/core/inc/wlan_dp_priv.h

@@ -30,10 +30,13 @@
 #include "wlan_dp_cfg.h"
 #include "wlan_dp_cfg.h"
 #include "wlan_dp_objmgr.h"
 #include "wlan_dp_objmgr.h"
 #include <cdp_txrx_misc.h>
 #include <cdp_txrx_misc.h>
+#include <dp_rx_thread.h>
 #include "qdf_periodic_work.h"
 #include "qdf_periodic_work.h"
 #include <cds_api.h>
 #include <cds_api.h>
 #include "pld_common.h"
 #include "pld_common.h"
 #include "wlan_dp_nud_tracking.h"
 #include "wlan_dp_nud_tracking.h"
+#include <i_qdf_net_stats.h>
+#include <qdf_types.h>
 
 
 #ifndef NUM_TX_RX_HISTOGRAM
 #ifndef NUM_TX_RX_HISTOGRAM
 #define NUM_TX_RX_HISTOGRAM 128
 #define NUM_TX_RX_HISTOGRAM 128
@@ -143,6 +146,9 @@ struct wlan_dp_psoc_cfg {
 	uint32_t fisa_enable;
 	uint32_t fisa_enable;
 
 
 	int icmp_req_to_fw_mark_interval;
 	int icmp_req_to_fw_mark_interval;
+
+	bool lro_enable;
+	bool gro_enable;
 };
 };
 
 
 /**
 /**
@@ -188,6 +194,60 @@ struct dp_stats {
 	struct dp_dns_stats dns_stats;
 	struct dp_dns_stats dns_stats;
 	struct dp_tcp_stats tcp_stats;
 	struct dp_tcp_stats tcp_stats;
 	struct dp_icmpv4_stats icmpv4_stats;
 	struct dp_icmpv4_stats icmpv4_stats;
+	struct dp_dhcp_stats dhcp_stats;
+	struct dp_eapol_stats eapol_stats;
+};
+
+/**
+ * enum dhcp_phase - Per Peer DHCP Phases
+ * @DHCP_PHASE_ACK: upon receiving DHCP_ACK/NAK message in REQUEST phase or
+ *         DHCP_DECLINE message in OFFER phase
+ * @DHCP_PHASE_DISCOVER: upon receiving DHCP_DISCOVER message in ACK phase
+ * @DHCP_PHASE_OFFER: upon receiving DHCP_OFFER message in DISCOVER phase
+ * @DHCP_PHASE_REQUEST: upon receiving DHCP_REQUEST message in OFFER phase or
+ *         ACK phase (Renewal process)
+ */
+enum dhcp_phase {
+	DHCP_PHASE_ACK,
+	DHCP_PHASE_DISCOVER,
+	DHCP_PHASE_OFFER,
+	DHCP_PHASE_REQUEST
+};
+
+/**
+ * enum dhcp_nego_status - Per Peer DHCP Negotiation Status
+ * @DHCP_NEGO_STOP: when the peer is in ACK phase or client disassociated
+ * @DHCP_NEGO_IN_PROGRESS: when the peer is in DISCOVER or REQUEST
+ *         (Renewal process) phase
+ */
+enum dhcp_nego_status {
+	DHCP_NEGO_STOP,
+	DHCP_NEGO_IN_PROGRESS
+};
+
+/**
+ * Pending frame type of EAP_FAILURE, bit number used in "pending_eap_frm_type"
+ * of sta_info.
+ */
+#define DP_PENDING_TYPE_EAP_FAILURE  0
+
+enum bss_intf_state {
+	BSS_INTF_STOP,
+	BSS_INTF_START,
+};
+
+struct wlan_dp_sta_info {
+	struct qdf_mac_addr sta_mac;
+	unsigned long pending_eap_frm_type;
+	enum dhcp_phase dhcp_phase;
+	enum dhcp_nego_status dhcp_nego_status;
+};
+
+struct wlan_dp_conn_info {
+	struct qdf_mac_addr bssid;
+	struct qdf_mac_addr peer_macaddr;
+	uint8_t proxy_arp_service;
+	uint8_t is_authenticated;
 };
 };
 
 
 /**
 /**
@@ -197,11 +257,20 @@ struct dp_stats {
  * @device_mode: Device Mode
  * @device_mode: Device Mode
  * @intf_id: Interface ID
  * @intf_id: Interface ID
  * @node: list node for membership in the interface list
  * @node: list node for membership in the interface list
- * @tx_rx_disallow_mask: TX/RX disallow mask
  * @vdev: object manager vdev context
  * @vdev: object manager vdev context
  * @dev: netdev reference
  * @dev: netdev reference
  * @stats: Netdev stats
  * @stats: Netdev stats
  * @mic_work: Work to handle MIC error
  * @mic_work: Work to handle MIC error
+ * @num_active_task: Active task count
+ * @sap_tx_block_mask: SAP TX block mask
+ * @gro_disallowed: GRO disallowed flag
+ * @gro_flushed: GRO flushed flag
+ * @runtime_disable_rx_thread: Runtime Rx thread flag
+ * @rx_stack: function pointer Rx packet handover
+ * @tx_fn: function pointer to send Tx packet
+ * @conn_info: STA connection information
+ * @bss_state: AP BSS state
+ * @qdf_sta_eap_frm_done_event: EAP frame event management
  */
  */
 struct wlan_dp_intf {
 struct wlan_dp_intf {
 	struct wlan_dp_psoc_context *dp_ctx;
 	struct wlan_dp_psoc_context *dp_ctx;
@@ -214,7 +283,6 @@ struct wlan_dp_intf {
 
 
 	qdf_list_node_t node;
 	qdf_list_node_t node;
 
 
-	uint32_t tx_rx_disallow_mask;
 	struct wlan_objmgr_vdev *vdev;
 	struct wlan_objmgr_vdev *vdev;
 	qdf_netdev_t dev;
 	qdf_netdev_t dev;
 	/**Device TX/RX statistics*/
 	/**Device TX/RX statistics*/
@@ -229,7 +297,6 @@ struct wlan_dp_intf {
 	qdf_net_dev_stats stats;
 	qdf_net_dev_stats stats;
 	bool con_status;
 	bool con_status;
 	bool dad;
 	bool dad;
-	uint8_t active_ac;
 	uint32_t pkt_type_bitmap;
 	uint32_t pkt_type_bitmap;
 	uint32_t track_arp_ip;
 	uint32_t track_arp_ip;
 	uint8_t dns_payload[256];
 	uint8_t dns_payload[256];
@@ -244,14 +311,33 @@ struct wlan_dp_intf {
 	uint64_t prev_fwd_tx_packets;
 	uint64_t prev_fwd_tx_packets;
 	uint64_t prev_fwd_rx_packets;
 	uint64_t prev_fwd_rx_packets;
 #endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
 #endif /*WLAN_FEATURE_DP_BUS_BANDWIDTH*/
-#ifdef WLAN_FEATURE_MSCS
-	unsigned long mscs_prev_tx_vo_pkts;
-	uint32_t mscs_counter;
-#endif /* WLAN_FEATURE_MSCS */
 	struct dp_mic_work mic_work;
 	struct dp_mic_work mic_work;
 #ifdef WLAN_NUD_TRACKING
 #ifdef WLAN_NUD_TRACKING
 	struct dp_nud_tracking_info nud_tracking;
 	struct dp_nud_tracking_info nud_tracking;
 #endif
 #endif
+	qdf_atomic_t num_active_task;
+	uint32_t sap_tx_block_mask;
+
+	uint8_t gro_disallowed[DP_MAX_RX_THREADS];
+	uint8_t gro_flushed[DP_MAX_RX_THREADS];
+
+	bool runtime_disable_rx_thread;
+	ol_txrx_rx_fp rx_stack;
+	ol_txrx_tx_fp tx_fn;
+	struct wlan_dp_conn_info conn_info;
+
+	enum bss_intf_state bss_state;
+	qdf_event_t qdf_sta_eap_frm_done_event;
+};
+
+/**
+ * enum RX_OFFLOAD - Receive offload modes
+ * @CFG_LRO_ENABLED: Large Rx offload
+ * @CFG_GRO_ENABLED: Generic Rx Offload
+ */
+enum RX_OFFLOAD {
+	CFG_LRO_ENABLED = 1,
+	CFG_GRO_ENABLED,
 };
 };
 
 
 /**
 /**
@@ -296,9 +382,7 @@ struct wlan_dp_psoc_context {
 	/* For Rx thread non GRO/LRO packet accounting */
 	/* For Rx thread non GRO/LRO packet accounting */
 	uint64_t no_rx_offload_pkt_cnt;
 	uint64_t no_rx_offload_pkt_cnt;
 	uint64_t no_tx_offload_pkt_cnt;
 	uint64_t no_tx_offload_pkt_cnt;
-#ifdef QCA_CONFIG_SMP
-	bool is_ol_rx_thread_suspended;
-#endif
+
 	bool wlan_suspended;
 	bool wlan_suspended;
 	/* Flag keeps track of wiphy suspend/resume */
 	/* Flag keeps track of wiphy suspend/resume */
 	bool is_wiphy_suspended;
 	bool is_wiphy_suspended;
@@ -329,10 +413,6 @@ struct wlan_dp_psoc_context {
 	qdf_atomic_t disable_rx_ol_in_concurrency;
 	qdf_atomic_t disable_rx_ol_in_concurrency;
 	/* disable RX offload (GRO/LRO) in low throughput scenarios */
 	/* disable RX offload (GRO/LRO) in low throughput scenarios */
 	qdf_atomic_t disable_rx_ol_in_low_tput;
 	qdf_atomic_t disable_rx_ol_in_low_tput;
-#ifdef WLAN_NS_OFFLOAD
-	/* IPv6 notifier callback for handling NS offload on change in IP */
-	struct notifier_block ipv6_notifier;
-#endif
 
 
 	uint16_t txrx_hist_idx;
 	uint16_t txrx_hist_idx;
 	struct tx_rx_histogram *txrx_hist;
 	struct tx_rx_histogram *txrx_hist;
@@ -341,5 +421,23 @@ struct wlan_dp_psoc_context {
 #ifdef FEATURE_BUS_BANDWIDTH_MGR
 #ifdef FEATURE_BUS_BANDWIDTH_MGR
 	struct bbm_context *bbm_ctx;
 	struct bbm_context *bbm_ctx;
 #endif
 #endif
+
+	QDF_STATUS(*receive_offload_cb)(struct wlan_dp_intf *, qdf_nbuf_t nbuf);
+
+	struct {
+		qdf_atomic_t rx_aggregation;
+		uint8_t gro_force_flush[DP_MAX_RX_THREADS];
+		bool force_gro_enable;
+	}
+	dp_agg_param;
+
+	qdf_atomic_t rx_skip_qdisc_chk_conc;
+
+	uint32_t arp_connectivity_map;
+
+	qdf_wake_lock_t rx_wake_lock;
+
+	enum RX_OFFLOAD ol_enable;
 };
 };
+
 #endif /* end  of _WLAN_DP_PRIV_STRUCT_H_ */
 #endif /* end  of _WLAN_DP_PRIV_STRUCT_H_ */

+ 559 - 6
components/dp/core/inc/wlan_dp_txrx.h

@@ -19,9 +19,131 @@
 #ifndef __WLAN_DP_TXRX_H__
 #ifndef __WLAN_DP_TXRX_H__
 #define __WLAN_DP_TXRX_H__
 #define __WLAN_DP_TXRX_H__
 
 
-#include <wlan_dp_priv.h>
-#include <wlan_dp_main.h>
-#include <qdf_types.h>
+#include <cds_api.h>
+#include <qdf_tracepoint.h>
+#include <qdf_pkt_add_timestamp.h>
+#include <enet.h>
+#include <qdf_tracepoint.h>
+
+/** DP Tx Time out value */
+#define DP_TX_TIMEOUT   qdf_system_msecs_to_ticks(5000)
+
+#define DP_TX_STALL_THRESHOLD 4
+
+#ifdef FEATURE_WLAN_WAPI
+#define IS_DP_ETHERTYPE_WAI(_nbuf) (qdf_ntohs(qdf_nbuf_get_protocol(_nbuf)) == \
+								ETHERTYPE_WAI)
+#else
+#define IS_DP_ETHERTYPE_WAI(_nbuf) (false)
+#endif
+
+#ifdef CFG80211_CTRL_FRAME_SRC_ADDR_TA_ADDR
+#define SEND_EAPOL_OVER_NL true
+#else
+#define SEND_EAPOL_OVER_NL  false
+#endif
+
+#define DP_CONNECTIVITY_CHECK_SET_ARP		1
+#define DP_CONNECTIVITY_CHECK_SET_DNS		2
+#define DP_CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE	3
+#define DP_CONNECTIVITY_CHECK_SET_ICMPV4	4
+#define DP_CONNECTIVITY_CHECK_SET_ICMPV6	5
+#define DP_CONNECTIVITY_CHECK_SET_TCP_SYN	6
+#define DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK	7
+#define DP_CONNECTIVITY_CHECK_SET_TCP_ACK	8
+
+/**
+ * dp_reset_all_intfs_connectivity_stats() - reset connectivity stats
+ * @dp_ctx: pointer to DP Context
+ *
+ * Return: None
+ */
+void dp_reset_all_intfs_connectivity_stats(struct wlan_dp_psoc_context *dp_ctx);
+
+/**
+ * dp_softap_check_wait_for_tx_eap_pkt() - Check and wait for eap failure
+ * pkt completion event
+ * @dp_intf: pointer to DP interface
+ * @mac_addr: mac address of peer
+ *
+ * Check and wait for eap failure pkt tx completion.
+ *
+ * Return: void
+ */
+void dp_softap_check_wait_for_tx_eap_pkt(struct wlan_dp_intf *dp_intf,
+					 struct qdf_mac_addr *mac_addr);
+
+#ifdef SAP_DHCP_FW_IND
+/**
+ * dp_post_dhcp_ind() - Send DHCP START/STOP indication to FW
+ * @dp_intf: pointer to dp interface
+ * @mac_addr: mac address
+ * @dhcp_start: true to send DHCP START indication, false for DHCP STOP
+ *
+ * Return: error number
+ */
+int dp_post_dhcp_ind(struct wlan_dp_intf *dp_intf,
+		     uint8_t *mac_addr, bool dhcp_start);
+
+/**
+ * dp_softap_inspect_dhcp_packet() - Inspect DHCP packet
+ * @dp_intf: pointer to dp interface
+ * @nbuf: pointer to network buffer (sk_buff)
+ * @dir: direction
+ *
+ * Inspect the Tx/Rx frame, and send DHCP START/STOP notification to the FW
+ * through WMI message, during DHCP based IP address acquisition phase.
+ *
+ * - Send DHCP_START notification to FW when SAP gets DHCP Discovery
+ * - Send DHCP_STOP notification to FW when SAP sends DHCP ACK/NAK
+ *
+ * DHCP subtypes are determined by a status octet in the DHCP Message type
+ * option (option code 53 (0x35)).
+ *
+ * Each peer will be in one of 4 DHCP phases, starts from QDF_DHCP_PHASE_ACK,
+ * and transitioned per DHCP message type as it arrives.
+ *
+ * - QDF_DHCP_PHASE_DISCOVER: upon receiving DHCP_DISCOVER message in ACK phase
+ * - QDF_DHCP_PHASE_OFFER: upon receiving DHCP_OFFER message in DISCOVER phase
+ * - QDF_DHCP_PHASE_REQUEST: upon receiving DHCP_REQUEST message in OFFER phase
+ *	or ACK phase (Renewal process)
+ * - QDF_DHCP_PHASE_ACK : upon receiving DHCP_ACK/NAK message in REQUEST phase
+ *	or DHCP_DECLINE message in OFFER phase
+ *
+ * Return: error number
+ */
+int dp_softap_inspect_dhcp_packet(struct wlan_dp_intf *dp_intf,
+				  qdf_nbuf_t nbuf,
+				  enum qdf_proto_dir dir);
+#else
+static inline
+int dp_post_dhcp_ind(struct wlan_dp_intf *dp_intf,
+		     uint8_t *mac_addr, bool dhcp_start)
+{
+	return 0;
+}
+
+static inline
+int dp_softap_inspect_dhcp_packet(struct wlan_dp_intf *dp_intf,
+				  qdf_nbuf_t nbuf,
+				  enum qdf_proto_dir dir)
+{
+	return 0;
+}
+#endif
+
+/**
+ * dp_rx_flush_packet_cbk() - flush rx packet handler
+ * @dp_intf_ctx: pointer to DP interface context
+ * @vdev_id: vdev_id of the packets to be flushed
+ *
+ * Flush rx packet callback registered with data path. DP will call this to
+ * notify when packets for a particular vdev is to be flushed out.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS dp_rx_flush_packet_cbk(void *dp_intf_ctx, uint8_t vdev_id);
 
 
 /**
 /**
  * dp_softap_start_xmit() - Transmit a frame for SAP interface
  * dp_softap_start_xmit() - Transmit a frame for SAP interface
@@ -30,13 +152,24 @@
  *
  *
  * Return: QDF_STATUS_SUCCESS on successful transmission
  * Return: QDF_STATUS_SUCCESS on successful transmission
  */
  */
-QDF_STATUS
-dp_softap_start_xmit(qdf_nbuf_t nbuf, struct wlan_dp_intf *dp_intf);
+QDF_STATUS dp_softap_start_xmit(qdf_nbuf_t nbuf, struct wlan_dp_intf *dp_intf);
+
+/**
+ * dp_softap_tx_timeout() - TX timeout handler
+ * @dp_intf: pointer to DP interface
+ *
+ * Timeout API called for mode interfaces (SoftAP/P2P GO)
+ * when TX transmission takes too long.
+ * called by the OS_IF layer legacy driver.
+ *
+ * Return: None
+ */
+void dp_softap_tx_timeout(struct wlan_dp_intf *dp_intf);
 
 
 /**
 /**
  * dp_softap_rx_packet_cbk() - Receive packet handler for SAP
  * dp_softap_rx_packet_cbk() - Receive packet handler for SAP
  * @dp_intf_context: pointer to DP interface context
  * @dp_intf_context: pointer to DP interface context
- * @rxBuf: pointer to rx qdf_nbuf
+ * @rx_buf: pointer to rx qdf_nbuf
  *
  *
  * Receive callback registered with data path.  DP will call this to notify
  * Receive callback registered with data path.  DP will call this to notify
  * when one or more packets were received for a registered
  * when one or more packets were received for a registered
@@ -58,6 +191,16 @@ dp_softap_rx_packet_cbk(void *intf_ctx, qdf_nbuf_t rx_buf);
 QDF_STATUS
 QDF_STATUS
 dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf);
 dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf);
 
 
+/**
+ * dp_tx_timeout() - DP Tx timeout API
+ * @dp_intf: Data path interface pointer
+ *
+ * Function called by OS_IF when there is any timeout during transmission.
+ *
+ * Return: none
+ */
+void dp_tx_timeout(struct wlan_dp_intf *dp_intf);
+
 /**
 /**
  * dp_rx_packet_cbk() - Receive packet handler
  * dp_rx_packet_cbk() - Receive packet handler
  * @dp_intf_context: pointer to DP interface context
  * @dp_intf_context: pointer to DP interface context
@@ -72,4 +215,414 @@ dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf);
  */
  */
 QDF_STATUS dp_rx_packet_cbk(void *dp_intf_context, qdf_nbuf_t rx_buf);
 QDF_STATUS dp_rx_packet_cbk(void *dp_intf_context, qdf_nbuf_t rx_buf);
 
 
+#if defined(WLAN_SUPPORT_RX_FISA)
+/**
+ * wlan_dp_rx_fisa_cbk() - Entry function to FISA to handle aggregation
+ * @dp_soc: core txrx main context
+ * @dp_vdev: Handle DP vdev
+ * @rxbuf_list: List of nbufs to be aggregated
+ *
+ * Return: Success on aggregation
+ */
+QDF_STATUS wlan_dp_rx_fisa_cbk(void *dp_soc, void *dp_vdev,
+			       qdf_nbuf_t rxbuf_list);
+
+/**
+ * wlan_dp_rx_fisa_flush_by_ctx_id() - Flush function to end of context
+ *				   flushing of aggregates
+ * @dp_soc: core txrx main context
+ * @ring_num: REO number to flush the flow Rxed on the REO
+ *
+ * Return: Success on flushing the flows for the REO
+ */
+QDF_STATUS wlan_dp_rx_fisa_flush_by_ctx_id(void *dp_soc, int ring_num);
+
+/**
+ * wlan_dp_rx_fisa_flush_by_vdev_id() - Flush fisa aggregates per vdev id
+ * @dp_soc: core txrx main context
+ * @vdev_id: vdev ID
+ *
+ * Return: Success on flushing the flows for the vdev
+ */
+QDF_STATUS wlan_dp_rx_fisa_flush_by_vdev_id(void *dp_soc, uint8_t vdev_id);
+#else
+static inline QDF_STATUS wlan_dp_rx_fisa_flush_by_vdev_id(void *dp_soc,
+							  uint8_t vdev_id)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * wlan_dp_rx_deliver_to_stack() - DP helper function to deliver RX pkts to stack
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to nbuf
+ *
+ * The function calls the appropriate stack function depending upon the packet
+ * type and whether GRO/LRO is enabled.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
+				       qdf_nbuf_t nbuf);
+
+/**
+ * dp_rx_thread_gro_flush_ind_cbk() - receive handler to flush GRO packets
+ * @dp_intf_ctx: pointer to DP interface context
+ * @rx_ctx_id: RX CTX Id for which flush should happen
+ *
+ * Receive callback registered with DP layer which flushes GRO packets
+ * for a given RX CTX ID (RX Thread)
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS dp_rx_thread_gro_flush_ind_cbk(void *dp_intf_ctx, int rx_ctx_id);
+
+/**
+ * dp_rx_pkt_thread_enqueue_cbk() - receive pkt handler to enqueue into thread
+ * @dp_intf_ctx: pointer to DP interface context
+ * @nbuf_list: pointer to qdf_nbuf list
+ *
+ * Receive callback registered with DP layer which enqueues packets into dp rx
+ * thread
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS dp_rx_pkt_thread_enqueue_cbk(void *dp_intf_ctx,
+					qdf_nbuf_t nbuf_list);
+
+/**
+ * dp_disable_rx_ol_for_low_tput() - Disable Rx offload in low TPUT scenario
+ * @dp_ctx: dp context
+ * @disable: true/false to disable/enable the Rx offload
+ *
+ * Return: none
+ */
+void dp_disable_rx_ol_for_low_tput(struct wlan_dp_psoc_context *dp_ctx,
+				   bool disable);
+
+/**
+ * dp_tx_rx_collect_connectivity_stats_info() - collect connectivity stats
+ * @nbuf: pointer to n/w buffer
+ * @context: pointer to DP interface
+ * @action: action done on pkt.
+ * @pkt_type: data pkt type
+ *
+ * Return: None
+ */
+void
+dp_tx_rx_collect_connectivity_stats_info(qdf_nbuf_t nbuf, void *context,
+		enum connectivity_stats_pkt_status action, uint8_t *pkt_type);
+
+static inline void
+dp_nbuf_fill_gso_size(qdf_netdev_t dev, qdf_nbuf_t nbuf)
+{
+	unsigned long val;
+
+	if (qdf_nbuf_is_cloned(nbuf) && qdf_nbuf_is_nonlinear(nbuf) &&
+	    qdf_nbuf_get_gso_size(nbuf) == 0 &&
+	    qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
+		val = dev->mtu - ((qdf_nbuf_transport_header(nbuf) -
+				   qdf_nbuf_network_header(nbuf))
+				  + qdf_nbuf_get_tcp_hdr_len(nbuf));
+		qdf_nbuf_set_gso_size(nbuf, val);
+	}
+}
+
+#ifdef CONFIG_HL_SUPPORT
+static inline QDF_STATUS
+dp_nbuf_nontso_linearize(qdf_nbuf_t nbuf)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS
+dp_nbuf_nontso_linearize(qdf_nbuf_t nbuf)
+{
+	if (qdf_nbuf_is_nonlinear(nbuf) && qdf_nbuf_is_tso(nbuf) == false) {
+		if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
+			return QDF_STATUS_E_NOMEM;
+	}
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+#ifdef FEATURE_WLAN_DIAG_SUPPORT
+void dp_event_eapol_log(qdf_nbuf_t nbuf, enum qdf_proto_dir dir);
+#else
+static inline
+void dp_event_eapol_log(qdf_nbuf_t nbuf, enum qdf_proto_dir dir)
+{}
+#endif
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+static inline
+qdf_nbuf_t dp_nbuf_orphan(struct wlan_dp_intf *dp_intf,
+			  qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
+	unsigned int tx_flow_low_watermark;
+	int need_orphan = 0;
+	int cpu;
+
+	tx_flow_low_watermark =
+	   dp_ops->dp_get_tx_flow_low_watermark(dp_ops->callback_ctx,
+						dp_intf->intf_id);
+	if (tx_flow_low_watermark > 0) {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
+		/*
+		 * The TCP TX throttling logic is changed a little after
+		 * 3.19-rc1 kernel, the TCP sending limit will be smaller,
+		 * which will throttle the TCP packets to the host driver.
+		 * The TCP UP LINK throughput will drop heavily. In order to
+		 * fix this issue, need to orphan the socket buffer asap, which
+		 * will call skb's destructor to notify the TCP stack that the
+		 * SKB buffer is unowned. And then the TCP stack will pump more
+		 * packets to host driver.
+		 *
+		 * The TX packets might be dropped for UDP case in the iperf
+		 * testing. So need to be protected by follow control.
+		 */
+		need_orphan = 1;
+#else
+		if (dp_ctx->dp_cfg.tx_orphan_enable)
+			need_orphan = 1;
+#endif
+	} else if (dp_ctx->dp_cfg.tx_orphan_enable) {
+		if (qdf_nbuf_is_ipv4_tcp_pkt(nbuf) ||
+		    qdf_nbuf_is_ipv6_tcp_pkt(nbuf))
+			need_orphan = 1;
+	}
+
+	if (need_orphan) {
+		qdf_nbuf_orphan(nbuf);
+		cpu = qdf_get_smp_processor_id();
+		++dp_intf->dp_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
+	} else {
+		nbuf = __qdf_nbuf_unshare(nbuf);
+	}
+
+	return nbuf;
+}
+
+/**
+ * dp_get_tx_resource() - check tx resources and take action
+ * @dp_intf: DP interface
+ * @mac_addr: mac address
+ *
+ * Return: none
+ */
+void dp_get_tx_resource(struct wlan_dp_intf *dp_intf,
+			struct qdf_mac_addr *mac_addr);
+
+#else
+/**
+ * dp_nbuf_orphan() - skb_unshare a cloned packet else skb_orphan
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to nbuf data packet
+ *
+ * Return: pointer to nbuf structure
+ */
+static inline
+qdf_nbuf_t dp_nbuf_orphan(struct wlan_dp_intf *dp_intf,
+			  qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_t nskb;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+#endif
+	int cpu;
+
+	dp_nbuf_fill_gso_size(dp_intf->dev, nbuf);
+
+	nskb =  __qdf_nbuf_unshare(nbuf);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0))
+	if (unlikely(dp_ctx->dp_cfg.tx_orphan_enable) && nskb == nbuf) {
+		/*
+		 * For UDP packets we want to orphan the packet to allow the app
+		 * to send more packets. The flow would ultimately be controlled
+		 * by the limited number of tx descriptors for the vdev.
+		 */
+		cpu = qdf_get_smp_processor_id();
+		++dp_intf->dp_stats.tx_rx_stats.per_cpu[cpu].tx_orphaned;
+		qdf_nbuf_orphan(nbuf);
+	}
+#endif
+	return nskb;
+}
+
+/**
+ * dp_get_tx_resource() - check tx resources and take action
+ * @dp_intf: DP interface
+ * @mac_addr: mac address
+ *
+ * Return: none
+ */
+static inline
+void dp_get_tx_resource(struct wlan_dp_intf *dp_intf,
+			struct qdf_mac_addr *mac_addr)
+{
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * dp_start_xmit() - Transmit a frame
+ * @dp_intf: pointer to DP interface
+ * @nbuf: n/w buffer
+ *
+ * Function called to Transmit a n/w buffer in STA mode.
+ *
+ * Return: Status of the transmission
+ */
+QDF_STATUS
+dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf);
+
+#ifdef FEATURE_MONITOR_MODE_SUPPORT
+/**
+ * dp_mon_rx_packet_cbk() - Receive callback registered with OL layer.
+ * @context: pointer to qdf context
+ * @rxbuf: pointer to rx qdf_nbuf
+ *
+ * TL will call this to notify the HDD when one or more packets were
+ * received for a registered STA.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf);
+
+/**
+ * dp_monitor_set_rx_monitor_cb(): Set rx monitor mode callback function
+ * @txrx: pointer to txrx ops
+ * @rx_monitor_cb: pointer to callback function
+ *
+ * Returns: None
+ */
+void dp_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+				  ol_txrx_rx_mon_fp rx_monitor_cb);
+/**
+ * dp_rx_monitor_callback(): Callback function for receive monitor mode
+ * @vdev: Handle to vdev object
+ * @mpdu: pointer to mpdu to be delivered to os
+ * @rx_status: receive status
+ *
+ * Returns: None
+ */
+void dp_rx_monitor_callback(ol_osif_vdev_handle vdev,
+			    qdf_nbuf_t mpdu,
+			    void *rx_status);
+
+#else
+static inline
+QDF_STATUS dp_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline
+void dp_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+				  ol_txrx_rx_mon_fp rx_monitor_cb) { }
+
+static inline
+void dp_rx_monitor_callback(ol_osif_vdev_handle vdev, qdf_nbuf_t mpdu,
+			    void *rx_status) { }
+#endif
+
+/**
+ * dp_sta_notify_tx_comp_cb() - notify tx comp callback registered with dp
+ * @nbuf: pointer to nbuf
+ * @ctx: osif context
+ * @flag: tx status flag
+ *
+ * Return: None
+ */
+void dp_sta_notify_tx_comp_cb(qdf_nbuf_t nbuf, void *ctx, uint16_t flag);
+
+/**
+ * dp_softap_notify_tx_compl_cbk() - notify softap tx comp registered with dp
+ * @nbuf: pointer to nbuf
+ * @context: osif context
+ * @flag: tx status flag
+ *
+ * Return: None
+ */
+void dp_softap_notify_tx_compl_cbk(qdf_nbuf_t nbuf,
+				   void *context, uint16_t flag);
+
+/**
+ * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
+ *
+ * Return: True if any rx pkt tracepoint is enabled else false
+ */
+static inline bool dp_rx_pkt_tracepoints_enabled(void)
+{
+	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
+		qdf_trace_dp_rx_udp_pkt_enabled() ||
+		qdf_trace_dp_rx_pkt_enabled());
+}
+
+#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
+/**
+ * wlan_dp_pkt_add_timestamp() - add timestamp in data payload
+ *
+ * @dp_intf: DP interface
+ * @index: timestamp index which decides offset in payload
+ * @nbuf: Network socket buffer
+ *
+ * Return: none
+ */
+void wlan_dp_pkt_add_timestamp(struct wlan_dp_intf *dp_intf,
+			       enum qdf_pkt_timestamp_index index,
+			       qdf_nbuf_t nbuf);
+#else
+static inline
+void wlan_dp_pkt_add_timestamp(struct wlan_dp_intf *dp_intf,
+			  enum qdf_pkt_timestamp_index index,
+			  qdf_nbuf_t nbuf)
+{
+}
+#endif
+
+#if defined(FEATURE_LRO)
+/**
+ * dp_lro_set_reset() - API for Disable/Enable LRO
+ * @dp_intf: DP interface pointer
+ * @enable_flag: enable or disable LRO.
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS dp_lro_set_reset(struct wlan_dp_intf *dp_intf, uint8_t enable_flag);
+#else
+static inline
+QDF_STATUS dp_lro_set_reset(struct wlan_dp_intf *dp_intf,
+			    uint8_t enable_flag)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif /* FEATURE_LRO */
+
+#ifdef RECEIVE_OFFLOAD
+/**
+ * dp_rx_ol_init() - Initialize Rx offload mode (LRO or GRO)
+ * @dp_ctx: pointer to DP Context
+ * @is_wifi3_0_target: true if it wifi3.0 target
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS dp_rx_ol_init(struct wlan_dp_psoc_context *dp_ctx,
+			 bool is_wifi3_0_target);
+#else /* RECEIVE_OFFLOAD */
+
+static inline QDF_STATUS
+dp_rx_ol_init(struct wlan_dp_psoc_context *dp_ctx,
+	      bool is_wifi3_0_target)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
 #endif
 #endif

+ 3 - 4
components/dp/core/src/wlan_dp_bus_bandwidth.c

@@ -38,6 +38,7 @@
 #include "wlan_dp_periodic_sta_stats.h"
 #include "wlan_dp_periodic_sta_stats.h"
 #include "wlan_mlme_ucfg_api.h"
 #include "wlan_mlme_ucfg_api.h"
 #include <i_qdf_net_stats.h>
 #include <i_qdf_net_stats.h>
+#include "wlan_dp_txrx.h"
 
 
 #ifdef FEATURE_BUS_BANDWIDTH_MGR
 #ifdef FEATURE_BUS_BANDWIDTH_MGR
 /**
 /**
@@ -1524,11 +1525,9 @@ static void dp_pld_request_bus_bandwidth(struct wlan_dp_psoc_context *dp_ctx,
 							rx_packets);
 							rx_packets);
 
 
 		if (rx_packets < dp_ctx->dp_cfg.bus_bw_low_threshold)
 		if (rx_packets < dp_ctx->dp_cfg.bus_bw_low_threshold)
-			dp_ops->dp_disable_rx_ol_for_low_tput(ctx,
-							      true);
+			dp_disable_rx_ol_for_low_tput(dp_ctx, true);
 		else
 		else
-			dp_ops->dp_disable_rx_ol_for_low_tput(ctx,
-							      false);
+			dp_disable_rx_ol_for_low_tput(dp_ctx, false);
 
 
 		/*
 		/*
 		 * force disable pktlog and only re-enable based
 		 * force disable pktlog and only re-enable based

+ 89 - 1
components/dp/core/src/wlan_dp_main.c

@@ -31,6 +31,7 @@
 #include <wlan_cm_ucfg_api.h>
 #include <wlan_cm_ucfg_api.h>
 #include "wlan_dp_nud_tracking.h"
 #include "wlan_dp_nud_tracking.h"
 #include "target_if_dp_comp.h"
 #include "target_if_dp_comp.h"
+#include "wlan_dp_txrx.h"
 
 
 /* Global DP context */
 /* Global DP context */
 static struct wlan_dp_psoc_context *gp_dp_ctx;
 static struct wlan_dp_psoc_context *gp_dp_ctx;
@@ -151,9 +152,58 @@ int is_dp_intf_valid(struct wlan_dp_intf *dp_intf)
 		dp_err("Interface is NULL");
 		dp_err("Interface is NULL");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
+
+	if (!dp_intf->dev) {
+		dp_err("DP interface net_device is null");
+		return -EINVAL;
+	}
+
+	if (!(dp_intf->dev->flags & IFF_UP)) {
+		dp_info_rl("DP interface '%s' is not up",
+			   dp_intf->dev->name);
+		return -EAGAIN;
+	}
+
 	return validate_interface_id(dp_intf->intf_id);
 	return validate_interface_id(dp_intf->intf_id);
 }
 }
 
 
+static QDF_STATUS
+dp_intf_wait_for_task_complete(struct wlan_dp_intf *dp_intf)
+{
+	int count = DP_TASK_MAX_WAIT_CNT;
+	int r;
+
+	while (count) {
+		r = atomic_read(&dp_intf->num_active_task);
+
+		if (!r)
+			return QDF_STATUS_SUCCESS;
+
+		if (--count) {
+			dp_err_rl("Waiting for DP task to complete: %d", count);
+			qdf_sleep(DP_TASK_WAIT_TIME);
+		}
+	}
+
+	dp_err("Timed-out waiting for DP task completion");
+	return QDF_STATUS_E_TIMEOUT;
+}
+
+void dp_wait_complete_tasks(struct wlan_dp_psoc_context *dp_ctx)
+{
+	struct wlan_dp_intf *dp_intf, *dp_intf_next = NULL;
+
+	dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
+		/*
+		 * If timeout happens for one interface better to bail out
+	 * instead of waiting for other interfaces' task completion
+		 */
+		if (qdf_atomic_read(&dp_intf->num_active_task))
+			if (dp_intf_wait_for_task_complete(dp_intf))
+				break;
+	}
+}
+
 #ifdef CONFIG_DP_TRACE
 #ifdef CONFIG_DP_TRACE
 /**
 /**
  * dp_convert_string_to_u8_array() - used to convert string into u8 array
  * dp_convert_string_to_u8_array() - used to convert string into u8 array
@@ -297,7 +347,7 @@ void dp_set_dump_dp_trace(uint16_t cmd_type, uint16_t count)
 		qdf_dp_trace_disable_live_mode();
 		qdf_dp_trace_disable_live_mode();
 }
 }
 #else
 #else
-static void dp_trace_init(struct wlan_dp_psoc_cfg *config)
+void dp_trace_init(struct wlan_objmgr_psoc *psoc)
 {
 {
 }
 }
 
 
@@ -820,6 +870,7 @@ dp_vdev_obj_create_notification(struct wlan_objmgr_vdev *vdev, void *arg)
 	struct wlan_dp_intf *dp_intf;
 	struct wlan_dp_intf *dp_intf;
 	QDF_STATUS status = QDF_STATUS_SUCCESS;
 	QDF_STATUS status = QDF_STATUS_SUCCESS;
 	struct qdf_mac_addr *mac_addr;
 	struct qdf_mac_addr *mac_addr;
+	struct qdf_mac_addr intf_mac;
 
 
 	dp_info("DP VDEV OBJ create notification");
 	dp_info("DP VDEV OBJ create notification");
 
 
@@ -832,6 +883,14 @@ dp_vdev_obj_create_notification(struct wlan_objmgr_vdev *vdev, void *arg)
 	dp_ctx =  dp_psoc_get_priv(psoc);
 	dp_ctx =  dp_psoc_get_priv(psoc);
 	mac_addr = (struct qdf_mac_addr *)wlan_vdev_mlme_get_macaddr(vdev);
 	mac_addr = (struct qdf_mac_addr *)wlan_vdev_mlme_get_macaddr(vdev);
 
 
+	status = dp_ctx->dp_ops.dp_get_nw_intf_mac_by_vdev_mac(mac_addr,
+							       &intf_mac);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		dp_err("Failed to get intf mac:" QDF_MAC_ADDR_FMT,
+		       QDF_MAC_ADDR_REF(mac_addr));
+		return QDF_STATUS_E_INVAL;
+	}
+
 	dp_intf = dp_get_intf_by_macaddr(dp_ctx, mac_addr);
 	dp_intf = dp_get_intf_by_macaddr(dp_ctx, mac_addr);
 	if (!dp_intf) {
 	if (!dp_intf) {
 		dp_err("Failed to get dp intf mac:" QDF_MAC_ADDR_FMT,
 		dp_err("Failed to get dp intf mac:" QDF_MAC_ADDR_FMT,
@@ -842,6 +901,18 @@ dp_vdev_obj_create_notification(struct wlan_objmgr_vdev *vdev, void *arg)
 	dp_intf->device_mode = wlan_vdev_mlme_get_opmode(vdev);
 	dp_intf->device_mode = wlan_vdev_mlme_get_opmode(vdev);
 	dp_intf->intf_id = vdev->vdev_objmgr.vdev_id;
 	dp_intf->intf_id = vdev->vdev_objmgr.vdev_id;
 	dp_intf->vdev = vdev;
 	dp_intf->vdev = vdev;
+	qdf_atomic_init(&dp_intf->num_active_task);
+
+	if (dp_intf->device_mode == QDF_SAP_MODE ||
+	    dp_intf->device_mode == QDF_P2P_GO_MODE) {
+		dp_intf->sap_tx_block_mask = DP_TX_FN_CLR | DP_TX_SAP_STOP;
+
+		status = qdf_event_create(&dp_intf->qdf_sta_eap_frm_done_event);
+		if (!QDF_IS_STATUS_SUCCESS(status)) {
+			dp_err("eap frm done event init failed!!");
+			return status;
+		}
+	}
 
 
 	status = wlan_objmgr_vdev_component_obj_attach(vdev,
 	status = wlan_objmgr_vdev_component_obj_attach(vdev,
 						       WLAN_COMP_DP,
 						       WLAN_COMP_DP,
@@ -878,6 +949,21 @@ dp_vdev_obj_destroy_notification(struct wlan_objmgr_vdev *vdev, void *arg)
 	dp_nud_flush_work(dp_intf);
 	dp_nud_flush_work(dp_intf);
 	dp_mic_flush_work(dp_intf);
 	dp_mic_flush_work(dp_intf);
 
 
+	status = dp_intf_wait_for_task_complete(dp_intf);
+	if (QDF_IS_STATUS_ERROR(status))
+		return status;
+
+	if (dp_intf->device_mode == QDF_SAP_MODE ||
+	    dp_intf->device_mode == QDF_P2P_GO_MODE) {
+		status = qdf_event_destroy(&dp_intf->qdf_sta_eap_frm_done_event);
+		if (!QDF_IS_STATUS_SUCCESS(status)) {
+			dp_err("eap frm done event destroy failed!!");
+			return status;
+		}
+	}
+	qdf_mem_zero(&dp_intf->conn_info, sizeof(struct wlan_dp_conn_info));
+	dp_intf->intf_id = WLAN_UMAC_VDEV_ID_MAX;
+	dp_intf->vdev = NULL;
 	status = wlan_objmgr_vdev_component_obj_detach(vdev,
 	status = wlan_objmgr_vdev_component_obj_detach(vdev,
 						       WLAN_COMP_DP,
 						       WLAN_COMP_DP,
 						       (void *)dp_intf);
 						       (void *)dp_intf);
@@ -995,6 +1081,8 @@ dp_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc, void *arg)
 		return status;
 		return status;
 	}
 	}
 
 
+	dp_reset_all_intfs_connectivity_stats(dp_ctx);
+
 	return status;
 	return status;
 }
 }
 
 

+ 1808 - 2
components/dp/core/src/wlan_dp_txrx.c

@@ -22,18 +22,1824 @@
   *
   *
   */
   */
 
 
+#include <wlan_dp_priv.h>
+#include <wlan_dp_main.h>
 #include <wlan_dp_txrx.h>
 #include <wlan_dp_txrx.h>
 #include <qdf_types.h>
 #include <qdf_types.h>
-#include <qdf_nbuf.h>
+#include <cdp_txrx_cmn.h>
+#include <cdp_txrx_peer_ops.h>
+#include <cdp_txrx_misc.h>
+#include <cdp_txrx_flow_ctrl_v2.h>
+#include "dp_txrx.h"
+#if defined(WLAN_SUPPORT_RX_FISA)
+#include "dp_fisa_rx.h"
+#endif
+#include "nan_public_structs.h"
+#include "nan_ucfg_api.h"
+#include <wlan_cm_ucfg_api.h>
+#include <enet.h>
+#include <cds_utils.h>
+#include <wlan_dp_bus_bandwidth.h>
+#include <wlan_tdls_ucfg_api.h>
+#include <qdf_trace.h>
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+void dp_get_tx_resource(struct wlan_dp_intf *dp_intf,
+			struct qdf_mac_addr *mac_addr)
+{
+	struct wlan_dp_psoc_callbacks *dp_ops = &dp_intf->dp_ctx->dp_ops;
+
+	dp_ops->dp_get_tx_resource(dp_intf->intf_id,
+				   mac_addr);
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+#ifdef FEATURE_WLAN_DIAG_SUPPORT
+/**
+ * dp_event_eapol_log() - send event to wlan diag
+ * @nbuf: Network buffer ptr
+ * @dir: direction
+ * @eapol_key_info: eapol key info
+ *
+ * Return: None
+ */
+void dp_event_eapol_log(qdf_nbuf_t nbuf, enum qdf_proto_dir dir)
+{
+	int16_t eapol_key_info;
+
+	WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);
+
+	if (dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
+	    QDF_NBUF_CB_GET_PACKET_TYPE(nbuf))
+		return;
+	else if (!qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
+		return;
+
+	eapol_key_info = (uint16_t)(*(uint16_t *)
+				(nbuf->data + EAPOL_KEY_INFO_OFFSET));
+
+	wlan_diag_event.event_sub_type =
+		(dir == QDF_TX ?
+		 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
+		 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
+	wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
+				(nbuf->data + EAPOL_PACKET_TYPE_OFFSET));
+	wlan_diag_event.eapol_key_info = eapol_key_info;
+	wlan_diag_event.eapol_rate = 0;
+	qdf_mem_copy(wlan_diag_event.dest_addr,
+		     (nbuf->data + QDF_NBUF_DEST_MAC_OFFSET),
+		     sizeof(wlan_diag_event.dest_addr));
+	qdf_mem_copy(wlan_diag_event.src_addr,
+		     (nbuf->data + QDF_NBUF_SRC_MAC_OFFSET),
+		     sizeof(wlan_diag_event.src_addr));
+
+	WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
+}
+#endif /* FEATURE_WLAN_DIAG_SUPPORT */
+
+static int dp_intf_is_tx_allowed(qdf_nbuf_t nbuf,
+				 uint8_t intf_id, void *soc,
+				 uint8_t *peer_mac)
+{
+	enum ol_txrx_peer_state peer_state;
+
+	peer_state = cdp_peer_state_get(soc, intf_id, peer_mac);
+	if (qdf_likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
+		return true;
+	if (OL_TXRX_PEER_STATE_CONN == peer_state &&
+	    (qdf_ntohs(qdf_nbuf_get_protocol(nbuf)) == ETHERTYPE_PAE ||
+	     IS_DP_ETHERTYPE_WAI(nbuf)))
+		return true;
+
+	dp_info("Invalid peer state for Tx: %d", peer_state);
+	return false;
+}
+
+/**
+ * dp_tx_rx_is_dns_domain_name_match() - function to check whether dns
+ * domain name in the received nbuf matches with the tracking dns domain
+ * name or not
+ *
+ * @nbuf: Network buffer pointer
+ * @dp_intf: DP interface poniter
+ *
+ * Returns: true if matches else false
+ */
+static bool dp_tx_rx_is_dns_domain_name_match(qdf_nbuf_t nbuf,
+					      struct wlan_dp_intf *dp_intf)
+{
+	uint8_t *domain_name;
+
+	if (dp_intf->track_dns_domain_len == 0)
+		return false;
+
+	/* check OOB , is strncmp accessing data more than skb->len */
+	if ((dp_intf->track_dns_domain_len +
+	    QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET) > qdf_nbuf_len(nbuf))
+		return false;
+
+	domain_name = qdf_nbuf_get_dns_domain_name(nbuf,
+						dp_intf->track_dns_domain_len);
+	if (qdf_str_ncmp(domain_name, dp_intf->dns_payload,
+			 dp_intf->track_dns_domain_len) == 0)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dp_clear_tx_rx_connectivity_stats() - clear connectivity stats
+ * @dp_intf: pointer to DP interface
+ *
+ * Return: None
+ */
+static void dp_clear_tx_rx_connectivity_stats(struct wlan_dp_intf *dp_intf)
+{
+	dp_debug("Clear txrx connectivity stats");
+	qdf_mem_zero(&dp_intf->dp_stats.arp_stats,
+		     sizeof(dp_intf->dp_stats.arp_stats));
+	qdf_mem_zero(&dp_intf->dp_stats.dns_stats,
+		     sizeof(dp_intf->dp_stats.dns_stats));
+	qdf_mem_zero(&dp_intf->dp_stats.tcp_stats,
+		     sizeof(dp_intf->dp_stats.tcp_stats));
+	qdf_mem_zero(&dp_intf->dp_stats.icmpv4_stats,
+		     sizeof(dp_intf->dp_stats.icmpv4_stats));
+	dp_intf->pkt_type_bitmap = 0;
+	dp_intf->track_arp_ip = 0;
+	qdf_mem_zero(dp_intf->dns_payload, dp_intf->track_dns_domain_len);
+	dp_intf->track_dns_domain_len = 0;
+	dp_intf->track_src_port = 0;
+	dp_intf->track_dest_port = 0;
+	dp_intf->track_dest_ipv4 = 0;
+}
+
+void dp_reset_all_intfs_connectivity_stats(struct wlan_dp_psoc_context *dp_ctx)
+{
+	struct wlan_dp_intf *dp_intf = NULL;
+
+	qdf_spin_lock_bh(&dp_ctx->intf_list_lock);
+	for (dp_get_front_intf_no_lock(dp_ctx, &dp_intf); dp_intf;
+	     dp_get_next_intf_no_lock(dp_ctx, dp_intf, &dp_intf)) {
+		dp_clear_tx_rx_connectivity_stats(dp_intf);
+	}
+	qdf_spin_unlock_bh(&dp_ctx->intf_list_lock);
+}
+
+void
+dp_tx_rx_collect_connectivity_stats_info(qdf_nbuf_t nbuf, void *context,
+		enum connectivity_stats_pkt_status action, uint8_t *pkt_type)
+{
+	uint32_t pkt_type_bitmap;
+	struct wlan_dp_intf *dp_intf =  (struct  wlan_dp_intf *)context;
+
+	/* ARP tracking is done already. */
+	pkt_type_bitmap = dp_intf->pkt_type_bitmap;
+
+	pkt_type_bitmap &=  ~dp_intf->dp_ctx->arp_connectivity_map;
+
+	if (!pkt_type_bitmap)
+		return;
+
+	switch (action) {
+	case PKT_TYPE_REQ:
+	case PKT_TYPE_TX_HOST_FW_SENT:
+		if (qdf_nbuf_is_icmp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_icmpv4_req(nbuf) &&
+			    dp_intf->track_dest_ipv4 ==
+			    qdf_nbuf_get_icmpv4_tgt_ip(nbuf)) {
+				*pkt_type = DP_CONNECTIVITY_CHECK_SET_ICMPV4;
+				if (action == PKT_TYPE_REQ) {
+					++dp_intf->dp_stats.icmpv4_stats.
+							tx_icmpv4_req_count;
+					dp_info("ICMPv4 Req packet");
+				} else
+					/* host receives tx completion */
+					++dp_intf->dp_stats.icmpv4_stats.
+						tx_host_fw_sent;
+			}
+		} else if (qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_tcp_syn(nbuf) &&
+			    dp_intf->track_dest_port ==
+			    qdf_nbuf_data_get_tcp_dst_port(nbuf)) {
+				*pkt_type = DP_CONNECTIVITY_CHECK_SET_TCP_SYN;
+				if (action == PKT_TYPE_REQ) {
+					++dp_intf->dp_stats.tcp_stats.
+							tx_tcp_syn_count;
+					dp_info("TCP Syn packet");
+				} else {
+					/* host receives tx completion */
+					++dp_intf->dp_stats.tcp_stats.
+							tx_tcp_syn_host_fw_sent;
+				}
+			} else if ((dp_intf->dp_stats.tcp_stats.
+				    is_tcp_syn_ack_rcv || dp_intf->dp_stats.
+					tcp_stats.is_tcp_ack_sent) &&
+				   qdf_nbuf_data_is_tcp_ack(nbuf) &&
+				   (dp_intf->track_dest_port ==
+				    qdf_nbuf_data_get_tcp_dst_port(nbuf))) {
+				*pkt_type = DP_CONNECTIVITY_CHECK_SET_TCP_ACK;
+				if (action == PKT_TYPE_REQ &&
+					dp_intf->dp_stats.tcp_stats.
+							is_tcp_syn_ack_rcv) {
+					++dp_intf->dp_stats.tcp_stats.
+							tx_tcp_ack_count;
+					dp_intf->dp_stats.tcp_stats.
+						is_tcp_syn_ack_rcv = false;
+					dp_intf->dp_stats.tcp_stats.
+						is_tcp_ack_sent = true;
+					dp_info("TCP Ack packet");
+				} else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
+					dp_intf->dp_stats.tcp_stats.
+							is_tcp_ack_sent) {
+					/* host receives tx completion */
+					++dp_intf->dp_stats.tcp_stats.
+							tx_tcp_ack_host_fw_sent;
+					dp_intf->dp_stats.tcp_stats.
+							is_tcp_ack_sent = false;
+				}
+			}
+		} else if (qdf_nbuf_is_ipv4_udp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_dns_query(nbuf) &&
+			    dp_tx_rx_is_dns_domain_name_match(nbuf, dp_intf)) {
+				*pkt_type = DP_CONNECTIVITY_CHECK_SET_DNS;
+				if (action == PKT_TYPE_REQ) {
+					++dp_intf->dp_stats.dns_stats.
+							tx_dns_req_count;
+					dp_info("DNS query packet");
+				} else
+					/* host receives tx completion */
+					++dp_intf->dp_stats.dns_stats.
+								tx_host_fw_sent;
+			}
+		}
+		break;
+
+	case PKT_TYPE_RSP:
+		if (qdf_nbuf_is_icmp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_icmpv4_rsp(nbuf) &&
+			    (dp_intf->track_dest_ipv4 ==
+					qdf_nbuf_get_icmpv4_src_ip(nbuf))) {
+				++dp_intf->dp_stats.icmpv4_stats.
+							rx_icmpv4_rsp_count;
+				*pkt_type =
+				DP_CONNECTIVITY_CHECK_SET_ICMPV4;
+				dp_info("ICMPv4 Res packet");
+			}
+		} else if (qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_tcp_syn_ack(nbuf) &&
+			    (dp_intf->track_dest_port ==
+					qdf_nbuf_data_get_tcp_src_port(nbuf))) {
+				++dp_intf->dp_stats.tcp_stats.
+							rx_tcp_syn_ack_count;
+				dp_intf->dp_stats.tcp_stats.
+					is_tcp_syn_ack_rcv = true;
+				*pkt_type =
+				DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
+				dp_info("TCP Syn ack packet");
+			}
+		} else if (qdf_nbuf_is_ipv4_udp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_dns_response(nbuf) &&
+			    dp_tx_rx_is_dns_domain_name_match(nbuf, dp_intf)) {
+				++dp_intf->dp_stats.dns_stats.
+							rx_dns_rsp_count;
+				*pkt_type = DP_CONNECTIVITY_CHECK_SET_DNS;
+				dp_info("DNS response packet");
+			}
+		}
+		break;
+
+	case PKT_TYPE_TX_DROPPED:
+		switch (*pkt_type) {
+		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
+			++dp_intf->dp_stats.icmpv4_stats.tx_dropped;
+			dp_info("ICMPv4 Req packet dropped");
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN:
+			++dp_intf->dp_stats.tcp_stats.tx_tcp_syn_dropped;
+			dp_info("TCP syn packet dropped");
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_ACK:
+			++dp_intf->dp_stats.tcp_stats.tx_tcp_ack_dropped;
+			dp_info("TCP ack packet dropped");
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_DNS:
+			++dp_intf->dp_stats.dns_stats.tx_dropped;
+			dp_info("DNS query packet dropped");
+			break;
+		default:
+			break;
+		}
+		break;
+	case PKT_TYPE_RX_DELIVERED:
+		switch (*pkt_type) {
+		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
+			++dp_intf->dp_stats.icmpv4_stats.rx_delivered;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
+			++dp_intf->dp_stats.tcp_stats.rx_delivered;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_DNS:
+			++dp_intf->dp_stats.dns_stats.rx_delivered;
+			break;
+		default:
+			break;
+		}
+		break;
+	case PKT_TYPE_RX_REFUSED:
+		switch (*pkt_type) {
+		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
+			++dp_intf->dp_stats.icmpv4_stats.rx_refused;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
+			++dp_intf->dp_stats.tcp_stats.rx_refused;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_DNS:
+			++dp_intf->dp_stats.dns_stats.rx_refused;
+			break;
+		default:
+			break;
+		}
+		break;
+	case PKT_TYPE_TX_ACK_CNT:
+		switch (*pkt_type) {
+		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
+			++dp_intf->dp_stats.icmpv4_stats.tx_ack_cnt;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN:
+			++dp_intf->dp_stats.tcp_stats.tx_tcp_syn_ack_cnt;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_TCP_ACK:
+			++dp_intf->dp_stats.tcp_stats.tx_tcp_ack_ack_cnt;
+			break;
+		case DP_CONNECTIVITY_CHECK_SET_DNS:
+			++dp_intf->dp_stats.dns_stats.tx_ack_cnt;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * dp_get_transmit_mac_addr() - Get the mac address to validate the xmit
+ * @dp_intf: DP interface
+ * @nbuf: The network buffer
+ * @mac_addr_tx_allowed: The mac address to be filled
+ *
+ * Return: None
+ */
+static
+void dp_get_transmit_mac_addr(struct wlan_dp_intf *dp_intf,
+			      qdf_nbuf_t nbuf,
+			      struct qdf_mac_addr *mac_addr_tx_allowed)
+{
+	bool is_mc_bc_addr = false;
+	enum nan_datapath_state state;
+
+	switch (dp_intf->device_mode) {
+	case QDF_NDI_MODE:
+		state = ucfg_nan_get_ndi_state(dp_intf->vdev);
+		if (state == NAN_DATA_NDI_CREATED_STATE ||
+		    state == NAN_DATA_CONNECTED_STATE ||
+		    state == NAN_DATA_CONNECTING_STATE ||
+		    state == NAN_DATA_PEER_CREATE_STATE) {
+			if (QDF_NBUF_CB_GET_IS_BCAST(nbuf) ||
+			    QDF_NBUF_CB_GET_IS_MCAST(nbuf))
+				is_mc_bc_addr = true;
+			if (is_mc_bc_addr)
+				qdf_copy_macaddr(mac_addr_tx_allowed,
+						 &dp_intf->mac_addr);
+			else
+				qdf_copy_macaddr(mac_addr_tx_allowed,
+				(struct qdf_mac_addr *)qdf_nbuf_data(nbuf));
+		}
+		break;
+	case QDF_STA_MODE:
+	case QDF_P2P_CLIENT_MODE:
+		if (ucfg_cm_is_vdev_active(dp_intf->vdev))
+			qdf_copy_macaddr(mac_addr_tx_allowed,
+					 &dp_intf->conn_info.bssid);
+		break;
+	default:
+		break;
+	}
+}
+
+#ifdef HANDLE_BROADCAST_EAPOL_TX_FRAME
+/**
+ * dp_fix_broadcast_eapol() - Fix broadcast eapol
+ * @dp_intf: pointer to dp interface
+ * @nbuf: pointer to nbuf
+ *
+ * Override DA of broadcast eapol with bssid addr.
+ *
+ * Return: None
+ */
+static void dp_fix_broadcast_eapol(struct wlan_dp_intf *dp_intf,
+				   qdf_nbuf_t nbuf)
+{
+	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+	unsigned char *ap_mac_addr =
+		&dp_intf->conn_info.bssid.bytes[0];
+
+	if (qdf_unlikely((QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
+			  QDF_NBUF_CB_PACKET_TYPE_EAPOL) &&
+			 QDF_NBUF_CB_GET_IS_BCAST(nbuf))) {
+		dp_debug("SA: "QDF_MAC_ADDR_FMT " override DA: "QDF_MAC_ADDR_FMT " with AP mac address "QDF_MAC_ADDR_FMT,
+			  QDF_MAC_ADDR_REF(&eth_hdr->ether_shost[0]),
+			  QDF_MAC_ADDR_REF(&eth_hdr->ether_dhost[0]),
+			  QDF_MAC_ADDR_REF(ap_mac_addr));
+
+		qdf_mem_copy(&eth_hdr->ether_dhost, ap_mac_addr,
+			     QDF_MAC_ADDR_SIZE);
+	}
+}
+#else
+static void dp_fix_broadcast_eapol(struct wlan_dp_intf *dp_intf,
+				   qdf_nbuf_t nbuf)
+{
+}
+#endif /* HANDLE_BROADCAST_EAPOL_TX_FRAME */
+
+#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
+/**
+ * dp_mark_icmp_req_to_fw() - Mark the ICMP request at a certain time interval
+ *			       to be sent to the FW.
+ * @dp_ctx: Global dp context
+ * @nbuf: packet to be transmitted
+ *
+ * This func sets the "to_fw" flag in the packet context block, if the
+ * current packet is an ICMP request packet. This marking is done at a
+ * specific time interval, unless the INI value indicates to disable/enable
+ * this for all frames.
+ *
+ * Return: none
+ */
+static void dp_mark_icmp_req_to_fw(struct wlan_dp_psoc_context *dp_ctx,
+				   qdf_nbuf_t nbuf)
+{
+	uint64_t curr_time, time_delta;
+	int time_interval_ms = dp_ctx->dp_cfg.icmp_req_to_fw_mark_interval;
+	static uint64_t prev_marked_icmp_time;
+
+	if (!dp_ctx->dp_cfg.icmp_req_to_fw_mark_interval)
+		return;
+
+	if ((qdf_nbuf_get_icmp_subtype(nbuf) != QDF_PROTO_ICMP_REQ) &&
+	    (qdf_nbuf_get_icmpv6_subtype(nbuf) != QDF_PROTO_ICMPV6_REQ))
+		return;
+
+	/* Mark all ICMP request to be sent to FW */
+	if (time_interval_ms == WLAN_CFG_ICMP_REQ_TO_FW_MARK_ALL)
+		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
+
+	curr_time = qdf_get_log_timestamp();
+	time_delta = curr_time - prev_marked_icmp_time;
+	if (time_delta >= (time_interval_ms *
+			   QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100)) {
+		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
+		prev_marked_icmp_time = curr_time;
+	}
+}
+#else
+static void dp_mark_icmp_req_to_fw(struct wlan_dp_psoc_context *dp_ctx,
+				   qdf_nbuf_t nbuf)
+{
+}
+#endif
+
+#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
+void wlan_dp_pkt_add_timestamp(struct wlan_dp_intf *dp_intf,
+			       enum qdf_pkt_timestamp_index index,
+			       qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_callbacks *dp_ops;
+
+	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
+		uint64_t tsf_time;
+
+		dp_ops = &dp_intf->dp_ctx->dp_ops;
+		dp_ops->dp_get_tsf_time(dp_intf->intf_id,
+					qdf_get_log_timestamp(),
+					&tsf_time);
+		qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
+	}
+}
+#endif
 
 
 QDF_STATUS
 QDF_STATUS
 dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
 dp_start_xmit(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
 {
 {
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	struct dp_tx_rx_stats *stats;
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
+	bool is_arp = false;
+	bool is_eapol = false;
+	bool is_dhcp = false;
+	uint8_t pkt_type;
+	struct qdf_mac_addr mac_addr_tx_allowed = QDF_MAC_ADDR_ZERO_INIT;
+	int cpu = qdf_get_smp_processor_id();
+
+	stats = &dp_intf->dp_stats.tx_rx_stats;
+	++stats->per_cpu[cpu].tx_called;
+	stats->cont_txtimeout_cnt = 0;
+
+	if (qdf_unlikely(cds_is_driver_transitioning())) {
+		dp_err_rl("driver is transitioning, drop pkt");
+		goto drop_pkt;
+	}
+
+	if (qdf_unlikely(dp_ctx->wlan_suspended)) {
+		dp_err_rl("Device is system suspended, drop pkt");
+		goto drop_pkt;
+	}
+
+	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(nbuf) = 1;
+
+	pkt_type = QDF_NBUF_CB_GET_PACKET_TYPE(nbuf);
+
+	if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_ARP) {
+		if (qdf_nbuf_data_is_arp_req(nbuf) &&
+		    (dp_intf->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(nbuf))) {
+			is_arp = true;
+			++dp_intf->dp_stats.arp_stats.tx_arp_req_count;
+			dp_info("ARP packet");
+		}
+	} else if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_EAPOL) {
+		subtype = qdf_nbuf_get_eapol_subtype(nbuf);
+		if (subtype == QDF_PROTO_EAPOL_M2) {
+			++dp_intf->dp_stats.eapol_stats.eapol_m2_count;
+			is_eapol = true;
+		} else if (subtype == QDF_PROTO_EAPOL_M4) {
+			++dp_intf->dp_stats.eapol_stats.eapol_m4_count;
+			is_eapol = true;
+		}
+	} else if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_DHCP) {
+		subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
+		if (subtype == QDF_PROTO_DHCP_DISCOVER) {
+			++dp_intf->dp_stats.dhcp_stats.dhcp_dis_count;
+			is_dhcp = true;
+		} else if (subtype == QDF_PROTO_DHCP_REQUEST) {
+			++dp_intf->dp_stats.dhcp_stats.dhcp_req_count;
+			is_dhcp = true;
+		}
+	} else if ((pkt_type == QDF_NBUF_CB_PACKET_TYPE_ICMP) ||
+		   (pkt_type == QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
+		dp_mark_icmp_req_to_fw(dp_ctx, nbuf);
+	}
+
+	wlan_dp_pkt_add_timestamp(dp_intf, QDF_PKT_TX_DRIVER_ENTRY, nbuf);
+
+	/* track connectivity stats */
+	if (dp_intf->pkt_type_bitmap)
+		dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_intf,
+							 PKT_TYPE_REQ,
+							 &pkt_type);
+
+	dp_get_transmit_mac_addr(dp_intf, nbuf, &mac_addr_tx_allowed);
+	if (qdf_is_macaddr_zero(&mac_addr_tx_allowed)) {
+		dp_info_rl("tx not allowed, transmit operation suspended");
+		goto drop_pkt;
+	}
+
+	dp_get_tx_resource(dp_intf, &mac_addr_tx_allowed);
+
+	if (!qdf_nbuf_ipa_owned_get(nbuf)) {
+		nbuf = dp_nbuf_orphan(dp_intf, nbuf);
+		if (!nbuf)
+			goto drop_pkt_accounting;
+	}
+
+	/*
+	 * Add SKB to internal tracking table before further processing
+	 * in WLAN driver.
+	 */
+	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);
+
+	QDF_NET_DEV_STATS_TX_BYTES(&dp_intf->stats) += qdf_nbuf_len(nbuf);
+
+	if (qdf_nbuf_is_tso(nbuf)) {
+		QDF_NET_DEV_STATS_TX_PKTS(&dp_intf->stats) +=
+			qdf_nbuf_get_tso_num_seg(nbuf);
+	} else {
+		QDF_NET_DEV_STATS_INC_TX_PKTS(&dp_intf->stats);
+		dp_ctx->no_tx_offload_pkt_cnt++;
+	}
+
+	dp_event_eapol_log(nbuf, QDF_TX);
+	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
+	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_DP);
+
+	qdf_dp_trace_set_track(nbuf, QDF_TX);
+
+	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_TX_PACKET_PTR_RECORD,
+			     QDF_TRACE_DEFAULT_PDEV_ID,
+			     qdf_nbuf_data_addr(nbuf),
+			     sizeof(qdf_nbuf_data(nbuf)),
+			     QDF_TX));
+
+	if (!dp_intf_is_tx_allowed(nbuf, dp_intf->intf_id, soc,
+				   mac_addr_tx_allowed.bytes)) {
+		dp_info("Tx not allowed for sta:" QDF_MAC_ADDR_FMT,
+			QDF_MAC_ADDR_REF(mac_addr_tx_allowed.bytes));
+		goto drop_pkt_and_release_nbuf;
+	}
+
+	/* check whether need to linearize nbuf, like non-linear udp data */
+	if (dp_nbuf_nontso_linearize(nbuf) != QDF_STATUS_SUCCESS) {
+		dp_err(" nbuf %pK linearize failed. drop the pkt", nbuf);
+		goto drop_pkt_and_release_nbuf;
+	}
+
+	/*
+	 * If a transmit function is not registered, drop packet
+	 */
+	if (!dp_intf->tx_fn) {
+		dp_err("TX function not registered by the data path");
+		goto drop_pkt_and_release_nbuf;
+	}
+
+	dp_fix_broadcast_eapol(dp_intf, nbuf);
+
+	if (dp_intf->tx_fn(soc, dp_intf->intf_id, nbuf)) {
+		dp_debug("Failed to send packet from adapter %u",
+			 dp_intf->intf_id);
+		goto drop_pkt_and_release_nbuf;
+	}
+
+	return QDF_STATUS_SUCCESS;
+
+drop_pkt_and_release_nbuf:
+	qdf_net_buf_debug_release_skb(nbuf);
+drop_pkt:
+
+	/* track connectivity stats */
+	if (dp_intf->pkt_type_bitmap)
+		dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_intf,
+							 PKT_TYPE_TX_DROPPED,
+							 &pkt_type);
+	qdf_dp_trace_data_pkt(nbuf, QDF_TRACE_DEFAULT_PDEV_ID,
+			      QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
+			      QDF_TX);
+	qdf_nbuf_kfree(nbuf);
+
+drop_pkt_accounting:
+
+	QDF_NET_DEV_STATS_INC_TX_DROPEED(&dp_intf->stats);
+	++stats->per_cpu[cpu].tx_dropped;
+	if (is_arp) {
+		++dp_intf->dp_stats.arp_stats.tx_dropped;
+		dp_info_rl("ARP packet dropped");
+	} else if (is_eapol) {
+		++dp_intf->dp_stats.eapol_stats.
+				tx_dropped[subtype - QDF_PROTO_EAPOL_M1];
+	} else if (is_dhcp) {
+		++dp_intf->dp_stats.dhcp_stats.
+				tx_dropped[subtype - QDF_PROTO_DHCP_DISCOVER];
+	}
+
+	return QDF_STATUS_E_FAILURE;
+}
+
+void dp_tx_timeout(struct wlan_dp_intf *dp_intf)
+{
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	u64 diff_time;
+
+	cdp_dump_flow_pool_info(soc);
+
+	++dp_intf->dp_stats.tx_rx_stats.tx_timeout_cnt;
+	++dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt;
+
+	diff_time = qdf_system_ticks() -
+		dp_intf->dp_stats.tx_rx_stats.last_txtimeout;
+
+	if ((dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
+	    (diff_time > (DP_TX_TIMEOUT * 2))) {
+		/*
+		 * In case when there is no traffic is running, it may
+		 * possible tx time-out may once happen and later system
+		 * recovered then continuous tx timeout count has to be
+		 * reset as it is gets modified only when traffic is running.
+		 * If over a period of time if this count reaches to threshold
+		 * then host triggers a false subsystem restart. In genuine
+		 * time out case OS will call the tx time-out back to back
+		 * at interval of DP_TX_TIMEOUT. Here now check if previous
+		 * TX TIME out has occurred more than twice of DP_TX_TIMEOUT
+		 * back then host may recovered here from data stall.
+		 */
+		dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
+		dp_info("Reset continuous tx timeout stat");
+	}
+
+	dp_intf->dp_stats.tx_rx_stats.last_txtimeout = qdf_system_ticks();
+
+	if (dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt >
+	    DP_TX_STALL_THRESHOLD) {
+		dp_err("Data stall due to continuous TX timeouts");
+		dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
+
+		if (cdp_cfg_get(soc, cfg_dp_enable_data_stall))
+			cdp_post_data_stall_event(soc,
+					  DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
+					  DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
+					  OL_TXRX_PDEV_ID, 0xFF,
+					  DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
+	}
+}
+
+void dp_sta_notify_tx_comp_cb(qdf_nbuf_t nbuf, void *ctx, uint16_t flag)
+{
+	struct wlan_dp_intf *dp_intf = ctx;
+	enum qdf_proto_subtype subtype;
+	struct qdf_mac_addr *dest_mac_addr;
+	QDF_STATUS status;
+
+	if (is_dp_intf_valid(dp_intf))
+		return;
+
+	dest_mac_addr = (struct qdf_mac_addr *)qdf_nbuf_data(nbuf);
+
+	switch (QDF_NBUF_CB_GET_PACKET_TYPE(nbuf)) {
+	case QDF_NBUF_CB_PACKET_TYPE_ARP:
+		if (flag & BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC))
+			++dp_intf->dp_stats.arp_stats.
+				tx_host_fw_sent;
+		if (flag & BIT(QDF_TX_RX_STATUS_OK))
+			++dp_intf->dp_stats.arp_stats.tx_ack_cnt;
+		break;
+	case QDF_NBUF_CB_PACKET_TYPE_EAPOL:
+		subtype = qdf_nbuf_get_eapol_subtype(nbuf);
+		if (!(flag & BIT(QDF_TX_RX_STATUS_OK)) &&
+		    subtype != QDF_PROTO_INVALID)
+			++dp_intf->dp_stats.eapol_stats.
+				tx_noack_cnt[subtype - QDF_PROTO_EAPOL_M1];
+		break;
+	case QDF_NBUF_CB_PACKET_TYPE_DHCP:
+		subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
+		if (!(flag & BIT(QDF_TX_RX_STATUS_OK)) &&
+		    subtype != QDF_PROTO_INVALID &&
+		    subtype <= QDF_PROTO_DHCP_ACK)
+			++dp_intf->dp_stats.dhcp_stats.
+				tx_noack_cnt[subtype - QDF_PROTO_DHCP_DISCOVER];
+		break;
+	default:
+		break;
+	}
+
+	/* Since it is TDLS call took TDLS vdev ref*/
+	status = wlan_objmgr_vdev_try_get_ref(dp_intf->vdev, WLAN_TDLS_SB_ID);
+	if (QDF_IS_STATUS_SUCCESS(status)) {
+		ucfg_tdls_update_tx_pkt_cnt(dp_intf->vdev, dest_mac_addr);
+		wlan_objmgr_vdev_release_ref(dp_intf->vdev, WLAN_TDLS_SB_ID);
+	}
+}
+
+#ifdef FEATURE_MONITOR_MODE_SUPPORT
+QDF_STATUS dp_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
+{
+	struct wlan_dp_intf *dp_intf;
+	QDF_STATUS status;
+	qdf_nbuf_t nbuf;
+	qdf_nbuf_t nbuf_next;
+	unsigned int cpu_index;
+	struct dp_tx_rx_stats *stats;
+
+	/* Sanity check on inputs */
+	if ((!context) || (!rxbuf)) {
+		dp_err("Null params being passed");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf = (struct wlan_dp_intf *)context;
+
+	cpu_index = qdf_get_cpu();
+	stats = &dp_intf->dp_stats.tx_rx_stats;
+
+	/* walk the chain until all are processed */
+	nbuf =  rxbuf;
+	while (nbuf) {
+		nbuf_next = qdf_nbuf_next(nbuf);
+		qdf_nbuf_set_dev(nbuf, dp_intf->dev);
+
+		++stats->per_cpu[cpu_index].rx_packets;
+		QDF_NET_DEV_STATS_INC_RX_PKTS(&dp_intf->stats);
+		QDF_NET_DEV_STATS_RX_BYTES(&dp_intf->stats) +=
+			qdf_nbuf_len(nbuf);
+
+		/* Remove SKB from internal tracking table before submitting
+		 * it to stack
+		 */
+		qdf_net_buf_debug_release_skb(nbuf);
+
+		/*
+		 * If this is not a last packet on the chain
+		 * Just put packet into backlog queue, not scheduling RX sirq
+		 */
+		if (qdf_nbuf_next(nbuf)) {
+			status = dp_intf->dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf,
+							DP_NBUF_PUSH_SIMPLE);
+		} else {
+			/*
+			 * This is the last packet on the chain
+			 * Scheduling rx sirq
+			 */
+			status = dp_intf->dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf,
+							DP_NBUF_PUSH_NAPI);
+		}
+
+		if (QDF_IS_STATUS_SUCCESS(status))
+			++stats->per_cpu[cpu_index].rx_delivered;
+		else
+			++stats->per_cpu[cpu_index].rx_refused;
+
+		nbuf = nbuf_next;
+	}
+
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
 
 
+void dp_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
+				  ol_txrx_rx_mon_fp rx_monitor_cb)
+{
+	txrx->rx.mon = rx_monitor_cb;
+}
+
+void dp_rx_monitor_callback(ol_osif_vdev_handle context,
+			    qdf_nbuf_t rxbuf,
+			    void *rx_status)
+{
+	dp_mon_rx_packet_cbk(context, rxbuf);
+}
+#endif
+
+/**
+ * dp_is_rx_wake_lock_needed() - check if wake lock is needed
+ * @nbuf: pointer to sk_buff
+ *
+ * RX wake lock is needed for:
+ * 1) Unicast data packet OR
+ * 2) Local ARP data packet
+ *
+ * Return: true if wake lock is needed or false otherwise.
+ */
+static bool dp_is_rx_wake_lock_needed(qdf_nbuf_t nbuf)
+{
+	if ((!qdf_nbuf_pkt_type_is_mcast(nbuf) &&
+	     !qdf_nbuf_pkt_type_is_bcast(nbuf)) ||
+	    qdf_nbuf_is_arp_local(nbuf))
+		return true;
+
+	return false;
+}
+
+#ifdef RECEIVE_OFFLOAD
+/**
+ * dp_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
+ * @dp_ctx: pointer to DP psoc Context
+ *
+ * Return: None
+ */
+static void dp_resolve_rx_ol_mode(struct wlan_dp_psoc_context *dp_ctx)
+{
+	void *soc;
+
+	soc = cds_get_context(QDF_MODULE_ID_SOC);
+
+	if (!(cdp_cfg_get(soc, cfg_dp_lro_enable) ^
+	    cdp_cfg_get(soc, cfg_dp_gro_enable))) {
+		cdp_cfg_get(soc, cfg_dp_lro_enable) &&
+			cdp_cfg_get(soc, cfg_dp_gro_enable) ?
+		dp_info("Can't enable both LRO and GRO, disabling Rx offload"):
+		dp_info("LRO and GRO both are disabled");
+		dp_ctx->ol_enable = 0;
+	} else if (cdp_cfg_get(soc, cfg_dp_lro_enable)) {
+		dp_info("Rx offload LRO is enabled");
+		dp_ctx->ol_enable = CFG_LRO_ENABLED;
+	} else {
+		dp_info("Rx offload: GRO is enabled");
+		dp_ctx->ol_enable = CFG_GRO_ENABLED;
+	}
+}
+
+#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
+/**
+ * dp_gro_rx_bh_disable() - GRO RX/flush function.
+ * @dp_intf: pointer to DP interface
+ * @napi_to_use: napi to be used to give packets to the stack, gro flush
+ * @nbuf: pointer to n/w buff
+ *
+ * Function calls napi_gro_receive for the skb. If the skb indicates that a
+ * flush needs to be done (set by the lower DP layer), the function also calls
+ * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
+ * napi_gro_* calls.
+ *
+ * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
+ *	   QDF error code.
+ */
+static QDF_STATUS dp_gro_rx_bh_disable(struct wlan_dp_intf *dp_intf,
+				       qdf_napi_struct *napi_to_use,
+				       qdf_nbuf_t nbuf)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	uint32_t rx_aggregation;
+	uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
+	uint8_t low_tput_force_flush = 0;
+
+	rx_aggregation = qdf_atomic_read(&dp_ctx->dp_agg_param.rx_aggregation);
+
+	/* Flush instead of aggregating when the bus is idle, aggregation is
+	 * dynamically disabled, or GRO is disallowed (ingress qdisc) for
+	 * this Rx context.
+	 */
+	if (dp_get_current_throughput_level(dp_ctx) == PLD_BUS_WIDTH_IDLE ||
+	    !rx_aggregation || dp_intf->gro_disallowed[rx_ctx_id]) {
+		status = dp_ctx->dp_ops.dp_rx_napi_gro_flush(napi_to_use, nbuf,
+						   &low_tput_force_flush);
+		if (!low_tput_force_flush)
+			dp_intf->dp_stats.tx_rx_stats.
+					rx_gro_low_tput_flush++;
+		/* Remember the flush reason so the deliver path can reset
+		 * these flags when conditions change.
+		 */
+		if (!rx_aggregation)
+			dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 1;
+		if (dp_intf->gro_disallowed[rx_ctx_id])
+			dp_intf->gro_flushed[rx_ctx_id] = 1;
+	} else {
+		status = dp_ctx->dp_ops.dp_rx_napi_gro_receive(napi_to_use,
+							      nbuf);
+	}
+
+	return status;
+}
+
+#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
+
+/**
+ * dp_gro_rx_bh_disable() - GRO RX/flush function.
+ * @dp_intf: pointer to DP interface
+ * @napi_to_use: napi to be used to give packets to the stack, gro flush
+ * @nbuf: pointer to nbuff
+ *
+ * Function calls napi_gro_receive for the skb. If the skb indicates that a
+ * flush needs to be done (set by the lower DP layer), the function also calls
+ * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
+ * napi_gro_* calls.
+ *
+ * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
+ *	   QDF error code.
+ */
+
+static QDF_STATUS dp_gro_rx_bh_disable(struct wlan_dp_intf *dp_intf,
+				       qdf_napi_struct *napi_to_use,
+				       qdf_nbuf_t nbuf)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	uint8_t low_tput_force_flush = 0;
+
+	/* Without dynamic Rx aggregation only the idle-bus case flushes */
+	if (dp_get_current_throughput_level(dp_ctx) == PLD_BUS_WIDTH_IDLE) {
+		status = dp_ctx->dp_ops.dp_rx_napi_gro_flush(napi_to_use, nbuf,
+							&low_tput_force_flush);
+		if (!low_tput_force_flush)
+			dp_intf->dp_stats.tx_rx_stats.
+					rx_gro_low_tput_flush++;
+	} else {
+		status = dp_ctx->dp_ops.dp_rx_napi_gro_receive(napi_to_use,
+							      nbuf);
+	}
+
+	return status;
+}
+#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
+
+#if defined(FEATURE_LRO)
+/**
+ * dp_lro_rx() - Handle Rx processing via LRO
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to n/w buff
+ *
+ * Forwards the buffer to the OS-layer LRO receive callback registered in
+ * the psoc DP callbacks.
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via LRO or non zero return code
+ */
+static inline QDF_STATUS
+dp_lro_rx(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+
+	return dp_ctx->dp_ops.dp_lro_rx_cb(dp_intf->dev, nbuf);
+}
+
+/**
+ * dp_is_lro_enabled() - Is LRO enabled
+ * @dp_ctx: pointer to DP psoc context
+ *
+ * This function checks if LRO is the active Rx offload mode.
+ *
+ * Return: QDF_STATUS_SUCCESS if LRO is enabled,
+ *	   QDF_STATUS_E_NOSUPPORT otherwise
+ */
+static inline QDF_STATUS
+dp_is_lro_enabled(struct wlan_dp_psoc_context *dp_ctx)
+{
+	if (dp_ctx->ol_enable != CFG_LRO_ENABLED)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	/* Fix: previously the LRO-enabled path fell off the end of this
+	 * non-void function (undefined return value), making the caller's
+	 * !dp_is_lro_enabled() check unreliable. Return success explicitly.
+	 */
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_lro_set_reset() - Dynamically enable or disable LRO on an interface
+ * @dp_intf: pointer to DP interface; must be a STA with LRO mode active
+ * @enable_flag: non-zero to re-enable LRO, zero to disable it
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL when LRO is
+ *	   not the active Rx offload mode or the interface is not a STA
+ */
+QDF_STATUS dp_lro_set_reset(struct wlan_dp_intf *dp_intf, uint8_t enable_flag)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+
+	if ((dp_ctx->ol_enable != CFG_LRO_ENABLED) ||
+	    (dp_intf->device_mode != QDF_STA_MODE)) {
+		dp_info("LRO is already Disabled");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (enable_flag) {
+		qdf_atomic_set(&dp_ctx->vendor_disable_lro_flag, 0);
+	} else {
+		/* Disable LRO, Enable tcpdelack*/
+		qdf_atomic_set(&dp_ctx->vendor_disable_lro_flag, 1);
+		dp_info("LRO Disabled");
+
+		/* With LRO off, fall back to TCP delayed-ACK tuning (when
+		 * configured) at the current Rx throughput level.
+		 */
+		if (dp_ctx->dp_cfg.enable_tcp_delack) {
+			struct wlan_rx_tp_data rx_tp_data;
+
+			dp_info("Enable TCP delack as LRO is disabled");
+			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
+			rx_tp_data.level =
+				DP_BUS_BW_CFG(dp_ctx->dp_cfg.cur_rx_level);
+			wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
+			dp_ctx->en_tcp_delack_no_lro = 1;
+		}
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+#else
+/* Stubs for builds without FEATURE_LRO: LRO can never be used */
+static inline
+QDF_STATUS dp_lro_rx(struct wlan_dp_intf *dp_intf,
+		     qdf_nbuf_t nbuf)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+/* Return type fixed from int to QDF_STATUS for consistency with the
+ * FEATURE_LRO variant of this function.
+ */
+static inline
+QDF_STATUS dp_is_lro_enabled(struct wlan_dp_psoc_context *dp_ctx)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif /* FEATURE_LRO */
+
+/**
+ * dp_gro_rx_thread() - Handle Rx processing via GRO for DP thread
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to n/w buff
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
+ */
+static
+QDF_STATUS dp_gro_rx_thread(struct wlan_dp_intf *dp_intf,
+			    qdf_nbuf_t nbuf)
+{
+	qdf_napi_struct *napi;
+	void *soc;
+
+	/* GRO here relies on the per-ring napi owned by the DP RX threads */
+	if (!dp_intf->dp_ctx->enable_dp_rx_threads) {
+		dp_err_rl("gro not supported without DP RX thread!");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	soc = cds_get_context(QDF_MODULE_ID_SOC);
+	napi = (qdf_napi_struct *)dp_rx_get_napi_context(soc,
+					QDF_NBUF_CB_RX_CTX_ID(nbuf));
+	if (!napi) {
+		dp_err_rl("no napi to use for GRO!");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return dp_gro_rx_bh_disable(dp_intf, napi, nbuf);
+}
+
+/**
+ * dp_gro_rx_legacy() - Handle Rx processing via GRO for ihelium based targets
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to n/w buf
+ *
+ * Supports GRO for only station mode
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
+ */
+static
+QDF_STATUS dp_gro_rx_legacy(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	qdf_napi_struct *napi;
+
+	/* Only enabling it for STA mode like LRO today */
+	if (dp_intf->device_mode != QDF_STA_MODE)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	/* GRO dynamically disabled for low throughput or concurrency */
+	if (qdf_atomic_read(&dp_ctx->disable_rx_ol_in_low_tput) ||
+	    qdf_atomic_read(&dp_ctx->disable_rx_ol_in_concurrency))
+		return QDF_STATUS_E_NOSUPPORT;
+
+	napi = dp_ctx->dp_ops.dp_gro_rx_legacy_get_napi(nbuf,
+						dp_ctx->enable_rxthread);
+	if (!napi)
+		return QDF_STATUS_E_FAILURE;
+
+	return dp_gro_rx_bh_disable(dp_intf, napi, nbuf);
+}
+
+/**
+ * dp_register_rx_ol_cb() - Register LRO/GRO rx processing callbacks
+ * @dp_ctx: pointer to DP psoc context
+ * @wifi3_0_target: whether its a lithium/beryllium arch based target or not
+ *
+ * Return: none
+ */
+static void dp_register_rx_ol_cb(struct wlan_dp_psoc_context *dp_ctx,
+				 bool wifi3_0_target)
+{
+	if  (!dp_ctx) {
+		dp_err("DP context is NULL");
+		return;
+	}
+
+	dp_ctx->en_tcp_delack_no_lro = 0;
+
+	/* dp_is_lro_enabled() is expected to return QDF_STATUS_SUCCESS (0)
+	 * when LRO mode is active, hence the negation.
+	 */
+	if (!dp_is_lro_enabled(dp_ctx)) {
+		dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(DP_RX_FLUSH_LRO);
+		dp_ctx->receive_offload_cb = dp_lro_rx;
+		dp_info("LRO is enabled");
+	} else if (dp_ctx->ol_enable == CFG_GRO_ENABLED) {
+		qdf_atomic_set(&dp_ctx->dp_agg_param.rx_aggregation, 1);
+		if (wifi3_0_target) {
+		/* no flush registration needed, it happens in DP thread */
+			dp_ctx->receive_offload_cb = dp_gro_rx_thread;
+		} else {
+			/*ihelium based targets */
+			if (dp_ctx->enable_rxthread)
+				dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(
+							DP_RX_FLUSH_THREAD);
+			else
+				dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(
+							DP_RX_FLUSH_NAPI);
+			dp_ctx->receive_offload_cb = dp_gro_rx_legacy;
+		}
+		dp_info("GRO is enabled");
+	} else if (DP_BUS_BW_CFG(dp_ctx->dp_cfg.enable_tcp_delack)) {
+		/* no Rx offload at all: rely on TCP delayed-ACK tuning */
+		dp_ctx->en_tcp_delack_no_lro = 1;
+		dp_info("TCP Del ACK is enabled");
+	}
+}
+
+/**
+ * dp_rx_ol_send_config() - Send RX offload configuration to FW
+ * @dp_ctx: pointer to DP psoc context
+ *
+ * This function is only used for non lithium targets. Lithium based targets are
+ * sending LRO config to FW in vdev attach implemented in cmn DP layer.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, non zero QDF_STATUS on failure
+ */
+static QDF_STATUS dp_rx_ol_send_config(struct wlan_dp_psoc_context *dp_ctx)
+{
+	struct cdp_lro_hash_config lro_config = {0};
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	/*
+	 * This will enable flow steering and Toeplitz hash
+	 * So enable it for LRO or GRO processing.
+	 */
+	if (dp_ctx->dp_cfg.gro_enable ||
+	    dp_ctx->dp_cfg.lro_enable) {
+		lro_config.lro_enable = 1;
+		lro_config.tcp_flag = QDF_TCPHDR_ACK;
+		lro_config.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
+					   QDF_TCPHDR_RST | QDF_TCPHDR_ACK |
+					   QDF_TCPHDR_URG | QDF_TCPHDR_ECE |
+					   QDF_TCPHDR_CWR;
+	}
+
+	/* Random Toeplitz seeds for IPv4/IPv6 flow hashing */
+	qdf_get_random_bytes(lro_config.toeplitz_hash_ipv4,
+			     (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
+			      LRO_IPV4_SEED_ARR_SZ));
+
+	qdf_get_random_bytes(lro_config.toeplitz_hash_ipv6,
+			     (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
+			      LRO_IPV6_SEED_ARR_SZ));
+
+	status = dp_ctx->sb_ops.dp_lro_config_cmd(dp_ctx->psoc, &lro_config);
+	dp_info("LRO Config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
+		lro_config.lro_enable, lro_config.tcp_flag,
+		lro_config.tcp_flag_mask);
+
+	return status;
+}
+
+/**
+ * dp_rx_ol_init() - Initialize Rx offload (LRO/GRO) mode and callbacks
+ * @dp_ctx: pointer to DP psoc context
+ * @is_wifi3_0_target: true for lithium/beryllium arch based targets
+ *
+ * Resolves the offload mode, registers the matching Rx callback and, for
+ * legacy targets only, pushes the LRO/flow-hash config to firmware.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS dp_rx_ol_init(struct wlan_dp_psoc_context *dp_ctx,
+			 bool is_wifi3_0_target)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	dp_resolve_rx_ol_mode(dp_ctx);
+	dp_register_rx_ol_cb(dp_ctx, is_wifi3_0_target);
+
+	dp_info("ol init");
+	if (!is_wifi3_0_target) {
+		status = dp_rx_ol_send_config(dp_ctx);
+		/* Fix: test QDF_STATUS with the QDF macro and return
+		 * QDF_STATUS_SUCCESS instead of the literal 0.
+		 */
+		if (QDF_IS_STATUS_ERROR(status)) {
+			dp_ctx->ol_enable = 0;
+			dp_err("Failed to send LRO/GRO configuration! %u", status);
+			return status;
+		}
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Latch/clear the "Rx offload disabled due to low throughput" flag */
+void dp_disable_rx_ol_for_low_tput(struct wlan_dp_psoc_context *dp_ctx,
+				   bool disable)
+{
+	qdf_atomic_set(&dp_ctx->disable_rx_ol_in_low_tput, disable ? 1 : 0);
+}
+
+#else /* RECEIVE_OFFLOAD */
+/* Rx offload not compiled in: no-op */
+void dp_disable_rx_ol_for_low_tput(struct wlan_dp_psoc_context *dp_ctx,
+				   bool disable)
+{
+}
+#endif /* RECEIVE_OFFLOAD */
+
+#ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
+/* Stamp the Rx buffer with TSF time via the OS-layer callback */
+static inline void dp_tsf_timestamp_rx(struct wlan_dp_psoc_context *dp_ctx,
+				       qdf_nbuf_t netbuf)
+{
+	dp_ctx->dp_ops.dp_tsf_timestamp_rx(dp_ctx->dp_ops.callback_ctx,
+					   netbuf);
+}
+#else
+/* TSF socket timestamping not compiled in: no-op */
+static inline void dp_tsf_timestamp_rx(struct wlan_dp_psoc_context *dp_ctx,
+				       qdf_nbuf_t netbuf)
+{
+}
+#endif
+
 QDF_STATUS
 QDF_STATUS
-dp_rx_packet_cbk(void *dp_intf_context, qdf_nbuf_t rx_buf)
+dp_rx_thread_gro_flush_ind_cbk(void *intf_ctx, int rx_ctx_id)
+{
+	struct wlan_dp_intf *dp_intf = intf_ctx;
+	enum dp_rx_gro_flush_code gro_flush_code = DP_RX_GRO_NORMAL_FLUSH;
+
+	if (qdf_unlikely((!dp_intf) || (!dp_intf->dp_ctx))) {
+		dp_err("Null params being passed");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* Rx is handled inline for this interface: nothing queued to flush */
+	if (dp_intf->runtime_disable_rx_thread)
+		return QDF_STATUS_SUCCESS;
+
+	/* In low-throughput GRO mode, downgrade to a low-tput flush code */
+	if (dp_is_low_tput_gro_enable(dp_intf->dp_ctx)) {
+		dp_intf->dp_stats.tx_rx_stats.rx_gro_flush_skip++;
+		gro_flush_code = DP_RX_GRO_LOW_TPUT_FLUSH;
+	}
+
+	return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
+				   rx_ctx_id, gro_flush_code);
+}
+
+/**
+ * dp_rx_pkt_thread_enqueue_cbk() - Enqueue an Rx nbuf list to the DP
+ *  RX thread, or deliver inline when the RX thread is runtime-disabled
+ * @intf_ctx: DP interface context
+ * @nbuf_list: chain of Rx buffers
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF error status otherwise
+ */
+QDF_STATUS dp_rx_pkt_thread_enqueue_cbk(void *intf_ctx,
+					qdf_nbuf_t nbuf_list)
+{
+	struct wlan_dp_intf *dp_intf;
+	qdf_nbuf_t nbuf;
+	uint8_t intf_id;
+
+	if (qdf_unlikely(!intf_ctx || !nbuf_list)) {
+		dp_err("Null params being passed");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf = (struct wlan_dp_intf *)intf_ctx;
+	/* is_dp_intf_valid() returns 0 when the interface is valid */
+	if (is_dp_intf_valid(dp_intf))
+		return QDF_STATUS_E_FAILURE;
+
+	if (dp_intf->runtime_disable_rx_thread &&
+	    dp_intf->rx_stack)
+		return dp_intf->rx_stack(dp_intf, nbuf_list);
+
+	/* Tag every buffer with the owning interface before handing the
+	 * whole chain to the RX thread.
+	 */
+	intf_id = dp_intf->intf_id;
+	for (nbuf = nbuf_list; nbuf; nbuf = qdf_nbuf_next(nbuf))
+		qdf_nbuf_cb_update_vdev_id(nbuf, intf_id);
+
+	return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
+}
+
+#ifdef CONFIG_HL_SUPPORT
+/* High-latency targets: no Rx aggregation; push every buffer to the
+ * network stack in process context (NI).
+ */
+QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
+				       qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+
+	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;
+	dp_ctx->no_rx_offload_pkt_cnt++;
+
+	return dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf, DP_NBUF_PUSH_NI);
+}
+#else
+
+#if defined(WLAN_SUPPORT_RX_FISA)
+/**
+ * wlan_dp_set_fisa_disallowed_for_vdev() - Set fisa disallowed bit for a vdev
+ * @soc: DP soc handle
+ * @vdev_id: Vdev id
+ * @rx_ctx_id: rx context id
+ * @val: Enable or disable
+ *
+ * The function sets the fisa disallowed flag for a given vdev
+ *
+ * Return: None
+ */
+static inline
+void wlan_dp_set_fisa_disallowed_for_vdev(ol_txrx_soc_handle soc,
+					  uint8_t vdev_id,
+					  uint8_t rx_ctx_id, uint8_t val)
 {
 {
+	dp_set_fisa_disallowed_for_vdev(soc, vdev_id, rx_ctx_id, val);
+}
+#else
+/* FISA not compiled in: no-op */
+static inline
+void wlan_dp_set_fisa_disallowed_for_vdev(ol_txrx_soc_handle soc,
+					  uint8_t vdev_id,
+					  uint8_t rx_ctx_id, uint8_t val)
+{
+}
+#endif
+
+#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
+/**
+ * dp_rx_check_qdisc_for_intf() - Check if any ingress qdisc is configured
+ *  for a given DP interface
+ * @dp_intf: pointer to DP interface context
+ * @rx_ctx_id: Rx context id
+ *
+ * The function checks if ingress qdisc is registered for a given
+ * net device, and enables/disables GRO (and FISA) accordingly.
+ *
+ * Return: None
+ */
+static void
+dp_rx_check_qdisc_for_intf(struct wlan_dp_intf *dp_intf,
+			   uint8_t rx_ctx_id)
+{
+	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_psoc_callbacks *dp_ops;
+	QDF_STATUS status;
+
+	/*
+	 * Restrict the qdisc based dynamic GRO enable/disable to
+	 * standalone STA mode only. Reset the configuration for
+	 * any other device mode or concurrency.
+	 */
+	if (dp_intf->device_mode != QDF_STA_MODE ||
+	    (qdf_atomic_read(&dp_intf->dp_ctx->rx_skip_qdisc_chk_conc)))
+		goto reset_wl;
+
+	dp_ops = &dp_intf->dp_ctx->dp_ops;
+	status = dp_ops->dp_rx_check_qdisc_configured(dp_intf->dev,
+						      rx_ctx_id);
+	if (QDF_IS_STATUS_SUCCESS(status)) {
+		/* already disallowed: nothing to do */
+		if (qdf_likely(dp_intf->gro_disallowed[rx_ctx_id]))
+			return;
+
+		dp_debug("ingress qdisc/filter configured disable GRO");
+		dp_intf->gro_disallowed[rx_ctx_id] = 1;
+		wlan_dp_set_fisa_disallowed_for_vdev(soc, dp_intf->intf_id,
+						     rx_ctx_id, 1);
+		return;
+	}
+
+reset_wl:
+	if (qdf_unlikely(dp_intf->gro_disallowed[rx_ctx_id])) {
+		dp_debug("ingress qdisc/filter removed enable GRO");
+		wlan_dp_set_fisa_disallowed_for_vdev(soc, dp_intf->intf_id,
+						     rx_ctx_id, 0);
+		dp_intf->gro_disallowed[rx_ctx_id] = 0;
+		dp_intf->gro_flushed[rx_ctx_id] = 0;
+	}
+}
+
+/**
+ * wlan_dp_rx_deliver_to_stack() - Deliver an Rx nbuf to the network stack
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to n/w buff
+ *
+ * Tries GRO/LRO aggregation first for eligible TCP frames, then falls back
+ * to pushing the buffer to the stack in the appropriate context.
+ *
+ * Return: QDF_STATUS_SUCCESS when delivered/aggregated, error otherwise
+ */
+QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
+				       qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
+	int status = QDF_STATUS_E_FAILURE;
+	bool nbuf_receive_offload_ok = false;
+	enum dp_nbuf_push_type push_type;
+	uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
+
+	if (!dp_ctx->dp_agg_param.force_gro_enable)
+		/* rx_ctx_id is already verified for out-of-range */
+		dp_rx_check_qdisc_for_intf(dp_intf, rx_ctx_id);
+
+	/* Only non-cached TCP frames are eligible for GRO/LRO */
+	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf) &&
+	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))
+		nbuf_receive_offload_ok = true;
+
+	if (nbuf_receive_offload_ok && dp_ctx->receive_offload_cb &&
+	    !dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] &&
+	    !dp_intf->gro_flushed[rx_ctx_id] &&
+	    !dp_intf->runtime_disable_rx_thread) {
+		status = dp_ctx->receive_offload_cb(dp_intf, nbuf);
+
+		if (QDF_IS_STATUS_SUCCESS(status)) {
+			dp_intf->dp_stats.tx_rx_stats.rx_aggregated++;
+			return status;
+		}
+
+		if (status == QDF_STATUS_E_GRO_DROP) {
+			dp_intf->dp_stats.tx_rx_stats.rx_gro_dropped++;
+			return status;
+		}
+	}
+
+	/*
+	 * The below case handles the scenario when rx_aggregation is
+	 * re-enabled dynamically, in which case gro_force_flush needs
+	 * to be reset to 0 to allow GRO.
+	 */
+	if (qdf_atomic_read(&dp_ctx->dp_agg_param.rx_aggregation) &&
+	    dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id])
+		dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 0;
+
+	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;
+
+	/* Account for GRO/LRO ineligible packets, mostly UDP */
+	if (qdf_nbuf_get_gso_segs(nbuf) == 0)
+		dp_ctx->no_rx_offload_pkt_cnt++;
+
+	if (qdf_likely((dp_ctx->enable_dp_rx_threads ||
+			dp_ctx->enable_rxthread) &&
+		       !dp_intf->runtime_disable_rx_thread)) {
+		push_type = DP_NBUF_PUSH_BH_DISABLE;
+	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))) {
+		/*
+		 * Frames before peer is registered to avoid contention with
+		 * NAPI softirq.
+		 * Refer fix:
+		 * qcacld-3.0: Do netif_rx_ni() for frames received before
+		 * peer assoc
+		 */
+		push_type = DP_NBUF_PUSH_NI;
+	} else { /* NAPI Context */
+		push_type = DP_NBUF_PUSH_NAPI;
+	}
+
+	return dp_ops->dp_nbuf_push_pkt(nbuf, push_type);
+}
+
+#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
+
+/**
+ * wlan_dp_rx_deliver_to_stack() - Deliver an Rx nbuf to the network stack
+ * @dp_intf: pointer to DP interface
+ * @nbuf: pointer to n/w buff
+ *
+ * Variant without dynamic Rx aggregation: no qdisc/GRO-flush bookkeeping.
+ *
+ * Return: QDF_STATUS_SUCCESS when delivered/aggregated, error otherwise
+ */
+QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
+				       qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
+	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
+	int status = QDF_STATUS_E_FAILURE;
+	bool nbuf_receive_offload_ok = false;
+	enum dp_nbuf_push_type push_type;
+
+	/* Only non-cached TCP frames are eligible for GRO/LRO */
+	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf) &&
+	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))
+		nbuf_receive_offload_ok = true;
+
+	if (nbuf_receive_offload_ok && dp_ctx->receive_offload_cb) {
+		status = dp_ctx->receive_offload_cb(dp_intf, nbuf);
+
+		if (QDF_IS_STATUS_SUCCESS(status)) {
+			dp_intf->dp_stats.tx_rx_stats.rx_aggregated++;
+			return status;
+		}
+
+		if (status == QDF_STATUS_E_GRO_DROP) {
+			dp_intf->dp_stats.tx_rx_stats.rx_gro_dropped++;
+			return status;
+		}
+	}
+
+	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;
+
+	/* Account for GRO/LRO ineligible packets, mostly UDP */
+	if (qdf_nbuf_get_gso_segs(nbuf) == 0)
+		dp_ctx->no_rx_offload_pkt_cnt++;
+
+	if (qdf_likely((dp_ctx->enable_dp_rx_threads ||
+			dp_ctx->enable_rxthread) &&
+		       !dp_intf->runtime_disable_rx_thread)) {
+		push_type = DP_NBUF_PUSH_BH_DISABLE;
+	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))) {
+		/*
+		 * Frames before peer is registered to avoid contention with
+		 * NAPI softirq.
+		 * Refer fix:
+		 * qcacld-3.0: Do netif_rx_ni() for frames received before
+		 * peer assoc
+		 */
+		push_type = DP_NBUF_PUSH_NI;
+	} else { /* NAPI Context */
+		push_type = DP_NBUF_PUSH_NAPI;
+	}
+
+	return dp_ops->dp_nbuf_push_pkt(nbuf, push_type);
+}
+#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
+#endif
+
+/**
+ * dp_is_gratuitous_arp_unsolicited_na() - Classify gratuitous ARP /
+ *  unsolicited NA frames via the OS-layer callback
+ * @dp_ctx: pointer to DP psoc context
+ * @nbuf: Rx buffer to inspect
+ *
+ * Return: true if the callback classifies @nbuf as a gratuitous ARP or
+ *	   unsolicited NA, false otherwise
+ */
+static inline bool
+dp_is_gratuitous_arp_unsolicited_na(struct wlan_dp_psoc_context *dp_ctx,
+				    qdf_nbuf_t nbuf)
+{
+	/* Fix: invoke the callback once. The original called it twice
+	 * (once inside qdf_unlikely() for the check, again for the return
+	 * value), parsing the packet twice per frame for the same answer.
+	 */
+	return dp_ctx->dp_ops.dp_is_gratuitous_arp_unsolicited_na(nbuf);
+}
+
+/**
+ * dp_rx_flush_packet_cbk() - Flush cached/queued Rx packets for a vdev
+ * @dp_intf_context: pointer to DP interface context
+ * @intf_id: interface (vdev) id whose packets are to be flushed
+ *
+ * Flushes FISA aggregates and DP RX thread queues for the given vdev while
+ * holding the interface's active-task count.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
+ */
+QDF_STATUS dp_rx_flush_packet_cbk(void *dp_intf_context, uint8_t intf_id)
+{
+	struct wlan_dp_intf *dp_intf = (struct wlan_dp_intf *)dp_intf_context;
+	struct wlan_dp_psoc_context *dp_ctx;
+	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
+
+	if (qdf_unlikely(!soc))
+		return QDF_STATUS_E_FAILURE;
+
+	/* Fix: validate dp_intf before dereferencing it, consistent with
+	 * the NULL checks in the other DP rx callbacks in this file.
+	 */
+	if (qdf_unlikely(!dp_intf))
+		return QDF_STATUS_E_FAILURE;
+
+	dp_ctx = dp_intf->dp_ctx;
+	if (qdf_unlikely(!dp_ctx))
+		return QDF_STATUS_E_FAILURE;
+
+	qdf_atomic_inc(&dp_intf->num_active_task);
+
+	/* do fisa flush for this vdev */
+	if (dp_ctx->dp_cfg.fisa_enable)
+		wlan_dp_rx_fisa_flush_by_vdev_id((struct dp_soc *)soc, intf_id);
+
+	if (dp_ctx->enable_dp_rx_threads)
+		dp_txrx_flush_pkts_by_vdev_id(soc, intf_id);
+
+	qdf_atomic_dec(&dp_intf->num_active_task);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+#if defined(WLAN_SUPPORT_RX_FISA)
+/* Thin wrappers hiding the cmn-DP struct dp_soc/dp_vdev types from the
+ * DP component callers.
+ */
+QDF_STATUS wlan_dp_rx_fisa_cbk(void *dp_soc,
+			       void *dp_vdev, qdf_nbuf_t nbuf_list)
+{
+	return dp_fisa_rx((struct dp_soc *)dp_soc, (struct dp_vdev *)dp_vdev,
+			  nbuf_list);
+}
+
+QDF_STATUS wlan_dp_rx_fisa_flush_by_ctx_id(void *dp_soc, int ring_num)
+{
+	return dp_rx_fisa_flush_by_ctx_id((struct dp_soc *)dp_soc, ring_num);
+}
+
+QDF_STATUS wlan_dp_rx_fisa_flush_by_vdev_id(void *dp_soc, uint8_t vdev_id)
+{
+	return dp_rx_fisa_flush_by_vdev_id((struct dp_soc *)dp_soc, vdev_id);
+}
+#endif
+
+/**
+ * dp_rx_packet_cbk() - Per-packet Rx handler for the DP component
+ * @dp_intf_context: pointer to DP interface context
+ * @rxBuf: chain of Rx buffers (linked via qdf_nbuf_next)
+ *
+ * Walks the nbuf chain: updates ARP/EAPOL/DHCP protocol stats, drops
+ * gratuitous ARP / unsolicited NA (proxy ARP) and replayed IPv6 mcast
+ * frames, updates TDLS/NUD counters, optionally takes an Rx wakelock,
+ * then delivers each buffer to the stack (or over netlink for EAPOL
+ * when SEND_EAPOL_OVER_NL is set).
+ *
+ * Return: QDF_STATUS_SUCCESS always; per-buffer failures are recorded in
+ *	   stats rather than propagated
+ */
+QDF_STATUS dp_rx_packet_cbk(void *dp_intf_context,
+			    qdf_nbuf_t rxBuf)
+{
+	struct wlan_dp_intf *dp_intf = NULL;
+	struct wlan_dp_psoc_context *dp_ctx = NULL;
+	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
+	qdf_nbuf_t nbuf = NULL;
+	qdf_nbuf_t next = NULL;
+	unsigned int cpu_index;
+	struct qdf_mac_addr *mac_addr, *dest_mac_addr;
+	bool wake_lock = false;
+	bool track_arp = false;
+	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
+	bool is_eapol, send_over_nl;
+	bool is_dhcp;
+	struct dp_tx_rx_stats *stats;
+	QDF_STATUS status;
+	uint8_t pkt_type;
+
+	/* Sanity check on inputs */
+	if (qdf_unlikely((!dp_intf_context) || (!rxBuf))) {
+		dp_err("Null params being passed");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf = (struct wlan_dp_intf *)dp_intf_context;
+	dp_ctx = dp_intf->dp_ctx;
+
+	cpu_index = qdf_get_cpu();
+	stats = &dp_intf->dp_stats.tx_rx_stats;
+
+	next = rxBuf;
+
+	/* Detach and process one nbuf at a time from the chain */
+	while (next) {
+		nbuf = next;
+		next = qdf_nbuf_next(nbuf);
+		qdf_nbuf_set_next(nbuf, NULL);
+		is_eapol = false;
+		is_dhcp = false;
+		send_over_nl = false;
+
+		/* Protocol classification for debug counters */
+		if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
+			if (qdf_nbuf_data_is_arp_rsp(nbuf) &&
+			    (dp_intf->track_arp_ip ==
+			     qdf_nbuf_get_arp_src_ip(nbuf))) {
+				++dp_intf->dp_stats.arp_stats.
+					rx_arp_rsp_count;
+				dp_debug("ARP packet received");
+				track_arp = true;
+			}
+		} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
+			subtype = qdf_nbuf_get_eapol_subtype(nbuf);
+			send_over_nl = true;
+			if (subtype == QDF_PROTO_EAPOL_M1) {
+				++dp_intf->dp_stats.eapol_stats.
+						eapol_m1_count;
+				is_eapol = true;
+			} else if (subtype == QDF_PROTO_EAPOL_M3) {
+				++dp_intf->dp_stats.eapol_stats.
+						eapol_m3_count;
+				is_eapol = true;
+			}
+		} else if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
+			subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
+			if (subtype == QDF_PROTO_DHCP_OFFER) {
+				++dp_intf->dp_stats.dhcp_stats.
+						dhcp_off_count;
+				is_dhcp = true;
+			} else if (subtype == QDF_PROTO_DHCP_ACK) {
+				++dp_intf->dp_stats.dhcp_stats.
+						dhcp_ack_count;
+				is_dhcp = true;
+			}
+		}
+
+		wlan_dp_pkt_add_timestamp(dp_intf, QDF_PKT_RX_DRIVER_EXIT,
+					  nbuf);
+
+		/* track connectivity stats */
+		if (dp_intf->pkt_type_bitmap)
+			dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_intf,
+								 PKT_TYPE_RSP,
+								 &pkt_type);
+
+		if ((dp_intf->conn_info.proxy_arp_service) &&
+		    dp_is_gratuitous_arp_unsolicited_na(dp_ctx, nbuf)) {
+			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
+			/* Remove SKB from internal tracking table before
+			 * submitting it to stack.
+			 */
+			qdf_nbuf_free(nbuf);
+			continue;
+		}
+
+		dp_event_eapol_log(nbuf, QDF_RX);
+		qdf_dp_trace_log_pkt(dp_intf->intf_id, nbuf, QDF_RX,
+				     QDF_TRACE_DEFAULT_PDEV_ID);
+
+		DPTRACE(qdf_dp_trace(nbuf,
+				     QDF_DP_TRACE_RX_PACKET_PTR_RECORD,
+				     QDF_TRACE_DEFAULT_PDEV_ID,
+				     qdf_nbuf_data_addr(nbuf),
+				     sizeof(qdf_nbuf_data(nbuf)), QDF_RX));
+
+		DPTRACE(qdf_dp_trace_data_pkt(nbuf, QDF_TRACE_DEFAULT_PDEV_ID,
+					      QDF_DP_TRACE_RX_PACKET_RECORD,
+					      0, QDF_RX));
+
+		dest_mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
+						QDF_NBUF_DEST_MAC_OFFSET);
+		mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
+						   QDF_NBUF_SRC_MAC_OFFSET);
+
+		/* TDLS rx-packet accounting under a vdev reference */
+		status = wlan_objmgr_vdev_try_get_ref(dp_intf->vdev,
+						      WLAN_TDLS_SB_ID);
+		if (QDF_IS_STATUS_SUCCESS(status)) {
+			ucfg_tdls_update_rx_pkt_cnt(dp_intf->vdev, mac_addr,
+						    dest_mac_addr);
+			wlan_objmgr_vdev_release_ref(dp_intf->vdev,
+						     WLAN_TDLS_SB_ID);
+		}
+
+		if (dp_rx_pkt_tracepoints_enabled())
+			qdf_trace_dp_packet(nbuf, QDF_RX, NULL, 0);
+
+		qdf_nbuf_set_dev(nbuf, dp_intf->dev);
+		qdf_nbuf_set_protocol_eth_tye_trans(nbuf);
+		++stats->per_cpu[cpu_index].rx_packets;
+		QDF_NET_DEV_STATS_INC_RX_PKTS(&dp_intf->stats);
+		/* count aggregated RX frame into stats */
+		QDF_NET_DEV_STATS_RX_PKTS(&dp_intf->stats) +=
+			qdf_nbuf_get_gso_segs(nbuf);
+		QDF_NET_DEV_STATS_RX_BYTES(&dp_intf->stats) +=
+			qdf_nbuf_len(nbuf);
+
+		/* Incr GW Rx count for NUD tracking based on GW mac addr */
+		dp_nud_incr_gw_rx_pkt_cnt(dp_intf, mac_addr);
+
+		/* Check & drop replayed mcast packets (for IPV6) */
+		if (dp_ctx->dp_cfg.multicast_replay_filter &&
+				qdf_nbuf_is_mcast_replay(nbuf)) {
+			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
+			qdf_nbuf_free(nbuf);
+			continue;
+		}
+
+		/* hold configurable wakelock for unicast traffic */
+		if (!dp_is_current_high_throughput(dp_ctx) &&
+		    dp_ctx->dp_cfg.rx_wakelock_timeout &&
+		    dp_intf->conn_info.is_authenticated)
+			wake_lock = dp_is_rx_wake_lock_needed(nbuf);
+
+		if (wake_lock) {
+			cds_host_diag_log_work(&dp_ctx->rx_wake_lock,
+					dp_ctx->dp_cfg.rx_wakelock_timeout,
+					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
+			qdf_wake_lock_timeout_acquire(&dp_ctx->rx_wake_lock,
+					dp_ctx->dp_cfg.rx_wakelock_timeout);
+		}
+
+		/* Remove SKB from internal tracking table before submitting
+		 * it to stack
+		 */
+		qdf_net_buf_debug_release_skb(nbuf);
+
+		dp_tsf_timestamp_rx(dp_ctx, nbuf);
+
+		/* EAPOL frames may be diverted over netlink instead of the
+		 * regular stack delivery path
+		 */
+		if (send_over_nl && SEND_EAPOL_OVER_NL) {
+			if (dp_ctx->dp_ops.dp_send_rx_pkt_over_nl(dp_intf->dev,
+					(u8 *)&dp_intf->conn_info.peer_macaddr,
+								  nbuf, false))
+				qdf_status = QDF_STATUS_SUCCESS;
+			else
+				qdf_status = QDF_STATUS_E_INVAL;
+			qdf_nbuf_dev_kfree(nbuf);
+		} else {
+			qdf_status = wlan_dp_rx_deliver_to_stack(dp_intf, nbuf);
+		}
+
+		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
+			++stats->per_cpu[cpu_index].rx_delivered;
+			if (track_arp)
+				++dp_intf->dp_stats.arp_stats.rx_delivered;
+			if (is_eapol)
+				++dp_intf->dp_stats.eapol_stats.
+				rx_delivered[subtype - QDF_PROTO_EAPOL_M1];
+			else if (is_dhcp)
+				++dp_intf->dp_stats.dhcp_stats.
+				rx_delivered[subtype - QDF_PROTO_DHCP_DISCOVER];
+
+			/* track connectivity stats */
+			if (dp_intf->pkt_type_bitmap)
+				dp_tx_rx_collect_connectivity_stats_info(
+					nbuf, dp_intf,
+					PKT_TYPE_RX_DELIVERED,
+					&pkt_type);
+		} else {
+			++stats->per_cpu[cpu_index].rx_refused;
+			if (track_arp)
+				++dp_intf->dp_stats.arp_stats.rx_refused;
+
+			if (is_eapol)
+				++dp_intf->dp_stats.eapol_stats.
+				       rx_refused[subtype - QDF_PROTO_EAPOL_M1];
+			else if (is_dhcp)
+				++dp_intf->dp_stats.dhcp_stats.
+				  rx_refused[subtype - QDF_PROTO_DHCP_DISCOVER];
+
+			/* track connectivity stats */
+			if (dp_intf->pkt_type_bitmap)
+				dp_tx_rx_collect_connectivity_stats_info(
+					nbuf, dp_intf,
+					PKT_TYPE_RX_REFUSED,
+					&pkt_type);
+		}
+	}
+
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
+

+ 94 - 2
components/dp/dispatcher/inc/wlan_dp_public_struct.h

@@ -32,6 +32,66 @@
 #include <ani_system_defs.h>
 #include <ani_system_defs.h>
 #include "cdp_txrx_ops.h"
 #include "cdp_txrx_ops.h"
 #include <qdf_defer.h>
 #include <qdf_defer.h>
+#include <qdf_types.h>
+
+#define DP_MAX_SUBTYPES_TRACKED	4
+
+enum dp_rx_offld_flush_cb {
+	DP_RX_FLUSH_LRO,
+	DP_RX_FLUSH_THREAD,
+	DP_RX_FLUSH_NAPI,
+};
+
+enum dp_nbuf_push_type {
+	DP_NBUF_PUSH_NI,
+	DP_NBUF_PUSH_NAPI,
+	DP_NBUF_PUSH_BH_DISABLE,
+	DP_NBUF_PUSH_SIMPLE,
+};
+
+/**
+ * struct dp_eapol_stats - eapol debug stats count
+ * @eapol_m1_count: eapol m1 count
+ * @eapol_m2_count: eapol m2 count
+ * @eapol_m3_count: eapol m3 count
+ * @eapol_m4_count: eapol m4 count
+ * @tx_dropped: no of tx frames dropped by host
+ * @tx_noack_cnt: no of frames for which there is no ack
+ * @rx_delivered: no. of frames delivered to network stack
+ * @rx_refused: no of frames not delivered to network stack
+ */
+struct dp_eapol_stats {
+	uint16_t eapol_m1_count;
+	uint16_t eapol_m2_count;
+	uint16_t eapol_m3_count;
+	uint16_t eapol_m4_count;
+	uint16_t tx_dropped[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t tx_noack_cnt[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t rx_delivered[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t rx_refused[DP_MAX_SUBTYPES_TRACKED];
+};
+
+/**
+ * struct dp_dhcp_stats - dhcp debug stats count
+ * @dhcp_dis_count: dhcp discovery count
+ * @dhcp_off_count: dhcp offer count
+ * @dhcp_req_count: dhcp request count
+ * @dhcp_ack_count: dhcp ack count
+ * @tx_dropped: no of tx frames dropped by host
+ * @tx_noack_cnt: no of frames for which there is no ack
+ * @rx_delivered: no. of frames delivered to network stack
+ * @rx_refused: no of frames not delivered to network stack
+ */
+struct dp_dhcp_stats {
+	uint16_t dhcp_dis_count;
+	uint16_t dhcp_off_count;
+	uint16_t dhcp_req_count;
+	uint16_t dhcp_ack_count;
+	uint16_t tx_dropped[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t tx_noack_cnt[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t rx_delivered[DP_MAX_SUBTYPES_TRACKED];
+	uint16_t rx_refused[DP_MAX_SUBTYPES_TRACKED];
+};
 
 
 #ifdef TX_MULTIQ_PER_AC
 #ifdef TX_MULTIQ_PER_AC
 #define TX_GET_QUEUE_IDX(ac, off) (((ac) * TX_QUEUES_PER_AC) + (off))
 #define TX_GET_QUEUE_IDX(ac, off) (((ac) * TX_QUEUES_PER_AC) + (off))
@@ -530,10 +590,41 @@ union wlan_tp_data {
  * @dp_nud_failure_work: Callback API to handle NUD failuire work
  * @dp_nud_failure_work: Callback API to handle NUD failuire work
  */
  */
 struct wlan_dp_psoc_callbacks {
 struct wlan_dp_psoc_callbacks {
-	void (*os_if_dp_gro_rx)(struct sk_buff *skb, uint8_t napi_to_use,
-				bool flush_gro);
 	hdd_cb_handle callback_ctx;
 	hdd_cb_handle callback_ctx;
 
 
+	QDF_STATUS (*dp_get_nw_intf_mac_by_vdev_mac)(struct qdf_mac_addr *mac_addr,
+						     struct qdf_mac_addr *intf_mac);
+	unsigned int (*dp_get_tx_flow_low_watermark)(hdd_cb_handle cb_ctx,
+						     uint8_t intf_id);
+	void (*dp_get_tx_resource)(uint8_t intf_id, struct qdf_mac_addr *mac_addr);
+	void (*dp_get_tsf_time)(uint8_t intf_id,
+				uint64_t input_time, uint64_t *tsf_time);
+
+	void (*dp_tsf_timestamp_rx)(hdd_cb_handle ctx, qdf_nbuf_t nbuf);
+
+	QDF_STATUS (*dp_nbuf_push_pkt)(qdf_nbuf_t nbuf,
+				       enum dp_nbuf_push_type type);
+
+	QDF_STATUS (*dp_rx_napi_gro_flush)(qdf_napi_struct *napi_to_use,
+					   qdf_nbuf_t nbuf,
+					   uint8_t *force_flush);
+	QDF_STATUS (*dp_rx_napi_gro_receive)(qdf_napi_struct *napi_to_use,
+					     qdf_nbuf_t nbuf);
+
+	QDF_STATUS (*dp_lro_rx_cb)(qdf_netdev_t netdev, qdf_nbuf_t nbuf);
+
+	qdf_napi_struct *(*dp_gro_rx_legacy_get_napi)(qdf_nbuf_t nbuf,
+						      bool enable_rx_thread);
+
+	void (*dp_register_rx_offld_flush_cb)(enum dp_rx_offld_flush_cb type);
+
+	QDF_STATUS (*dp_rx_check_qdisc_configured)(qdf_netdev_t dev,
+						   uint8_t rx_ctx_id);
+
+	bool (*dp_is_gratuitous_arp_unsolicited_na)(qdf_nbuf_t nbuf);
+
+	QDF_STATUS (*dp_send_rx_pkt_over_nl)(qdf_netdev_t dev, uint8_t *addr,
+					     qdf_nbuf_t nbuf, bool unecrypted);
 	bool
 	bool
 	(*wlan_dp_sta_get_dot11mode)(hdd_cb_handle context, uint8_t vdev_id,
 	(*wlan_dp_sta_get_dot11mode)(hdd_cb_handle context, uint8_t vdev_id,
 				     enum qca_wlan_802_11_mode *dot11_mode);
 				     enum qca_wlan_802_11_mode *dot11_mode);
@@ -626,6 +717,7 @@ struct wlan_dp_psoc_nb_ops {
  */
  */
 struct wlan_dp_user_config {
 struct wlan_dp_user_config {
 	bool ipa_enable;
 	bool ipa_enable;
+	uint32_t arp_connectivity_map;
 };
 };
 
 
 #endif /* end  of _WLAN_DP_PUBLIC_STRUCT_H_ */
 #endif /* end  of _WLAN_DP_PUBLIC_STRUCT_H_ */

+ 417 - 0
components/dp/dispatcher/inc/wlan_dp_ucfg_api.h

@@ -138,6 +138,260 @@ void ucfg_dp_resume_wlan(struct wlan_objmgr_psoc *psoc);
 QDF_STATUS
 QDF_STATUS
 ucfg_dp_update_config(struct wlan_objmgr_psoc *psoc,
 ucfg_dp_update_config(struct wlan_objmgr_psoc *psoc,
 		      struct wlan_dp_user_config *req);
 		      struct wlan_dp_user_config *req);
+/**
+ * ucfg_dp_wait_complete_tasks() - wait for DP tasks to complete
+ * Called from legacy layer to wait DP tasks completion
+ *
+ * Return: None
+ */
+void
+ucfg_dp_wait_complete_tasks(void);
+
+/**
+ * ucfg_dp_remove_conn_info() - Remove DP STA intf connection info
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: QDF_STATUS
+ */
+void
+ucfg_dp_remove_conn_info(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_conn_info_set_bssid() - set BSSID info in STA intf
+ * @vdev: vdev mapped to STA DP interface
+ * @bssid: BSSID mac
+ *
+ * Return: None
+ */
+void ucfg_dp_conn_info_set_bssid(struct wlan_objmgr_vdev *vdev,
+				 struct qdf_mac_addr *bssid);
+
+/**
+ * ucfg_dp_conn_info_set_arp_service() - set ARP service info
+ * @vdev: vdev mapped to STA DP interface
+ * @proxy_arp_service: ARP service info
+ *
+ * Return: None
+ */
+void ucfg_dp_conn_info_set_arp_service(struct wlan_objmgr_vdev *vdev,
+				       uint8_t proxy_arp_service);
+
+/**
+ * ucfg_dp_conn_info_set_peer_authenticate() - set Peer authenticated state
+ * @vdev: vdev mapped to STA DP interface
+ * @is_authenticated: Peer authenticated info
+ *
+ * Return: None
+ */
+void ucfg_dp_conn_info_set_peer_authenticate(struct wlan_objmgr_vdev *vdev,
+					     uint8_t is_authenticated);
+
+/**
+ * ucfg_dp_conn_info_set_peer_mac() - set peer mac info in DP intf
+ * @vdev: vdev mapped to STA DP interface
+ * @peer_mac: Peer MAC information
+ *
+ * Return: None
+ */
+void ucfg_dp_conn_info_set_peer_mac(struct wlan_objmgr_vdev *vdev,
+				    struct qdf_mac_addr *peer_mac);
+
+/**
+ * ucfg_dp_softap_check_wait_for_tx_eap_pkt() - wait for TX EAP pkt in SAP
+ * @vdev: vdev mapped to SAP DP interface
+ * @mac_addr: Peer MAC address info
+ *
+ * Return: None
+ */
+void ucfg_dp_softap_check_wait_for_tx_eap_pkt(struct wlan_objmgr_vdev *vdev,
+					      struct qdf_mac_addr *mac_addr);
+
+/**
+ * ucfg_dp_update_dhcp_state_on_disassoc() - update DHCP during disassoc
+ * @vdev: vdev mapped to SAP DP interface
+ * @mac_addr: Peer MAC address info
+ *
+ * Return: None
+ */
+void ucfg_dp_update_dhcp_state_on_disassoc(struct wlan_objmgr_vdev *vdev,
+					   struct qdf_mac_addr *mac_addr);
+
+/**
+ * ucfg_dp_set_dfs_cac_tx() - update DFS CAC TX block info
+ * @vdev: vdev mapped to SAP DP interface
+ * @tx_block: true if TX need to be blocked
+ *
+ * Return: None
+ */
+void ucfg_dp_set_dfs_cac_tx(struct wlan_objmgr_vdev *vdev,
+			    bool tx_block);
+
+/**
+ * ucfg_dp_set_bss_state_start() - update BSS state for SAP intf
+ * @vdev: vdev mapped to SAP DP interface
+ * @start: true if BSS state is started
+ *
+ * Return: None
+ */
+void ucfg_dp_set_bss_state_start(struct wlan_objmgr_vdev *vdev, bool start);
+
+/**
+ * ucfg_dp_lro_set_reset() - LRO set/reset in DP
+ * @vdev: vdev mapped to DP interface
+ * @enable_flag: Enable/disable LRO feature
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_lro_set_reset(struct wlan_objmgr_vdev *vdev,
+				 uint8_t enable_flag);
+/**
+ * ucfg_dp_is_ol_enabled() - Get ol enable/disable info
+ * @psoc: PSOC mapped to DP context
+ *
+ * Return: true if OL enabled
+ */
+bool ucfg_dp_is_ol_enabled(struct wlan_objmgr_psoc *psoc);
+
+#ifdef RECEIVE_OFFLOAD
+/**
+ * ucfg_dp_rx_handle_concurrency() - Handle concurrency setting in DP
+ * @psoc: PSOC mapped to DP context
+ * @is_wifi3_0_target: true if it is wifi3.0 target
+ * @is_concurrency: Is concurrency enabled/disabled
+ *
+ * Return: None
+ */
+void ucfg_dp_rx_handle_concurrency(struct wlan_objmgr_psoc *psoc,
+				   bool is_wifi3_0_target,
+				   bool is_concurrency);
+#else
+static inline
+void ucfg_dp_rx_handle_concurrency(struct wlan_objmgr_psoc *psoc,
+				   bool is_wifi3_0_target,
+				   bool is_concurrency) { }
+#endif
+
+/**
+ * ucfg_dp_is_rx_common_thread_enabled() - Get common thread enable/disable info
+ * @psoc: PSOC mapped to DP context
+ *
+ * Return: true if common thread enabled
+ */
+bool ucfg_dp_is_rx_common_thread_enabled(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_dp_is_rx_threads_enabled() - Get RX DP threads info
+ * @psoc: PSOC mapped to DP context
+ *
+ * Return: true if DP RX threads enabled
+ */
+bool ucfg_dp_is_rx_threads_enabled(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_dp_rx_ol_init() - Initialize Rx offload mode (LRO or GRO)
+ * @psoc: PSOC mapped to DP context
+ * @is_wifi3_0_target: true if it is a wifi3.0 target
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_rx_ol_init(struct wlan_objmgr_psoc *psoc,
+			      bool is_wifi3_0_target);
+
+/**
+ * ucfg_dp_init_txrx() - Initialize STA DP init TX/RX
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_init_txrx(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_deinit_txrx() - Deinitialize STA DP init TX/RX
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_deinit_txrx(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_softap_init_txrx() - Initialize SAP DP init TX/RX
+ * @vdev: vdev mapped to SAP DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_softap_init_txrx(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_softap_deinit_txrx() - Deinitialize SAP DP init TX/RX
+ * @vdev: vdev mapped to SAP DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_softap_deinit_txrx(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_start_xmit() - Transmit packet on STA interface
+ * @nbuf: n/w buffer to transmitted
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS
+ucfg_dp_start_xmit(qdf_nbuf_t nbuf, struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_rx_packet_cbk() - Receive packet on STA interface
+ * @nbuf: n/w buffer to be received
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_rx_packet_cbk(struct wlan_objmgr_vdev *vdev,
+				 qdf_nbuf_t nbuf);
+
+/**
+ * ucfg_dp_tx_timeout() - called during transmission timeout on STA
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: None
+ */
+void ucfg_dp_tx_timeout(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_softap_tx_timeout() - called during transmission timeout on SAP
+ * @vdev: vdev mapped to SAP DP interface
+ *
+ * Return: None
+ */
+void ucfg_dp_softap_tx_timeout(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_softap_start_xmit() - Transmit packet on SAP interface
+ * @nbuf: n/w buffer to transmitted
+ * @vdev: vdev mapped to SAP DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS
+ucfg_dp_softap_start_xmit(qdf_nbuf_t nbuf, struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_get_dev_stats() - Get netdev stats info
+ * @intf_addr: DP interface MAC address
+ *
+ * Return: qdf_net_dev_stats info
+ */
+qdf_net_dev_stats *ucfg_dp_get_dev_stats(struct qdf_mac_addr *intf_addr);
+
+/**
+ * ucfg_dp_inc_rx_pkt_stats() - DP increment RX pkt stats
+ * @vdev: VDEV mapped to DP interface
+ * @pkt_len: packet length to be incremented in stats
+ * @delivered: true if the packet was delivered to the network stack
+ *
+ * Return: None
+ */
+void ucfg_dp_inc_rx_pkt_stats(struct wlan_objmgr_vdev *vdev,
+			      uint32_t pkt_len,
+			      bool delivered);
 
 
 /**
 /**
  * ucfg_dp_get_rx_softirq_yield_duration() - Get rx soft IRQ yield duration
  * ucfg_dp_get_rx_softirq_yield_duration() - Get rx soft IRQ yield duration
@@ -154,6 +408,72 @@ ucfg_dp_get_rx_softirq_yield_duration(struct wlan_objmgr_psoc *psoc);
  */
  */
 void ucfg_dp_register_rx_mic_error_ind_handler(void *soc);
 void ucfg_dp_register_rx_mic_error_ind_handler(void *soc);
 
 
+/**
+ * ucfg_dp_sta_register_txrx_ops() - Register ops for TX/RX operations in STA
+ * @vdev: vdev mapped to STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_sta_register_txrx_ops(struct wlan_objmgr_vdev *vdev);
+
+#ifdef FEATURE_WLAN_TDLS
+/**
+ * ucfg_dp_tdlsta_register_txrx_ops() - Register ops for TX/RX operations
+ * @vdev: vdev mapped to TDLS STA DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_tdlsta_register_txrx_ops(struct wlan_objmgr_vdev *vdev);
+#else
+static inline
+QDF_STATUS ucfg_dp_tdlsta_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
+/**
+ * ucfg_dp_ocb_register_txrx_ops() - Register ops for TX/RX operations
+ * @vdev: vdev mapped to OCB DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_ocb_register_txrx_ops(struct wlan_objmgr_vdev *vdev);
+
+#ifdef FEATURE_MONITOR_MODE_SUPPORT
+/**
+ * ucfg_dp_mon_register_txrx_ops() - Register ops for TX/RX operations
+ * @vdev: vdev mapped to Monitor mode DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_mon_register_txrx_ops(struct wlan_objmgr_vdev *vdev);
+#else
+static inline
+QDF_STATUS ucfg_dp_mon_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
+/**
+ * ucfg_dp_softap_register_txrx_ops() - Register ops for TX/RX operations
+ * @vdev: vdev mapped to SAP mode DP interface
+ * @txrx_ops: pointer to TX/RX ops structure to be filled and registered
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS ucfg_dp_softap_register_txrx_ops(struct wlan_objmgr_vdev *vdev,
+					    struct ol_txrx_ops *txrx_ops);
+
+/**
+ * ucfg_dp_register_pkt_capture_callbacks() - Register ops for pkt capture operations
+ * @vdev: vdev mapped to DP interface
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+QDF_STATUS
+ucfg_dp_register_pkt_capture_callbacks(struct wlan_objmgr_vdev *vdev);
+
 /**
 /**
  * ucfg_dp_bbm_context_init() - Initialize BBM context
  * ucfg_dp_bbm_context_init() - Initialize BBM context
  * @psoc: psoc handle
  * @psoc: psoc handle
@@ -497,6 +817,14 @@ void ucfg_dp_clear_dns_payload_value(struct wlan_objmgr_vdev *vdev);
 void ucfg_dp_set_pkt_type_bitmap_value(struct wlan_objmgr_vdev *vdev,
 void ucfg_dp_set_pkt_type_bitmap_value(struct wlan_objmgr_vdev *vdev,
 				       uint32_t value);
 				       uint32_t value);
 
 
+/**
+ * ucfg_dp_intf_get_pkt_type_bitmap_value() - Get packet type bitmap info
+ * @intf_ctx: DP interface context
+ *
+ * Return: bitmap information
+ */
+uint32_t ucfg_dp_intf_get_pkt_type_bitmap_value(void *intf_ctx);
+
 /**
 /**
  * ucfg_dp_set_track_dest_ipv4_value() - Set track_dest_ipv4 value
  * ucfg_dp_set_track_dest_ipv4_value() - Set track_dest_ipv4 value
  * @vdev: vdev context
  * @vdev: vdev context
@@ -732,4 +1060,93 @@ uint32_t ucfg_dp_get_bus_bw_compute_interval(struct wlan_objmgr_psoc *psoc);
  * Return: current bandwidth level
  * Return: current bandwidth level
  */
  */
 int ucfg_dp_get_current_throughput_level(struct wlan_objmgr_psoc *psoc);
 int ucfg_dp_get_current_throughput_level(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_dp_get_txrx_stats() - get DP component TX/RX stats
+ * @vdev: vdev handle
+ * @dp_stats : dp_stats pointer
+ *
+ * This function update dp_stats pointer with DP component
+ * txrx stats
+ * Return: 0 on success
+ */
+QDF_STATUS ucfg_dp_get_txrx_stats(struct wlan_objmgr_vdev *vdev,
+				  struct dp_tx_rx_stats *dp_stats);
+
+/**
+ * ucfg_dp_reset_cont_txtimeout_cnt() - Reset Tx Timeout count
+ * @vdev: vdev handle
+ *
+ * Return: None
+ */
+void ucfg_dp_reset_cont_txtimeout_cnt(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * ucfg_dp_set_rx_thread_affinity() - Set rx thread affinity mask
+ * @psoc: psoc handle
+ *
+ * Return: None
+ */
+void ucfg_dp_set_rx_thread_affinity(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_dp_get_disable_rx_ol_val() - Get Rx OL concurrency value
+ * @psoc: psoc handle
+ * @disable_conc : disable rx OL concurrency value
+ * @disable_low_tput : disable rx OL low tput value
+ *
+ * this function reads and update value in pointer variable
+ * passed as arguments to function.
+ *
+ * Return: None
+ */
+
+void ucfg_dp_get_disable_rx_ol_val(struct wlan_objmgr_psoc *psoc,
+				   uint8_t *disable_conc,
+				   uint8_t *disable_low_tput);
+
+/**
+ * ucfg_dp_get_rx_aggregation_val() - Get Rx aggregation values
+ * @psoc: psoc handle
+ *
+ * Return: Rx aggregation value
+ */
+uint32_t ucfg_dp_get_rx_aggregation_val(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_dp_set_rx_aggregation_val() - Set rx aggregation value
+ * @psoc: psoc handle
+ * @value : value to be set
+ *
+ * Return: None
+ */
+void ucfg_dp_set_rx_aggregation_val(struct wlan_objmgr_psoc *psoc,
+				    uint32_t value);
+
+/**
+ * ucfg_dp_set_force_gro_enable() - Set force gro enable
+ * @psoc: psoc handle
+ * @value : value to be set
+ *
+ * Return: None
+ */
+void ucfg_dp_set_force_gro_enable(struct wlan_objmgr_psoc *psoc, bool value);
+
+/**
+ * ucfg_dp_runtime_disable_rx_thread() - Disable rx thread
+ * @vdev: vdev handle mapped to DP interface
+ * @value : value to be set (true/false)
+ *
+ * Return: None
+ */
+void ucfg_dp_runtime_disable_rx_thread(struct wlan_objmgr_vdev *vdev,
+				       bool value);
+
+/**
+ * ucfg_dp_get_napi_enabled() - Get NAPI enabled/disabled info
+ * @psoc: psoc handle mapped to DP context
+ *
+ * Return: true if NAPI enabled
+ */
+bool ucfg_dp_get_napi_enabled(struct wlan_objmgr_psoc *psoc);
 #endif /* _WLAN_DP_UCFG_API_H_ */
 #endif /* _WLAN_DP_UCFG_API_H_ */

+ 968 - 4
components/dp/dispatcher/src/wlan_dp_ucfg_api.c

@@ -24,6 +24,7 @@
 #include "wlan_ipa_ucfg_api.h"
 #include "wlan_ipa_ucfg_api.h"
 #include "wlan_dp_main.h"
 #include "wlan_dp_main.h"
 #include "wlan_dp_objmgr.h"
 #include "wlan_dp_objmgr.h"
+#include "wlan_pmo_obj_mgmt_api.h"
 #include "cdp_txrx_cmn.h"
 #include "cdp_txrx_cmn.h"
 #include "cfg_ucfg_api.h"
 #include "cfg_ucfg_api.h"
 #include "wlan_pmo_obj_mgmt_api.h"
 #include "wlan_pmo_obj_mgmt_api.h"
@@ -31,6 +32,9 @@
 #include "wlan_dp_bus_bandwidth.h"
 #include "wlan_dp_bus_bandwidth.h"
 #include "wlan_dp_periodic_sta_stats.h"
 #include "wlan_dp_periodic_sta_stats.h"
 #include "wlan_dp_nud_tracking.h"
 #include "wlan_dp_nud_tracking.h"
+#include "wlan_dp_txrx.h"
+#include "wlan_nlink_common.h"
+#include "wlan_pkt_capture_ucfg_api.h"
 
 
 void ucfg_dp_update_inf_mac(struct wlan_objmgr_psoc *psoc,
 void ucfg_dp_update_inf_mac(struct wlan_objmgr_psoc *psoc,
 			    struct qdf_mac_addr *cur_mac,
 			    struct qdf_mac_addr *cur_mac,
@@ -39,13 +43,13 @@ void ucfg_dp_update_inf_mac(struct wlan_objmgr_psoc *psoc,
 	struct wlan_dp_intf *dp_intf;
 	struct wlan_dp_intf *dp_intf;
 	struct wlan_dp_psoc_context *dp_ctx;
 	struct wlan_dp_psoc_context *dp_ctx;
 
 
-	dp_info("MAC update");
 	dp_ctx =  dp_psoc_get_priv(psoc);
 	dp_ctx =  dp_psoc_get_priv(psoc);
 
 
 	dp_intf = dp_get_intf_by_macaddr(dp_ctx, cur_mac);
 	dp_intf = dp_get_intf_by_macaddr(dp_ctx, cur_mac);
 	if (!dp_intf) {
 	if (!dp_intf) {
 		dp_err("DP interface not found addr:" QDF_MAC_ADDR_FMT,
 		dp_err("DP interface not found addr:" QDF_MAC_ADDR_FMT,
 		       QDF_MAC_ADDR_REF(cur_mac));
 		       QDF_MAC_ADDR_REF(cur_mac));
+		QDF_BUG(0);
 		return;
 		return;
 	}
 	}
 
 
@@ -85,6 +89,7 @@ ucfg_dp_create_intf(struct wlan_objmgr_psoc *psoc,
 	dp_periodic_sta_stats_mutex_create(dp_intf);
 	dp_periodic_sta_stats_mutex_create(dp_intf);
 	dp_nud_init_tracking(dp_intf);
 	dp_nud_init_tracking(dp_intf);
 	dp_mic_init_work(dp_intf);
 	dp_mic_init_work(dp_intf);
+	qdf_atomic_init(&dp_ctx->num_latency_critical_clients);
 
 
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
@@ -401,24 +406,374 @@ ucfg_dp_store_qdf_dev(struct wlan_objmgr_psoc *psoc)
 
 
 QDF_STATUS ucfg_dp_psoc_open(struct wlan_objmgr_psoc *psoc)
 QDF_STATUS ucfg_dp_psoc_open(struct wlan_objmgr_psoc *psoc)
 {
 {
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return QDF_STATUS_E_FAILURE;
+	}
+
 	ucfg_dp_store_qdf_dev(psoc);
 	ucfg_dp_store_qdf_dev(psoc);
 	dp_rtpm_tput_policy_init(psoc);
 	dp_rtpm_tput_policy_init(psoc);
 	dp_register_pmo_handler();
 	dp_register_pmo_handler();
 	dp_trace_init(psoc);
 	dp_trace_init(psoc);
 	dp_bus_bandwidth_init(psoc);
 	dp_bus_bandwidth_init(psoc);
+	qdf_wake_lock_create(&dp_ctx->rx_wake_lock, "qcom_rx_wakelock");
 
 
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
 
 
 QDF_STATUS ucfg_dp_psoc_close(struct wlan_objmgr_psoc *psoc)
 QDF_STATUS ucfg_dp_psoc_close(struct wlan_objmgr_psoc *psoc)
 {
 {
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return QDF_STATUS_E_FAILURE;
+	}
+
 	dp_rtpm_tput_policy_deinit(psoc);
 	dp_rtpm_tput_policy_deinit(psoc);
 	dp_unregister_pmo_handler();
 	dp_unregister_pmo_handler();
 	dp_bus_bandwidth_deinit(psoc);
 	dp_bus_bandwidth_deinit(psoc);
+	qdf_wake_lock_destroy(&dp_ctx->rx_wake_lock);
 
 
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
 
 
+void ucfg_dp_suspend_wlan(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+	struct wlan_dp_intf *dp_intf, *dp_intf_next = NULL;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return;
+	}
+
+	/* Mark the whole DP layer suspended ... */
+	dp_ctx->wlan_suspended = true;
+
+	/* ... and block TX on every interface until resume clears the bit */
+	dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
+		dp_intf->sap_tx_block_mask |= WLAN_DP_SUSPEND;
+	}
+}
+
+void ucfg_dp_resume_wlan(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+	struct wlan_dp_intf *dp_intf, *dp_intf_next = NULL;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return;
+	}
+
+	/* Clear the suspend flag set by ucfg_dp_suspend_wlan() ... */
+	dp_ctx->wlan_suspended = false;
+
+	/* ... and unblock TX on every interface (clear only the suspend bit) */
+	dp_for_each_intf_held_safe(dp_ctx, dp_intf, dp_intf_next) {
+		dp_intf->sap_tx_block_mask &= ~WLAN_DP_SUSPEND;
+	}
+}
+
+void ucfg_dp_wait_complete_tasks(void)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	/* Thin ucfg wrapper: fetch the global DP context and delegate the
+	 * wait for in-flight DP tasks (e.g. TX) to the core implementation.
+	 */
+	dp_ctx =  dp_get_context();
+	dp_wait_complete_tasks(dp_ctx);
+}
+
+/*
+ * Connection-info helpers: the legacy layer calls these during
+ * connect/disconnect to keep the DP STA interface state in sync.
+ */
+
+void ucfg_dp_remove_conn_info(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Wipe the cached STA connection info (BSSID, peer MAC, auth state);
+	 * called on disconnect so stale data is not reused.
+	 */
+	qdf_mem_zero(&dp_intf->conn_info,
+		     sizeof(struct wlan_dp_conn_info));
+}
+
+void ucfg_dp_conn_info_set_bssid(struct wlan_objmgr_vdev *vdev,
+				 struct qdf_mac_addr *bssid)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Cache the connected AP's BSSID in the interface connection info */
+	qdf_copy_macaddr(&dp_intf->conn_info.bssid, bssid);
+}
+
+void ucfg_dp_conn_info_set_arp_service(struct wlan_objmgr_vdev *vdev,
+				       uint8_t proxy_arp_service)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Record whether the AP advertises proxy ARP service */
+	dp_intf->conn_info.proxy_arp_service = proxy_arp_service;
+}
+
+void ucfg_dp_conn_info_set_peer_authenticate(struct wlan_objmgr_vdev *vdev,
+					     uint8_t is_authenticated)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Record the peer's 802.1X/key-install authentication state */
+	dp_intf->conn_info.is_authenticated = is_authenticated;
+}
+
+void ucfg_dp_conn_info_set_peer_mac(struct wlan_objmgr_vdev *vdev,
+				    struct qdf_mac_addr *peer_mac)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Cache the peer MAC used by the TX/RX fast path for this STA */
+	qdf_copy_macaddr(&dp_intf->conn_info.peer_macaddr, peer_mac);
+}
+
+void ucfg_dp_softap_check_wait_for_tx_eap_pkt(struct wlan_objmgr_vdev *vdev,
+					      struct qdf_mac_addr *mac_addr)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Delegate to DP core: wait for a pending EAP frame to the given
+	 * peer to finish transmission before the SAP proceeds.
+	 */
+	dp_softap_check_wait_for_tx_eap_pkt(dp_intf, mac_addr);
+}
+
+void ucfg_dp_update_dhcp_state_on_disassoc(struct wlan_objmgr_vdev *vdev,
+					   struct qdf_mac_addr *mac_addr)
+{
+	struct wlan_dp_intf *dp_intf;
+	struct wlan_objmgr_peer *peer;
+	struct wlan_dp_sta_info *stainfo;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	peer = wlan_objmgr_get_peer_by_mac(dp_intf->dp_ctx->psoc,
+					   mac_addr->bytes,
+					   WLAN_DP_ID);
+	if (!peer) {
+		/* QDF_MAC_ADDR_REF() expects the raw byte array; passing the
+		 * struct qdf_mac_addr pointer itself mismatches the format
+		 * arguments and logs garbage.
+		 */
+		dp_err("Peer object not found mac:" QDF_MAC_ADDR_FMT,
+		       QDF_MAC_ADDR_REF(mac_addr->bytes));
+		return;
+	}
+
+	stainfo = dp_get_peer_priv_obj(peer);
+	if (!stainfo) {
+		/* No DP STA private object: nothing to update, drop the ref */
+		wlan_objmgr_peer_release_ref(peer, WLAN_DP_ID);
+		return;
+	}
+
+	/* Send DHCP STOP indication to FW if negotiation was in progress */
+	stainfo->dhcp_phase = DHCP_PHASE_ACK;
+	if (stainfo->dhcp_nego_status == DHCP_NEGO_IN_PROGRESS)
+		dp_post_dhcp_ind(dp_intf,
+				 stainfo->sta_mac.bytes,
+				 0);
+	stainfo->dhcp_nego_status = DHCP_NEGO_STOP;
+	wlan_objmgr_peer_release_ref(peer, WLAN_DP_ID);
+}
+
+void ucfg_dp_set_dfs_cac_tx(struct wlan_objmgr_vdev *vdev, bool tx_block)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* Set/clear only the DFS-CAC bit; other TX block reasons untouched */
+	if (tx_block)
+		dp_intf->sap_tx_block_mask |= DP_TX_DFS_CAC_BLOCK;
+	else
+		dp_intf->sap_tx_block_mask &= ~DP_TX_DFS_CAC_BLOCK;
+}
+
+void ucfg_dp_set_bss_state_start(struct wlan_objmgr_vdev *vdev, bool start)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return;
+	}
+
+	/* BSS start unblocks SAP TX; BSS stop blocks it via the SAP-stop bit */
+	if (start) {
+		dp_intf->sap_tx_block_mask &= ~DP_TX_SAP_STOP;
+		dp_intf->bss_state = BSS_INTF_START;
+	} else {
+		dp_intf->sap_tx_block_mask |= DP_TX_SAP_STOP;
+		dp_intf->bss_state = BSS_INTF_STOP;
+	}
+}
+
+QDF_STATUS ucfg_dp_lro_set_reset(struct wlan_objmgr_vdev *vdev,
+				 uint8_t enable_flag)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Delegate LRO enable/disable to the DP core for this interface */
+	return dp_lro_set_reset(dp_intf, enable_flag);
+}
+
+bool ucfg_dp_is_ol_enabled(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		/* bool API: use false, not the integer literal 0 */
+		return false;
+	}
+
+	/* RX offload (LRO/GRO) enable state cached in the DP context */
+	return dp_ctx->ol_enable;
+}
+
+#ifdef RECEIVE_OFFLOAD
+void ucfg_dp_rx_handle_concurrency(struct wlan_objmgr_psoc *psoc,
+				   bool is_wifi3_0_target,
+				   bool is_concurrency)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return;
+	}
+
+	if (is_wifi3_0_target) {
+		/*
+		 * Do not disable rx offload on concurrency for lithium and
+		 * beryllium based targets; only skip the qdisc check while
+		 * concurrency is active.
+		 */
+		if (is_concurrency)
+			qdf_atomic_set(&dp_ctx->rx_skip_qdisc_chk_conc, 1);
+		else
+			qdf_atomic_set(&dp_ctx->rx_skip_qdisc_chk_conc, 0);
+
+		return;
+	}
+
+	/* Legacy targets: nothing to do when RX offload is not enabled */
+	if (!dp_ctx->ol_enable)
+		return;
+
+	if (is_concurrency) {
+		/* LRO gets disabled in concurrency: compensate by enabling
+		 * TCP delayed-ACK tuning, if configured.
+		 */
+		if (DP_BUS_BW_CFG(dp_ctx->dp_cfg.enable_tcp_delack)) {
+			struct wlan_rx_tp_data rx_tp_data;
+
+			dp_info("Enable TCP delack as LRO disabled in concurrency");
+			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
+			rx_tp_data.level =
+				DP_BUS_BW_GET_RX_LVL(dp_ctx);
+			wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
+			dp_ctx->en_tcp_delack_no_lro = 1;
+		}
+		qdf_atomic_set(&dp_ctx->disable_rx_ol_in_concurrency, 1);
+	} else {
+		/* Concurrency ended: LRO is usable again, undo the delack
+		 * workaround and re-enable RX offload.
+		 */
+		if (DP_BUS_BW_CFG(dp_ctx->dp_cfg.enable_tcp_delack)) {
+			dp_info("Disable TCP delack as LRO is enabled");
+			dp_ctx->en_tcp_delack_no_lro = 0;
+			dp_reset_tcp_delack(psoc);
+		}
+		qdf_atomic_set(&dp_ctx->disable_rx_ol_in_concurrency, 0);
+	}
+}
+
+QDF_STATUS ucfg_dp_rx_ol_init(struct wlan_objmgr_psoc *psoc,
+			      bool is_wifi3_0_target)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Initialize RX offload mode (LRO or GRO) in the DP core */
+	return dp_rx_ol_init(dp_ctx, is_wifi3_0_target);
+}
+#else /* RECEIVE_OFFLOAD */
+
+/* Stub when RECEIVE_OFFLOAD is not compiled in: both parameters unused */
+QDF_STATUS ucfg_dp_rx_ol_init(struct wlan_objmgr_psoc *psoc,
+			      bool is_wifi3_0_target)
+{
+	dp_err("Rx_OL, LRO/GRO not supported");
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
+bool ucfg_dp_is_rx_common_thread_enabled(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		/* Bug fix: returning QDF_STATUS_E_INVAL (non-zero) from a
+		 * bool function reads as "enabled" on failure; report false.
+		 */
+		return false;
+	}
+
+	return dp_ctx->enable_rxthread;
+}
+
+bool ucfg_dp_is_rx_threads_enabled(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx = dp_psoc_get_priv(psoc);
+	if (!dp_ctx) {
+		dp_err("DP context not found");
+		/* Bug fix: returning QDF_STATUS_E_INVAL (non-zero) from a
+		 * bool function reads as "enabled" on failure; report false.
+		 */
+		return false;
+	}
+
+	return dp_ctx->enable_dp_rx_threads;
+}
+
 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
 /**
 /**
  * dp_get_config_rx_softirq_limits() - Update DP rx softirq limit config
  * dp_get_config_rx_softirq_limits() - Update DP rx softirq limit config
@@ -484,9 +839,11 @@ ucfg_dp_update_config(struct wlan_objmgr_psoc *psoc,
 
 
 	dp_ctx =  dp_psoc_get_priv(psoc);
 	dp_ctx =  dp_psoc_get_priv(psoc);
 
 
+	dp_ctx->arp_connectivity_map = req->arp_connectivity_map;
 	soc = cds_get_context(QDF_MODULE_ID_SOC);
 	soc = cds_get_context(QDF_MODULE_ID_SOC);
 	params.tso_enable = cfg_get(psoc, CFG_DP_TSO);
 	params.tso_enable = cfg_get(psoc, CFG_DP_TSO);
-	params.lro_enable = cfg_get(psoc, CFG_DP_LRO);
+	dp_ctx->dp_cfg.lro_enable = cfg_get(psoc, CFG_DP_LRO);
+	params.lro_enable = dp_ctx->dp_cfg.lro_enable;
 
 
 	dp_get_config_queue_threshold(psoc, &params);
 	dp_get_config_queue_threshold(psoc, &params);
 	params.flow_steering_enable =
 	params.flow_steering_enable =
@@ -499,7 +856,8 @@ ucfg_dp_update_config(struct wlan_objmgr_psoc *psoc,
 	params.tcp_udp_checksumoffload =
 	params.tcp_udp_checksumoffload =
 		cfg_get(psoc, CFG_DP_TCP_UDP_CKSUM_OFFLOAD);
 		cfg_get(psoc, CFG_DP_TCP_UDP_CKSUM_OFFLOAD);
 	params.ipa_enable = req->ipa_enable;
 	params.ipa_enable = req->ipa_enable;
-	params.gro_enable = cfg_get(psoc, CFG_DP_GRO);
+	dp_ctx->dp_cfg.gro_enable = cfg_get(psoc, CFG_DP_GRO);
+	params.gro_enable = dp_ctx->dp_cfg.gro_enable;
 	params.tx_comp_loop_pkt_limit = cfg_get(psoc,
 	params.tx_comp_loop_pkt_limit = cfg_get(psoc,
 						CFG_DP_TX_COMP_LOOP_PKT_LIMIT);
 						CFG_DP_TX_COMP_LOOP_PKT_LIMIT);
 	params.rx_reap_loop_pkt_limit = cfg_get(psoc,
 	params.rx_reap_loop_pkt_limit = cfg_get(psoc,
@@ -525,6 +883,441 @@ ucfg_dp_get_rx_softirq_yield_duration(struct wlan_objmgr_psoc *psoc)
 	return dp_ctx->dp_cfg.rx_softirq_max_yield_duration_ns;
 	return dp_ctx->dp_cfg.rx_softirq_max_yield_duration_ns;
 }
 }
 
 
+#if defined(WLAN_SUPPORT_RX_FISA)
+/**
+ * dp_rx_register_fisa_ops() - Register FISA RX callback functions
+ * @txrx_ops: operations handle holding callback functions
+ *
+ * Plugs the FISA aggregation handler and the per-context flush handler
+ * into @txrx_ops so the DP RX path can perform flow-based aggregation.
+ *
+ * Return: None
+ */
+static inline void
+dp_rx_register_fisa_ops(struct ol_txrx_ops *txrx_ops)
+{
+	txrx_ops->rx.osif_fisa_rx = wlan_dp_rx_fisa_cbk;
+	txrx_ops->rx.osif_fisa_flush = wlan_dp_rx_fisa_flush_by_ctx_id;
+}
+#else
+/* Stub when FISA support is compiled out: leaves the ops untouched */
+static inline void
+dp_rx_register_fisa_ops(struct ol_txrx_ops *txrx_ops)
+{
+}
+#endif
+
+#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
+/**
+ * wlan_dp_get_tsf_time() - Convert a host time to TSF time for the interface
+ * @dp_intf_ctx: DP interface context (opaque struct wlan_dp_intf pointer)
+ * @input_time: host timestamp to be converted
+ * @tsf_time: out parameter receiving the corresponding TSF time
+ *
+ * Delegates to the OS-interface dp_get_tsf_time callback registered by HDD.
+ *
+ * Return: QDF_STATUS_SUCCESS; when CONFIG_DP_PKT_ADD_TIMESTAMP is not
+ *	   defined, *tsf_time is set to 0 and QDF_STATUS_E_NOSUPPORT is
+ *	   returned.
+ */
+static QDF_STATUS wlan_dp_get_tsf_time(void *dp_intf_ctx,
+				       uint64_t input_time,
+				       uint64_t *tsf_time)
+{
+	struct wlan_dp_intf *dp_intf = (struct wlan_dp_intf *)dp_intf_ctx;
+	struct wlan_dp_psoc_callbacks *dp_ops = &dp_intf->dp_ctx->dp_ops;
+
+	dp_ops->dp_get_tsf_time(dp_intf->intf_id,
+				input_time,
+				tsf_time);
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static QDF_STATUS wlan_dp_get_tsf_time(void *dp_intf_ctx,
+				       uint64_t input_time,
+				       uint64_t *tsf_time)
+{
+	*tsf_time = 0;
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
+/**
+ * ucfg_dp_sta_register_txrx_ops() - Register STA mode TX/RX ops with CDP
+ * @vdev: objmgr vdev handle
+ *
+ * Selects the RX delivery model (DP RX thread vs. in-line), optionally
+ * hooks FISA handlers, registers TX completion and TSF callbacks with
+ * cdp_vdev_register(), and caches the TX function returned by CDP.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS ucfg_dp_sta_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	struct ol_txrx_ops txrx_ops;
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Register the vdev transmit and receive functions */
+	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
+
+	if (dp_intf->dp_ctx->enable_dp_rx_threads) {
+		/*
+		 * RX-thread model: packets are enqueued to the DP RX thread,
+		 * which later delivers them to the stack via rx_stack.
+		 */
+		txrx_ops.rx.rx = dp_rx_pkt_thread_enqueue_cbk;
+		txrx_ops.rx.rx_stack = dp_rx_packet_cbk;
+		txrx_ops.rx.rx_flush = dp_rx_flush_packet_cbk;
+		txrx_ops.rx.rx_gro_flush = dp_rx_thread_gro_flush_ind_cbk;
+		dp_intf->rx_stack = dp_rx_packet_cbk;
+	} else {
+		/* In-line model: deliver straight from the RX context */
+		txrx_ops.rx.rx = dp_rx_packet_cbk;
+		txrx_ops.rx.rx_stack = NULL;
+		txrx_ops.rx.rx_flush = NULL;
+	}
+
+	if (dp_intf->dp_ctx->dp_cfg.fisa_enable &&
+		(dp_intf->device_mode != QDF_MONITOR_MODE)) {
+		dp_debug("FISA feature enabled");
+		dp_rx_register_fisa_ops(&txrx_ops);
+	}
+
+	txrx_ops.rx.stats_rx = dp_tx_rx_collect_connectivity_stats_info;
+
+	txrx_ops.tx.tx_comp = dp_sta_notify_tx_comp_cb;
+	txrx_ops.tx.tx = NULL;
+	txrx_ops.get_tsf_time = wlan_dp_get_tsf_time;
+	/* cdp_vdev_register() fills txrx_ops.tx.tx on success */
+	cdp_vdev_register(soc, dp_intf->intf_id, (ol_osif_vdev_handle)dp_intf,
+			  &txrx_ops);
+	if (!txrx_ops.tx.tx) {
+		dp_err("vdev register fail");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf->tx_fn = txrx_ops.tx.tx;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_tdlsta_register_txrx_ops() - Register TDLS-STA TX/RX ops with CDP
+ * @vdev: objmgr vdev handle
+ *
+ * Same registration flow as the STA path (RX-thread vs. in-line RX, FISA,
+ * TX completion callback), but without the TSF time callback.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS ucfg_dp_tdlsta_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	struct ol_txrx_ops txrx_ops;
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Register the vdev transmit and receive functions */
+	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
+	if (dp_intf->dp_ctx->enable_dp_rx_threads) {
+		/* RX-thread model: enqueue to DP RX thread */
+		txrx_ops.rx.rx = dp_rx_pkt_thread_enqueue_cbk;
+		txrx_ops.rx.rx_stack = dp_rx_packet_cbk;
+		txrx_ops.rx.rx_flush = dp_rx_flush_packet_cbk;
+		txrx_ops.rx.rx_gro_flush = dp_rx_thread_gro_flush_ind_cbk;
+		dp_intf->rx_stack = dp_rx_packet_cbk;
+	} else {
+		txrx_ops.rx.rx = dp_rx_packet_cbk;
+		txrx_ops.rx.rx_stack = NULL;
+		txrx_ops.rx.rx_flush = NULL;
+	}
+	if (dp_intf->dp_ctx->dp_cfg.fisa_enable &&
+	    dp_intf->device_mode != QDF_MONITOR_MODE) {
+		dp_debug("FISA feature enabled");
+		dp_rx_register_fisa_ops(&txrx_ops);
+	}
+
+	txrx_ops.rx.stats_rx = dp_tx_rx_collect_connectivity_stats_info;
+
+	txrx_ops.tx.tx_comp = dp_sta_notify_tx_comp_cb;
+	txrx_ops.tx.tx = NULL;
+
+	/* cdp_vdev_register() fills txrx_ops.tx.tx on success */
+	cdp_vdev_register(soc, dp_intf->intf_id, (ol_osif_vdev_handle)dp_intf,
+			  &txrx_ops);
+
+	if (!txrx_ops.tx.tx) {
+		dp_err("vdev register fail");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf->tx_fn = txrx_ops.tx.tx;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_ocb_register_txrx_ops() - Register OCB mode TX/RX ops with CDP
+ * @vdev: objmgr vdev handle
+ *
+ * Registers the in-line RX callback for an OCB vdev, caches the TX
+ * function returned by CDP, and records the interface's own MAC address
+ * as the connection peer address.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS ucfg_dp_ocb_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	struct ol_txrx_ops txrx_ops;
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Register the vdev transmit and receive functions */
+	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
+	txrx_ops.rx.rx = dp_rx_packet_cbk;
+	txrx_ops.rx.stats_rx = dp_tx_rx_collect_connectivity_stats_info;
+
+	/* cdp_vdev_register() fills txrx_ops.tx.tx on success */
+	cdp_vdev_register(soc, dp_intf->intf_id, (ol_osif_vdev_handle)dp_intf,
+			  &txrx_ops);
+	if (!txrx_ops.tx.tx) {
+		dp_err("vdev register fail");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf->tx_fn = txrx_ops.tx.tx;
+
+	/* OCB has no association; peer address is the interface itself */
+	qdf_copy_macaddr(&dp_intf->conn_info.peer_macaddr,
+			 &dp_intf->mac_addr);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+#ifdef FEATURE_MONITOR_MODE_SUPPORT
+/**
+ * ucfg_dp_mon_register_txrx_ops() - Register monitor mode RX ops with CDP
+ * @vdev: objmgr vdev handle
+ *
+ * Registers the monitor RX packet callback; no TX function is expected
+ * for a monitor vdev, so cdp_vdev_register()'s result is not checked.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS ucfg_dp_mon_register_txrx_ops(struct wlan_objmgr_vdev *vdev)
+{
+	struct ol_txrx_ops txrx_ops;
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
+	txrx_ops.rx.rx = dp_mon_rx_packet_cbk;
+	dp_monitor_set_rx_monitor_cb(&txrx_ops, dp_rx_monitor_callback);
+	cdp_vdev_register(soc, dp_intf->intf_id,
+			  (ol_osif_vdev_handle)dp_intf,
+			  &txrx_ops);
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * ucfg_dp_softap_register_txrx_ops() - Register SoftAP TX/RX ops with CDP
+ * @vdev: objmgr vdev handle
+ * @txrx_ops: caller-provided ops structure to fill and register
+ *
+ * Unlike the STA variants, the caller supplies @txrx_ops so it can
+ * pre-populate fields of its own before registration. On success the
+ * cached TX function is stored and the TX-block bit is cleared.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error status otherwise
+ */
+QDF_STATUS ucfg_dp_softap_register_txrx_ops(struct wlan_objmgr_vdev *vdev,
+					    struct ol_txrx_ops *txrx_ops)
+{
+	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Register the vdev transmit and receive functions */
+	txrx_ops->tx.tx_comp = dp_softap_notify_tx_compl_cbk;
+
+	if (dp_intf->dp_ctx->enable_dp_rx_threads) {
+		/* RX-thread model: enqueue to DP RX thread */
+		txrx_ops->rx.rx = dp_rx_pkt_thread_enqueue_cbk;
+		txrx_ops->rx.rx_stack = dp_softap_rx_packet_cbk;
+		txrx_ops->rx.rx_flush = dp_rx_flush_packet_cbk;
+		txrx_ops->rx.rx_gro_flush = dp_rx_thread_gro_flush_ind_cbk;
+		dp_intf->rx_stack = dp_softap_rx_packet_cbk;
+	} else {
+		txrx_ops->rx.rx = dp_softap_rx_packet_cbk;
+		txrx_ops->rx.rx_stack = NULL;
+		txrx_ops->rx.rx_flush = NULL;
+	}
+
+	txrx_ops->get_tsf_time = wlan_dp_get_tsf_time;
+	/* cdp_vdev_register() fills txrx_ops->tx.tx on success */
+	cdp_vdev_register(soc,
+			  dp_intf->intf_id,
+			  (ol_osif_vdev_handle)dp_intf,
+			  txrx_ops);
+	if (!txrx_ops->tx.tx) {
+		dp_err("vdev register fail");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dp_intf->tx_fn = txrx_ops->tx.tx;
+	/* TX function is valid again: clear the "tx_fn cleared" block bit */
+	dp_intf->sap_tx_block_mask &= ~DP_TX_FN_CLR;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_register_pkt_capture_callbacks() - Register packet capture RX cb
+ * @vdev: objmgr vdev handle
+ *
+ * Registers the monitor-mode RX callback with the packet capture
+ * component, passing the DP interface as the callback context.
+ *
+ * Return: QDF_STATUS of the packet capture registration
+ */
+QDF_STATUS ucfg_dp_register_pkt_capture_callbacks(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	return ucfg_pkt_capture_register_callbacks(vdev,
+						   dp_mon_rx_packet_cbk,
+						   dp_intf);
+}
+
+/**
+ * ucfg_dp_init_txrx() - Initialize DP TX/RX for a vdev
+ * @vdev: objmgr vdev handle
+ *
+ * Currently only validates that the DP interface exists; kept as a
+ * counterpart to ucfg_dp_deinit_txrx() for symmetric call sites.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
+ */
+QDF_STATUS ucfg_dp_init_txrx(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_deinit_txrx() - De-initialize DP TX/RX for a vdev
+ * @vdev: objmgr vdev handle
+ *
+ * Clears the cached TX function so no further transmits go through
+ * the (about to be unregistered) CDP vdev.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
+ */
+QDF_STATUS ucfg_dp_deinit_txrx(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	dp_intf->tx_fn = NULL;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_softap_init_txrx() - Initialize SoftAP DP TX/RX for a vdev
+ * @vdev: objmgr vdev handle
+ *
+ * Resets the interface's network-device statistics for a fresh SAP
+ * session.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
+ */
+QDF_STATUS ucfg_dp_softap_init_txrx(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_mem_zero(&dp_intf->stats, sizeof(qdf_net_dev_stats));
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_softap_deinit_txrx() - De-initialize SoftAP DP TX/RX for a vdev
+ * @vdev: objmgr vdev handle
+ *
+ * Clears the cached TX function and sets the DP_TX_FN_CLR block bit so
+ * the SAP TX path drops packets until the ops are registered again.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
+ */
+QDF_STATUS ucfg_dp_softap_deinit_txrx(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	dp_intf->tx_fn = NULL;
+	dp_intf->sap_tx_block_mask |= DP_TX_FN_CLR;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_start_xmit() - Transmit one packet on a STA interface
+ * @nbuf: network buffer to transmit
+ * @vdev: objmgr vdev handle
+ *
+ * Wraps dp_start_xmit() with the num_active_task counter so that
+ * dp_wait_complete_tasks() can wait for in-flight TX to drain.
+ *
+ * Return: QDF_STATUS of the transmit attempt
+ */
+QDF_STATUS ucfg_dp_start_xmit(qdf_nbuf_t nbuf, struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+	QDF_STATUS status;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_atomic_inc(&dp_intf->num_active_task);
+	status = dp_start_xmit(dp_intf, nbuf);
+	qdf_atomic_dec(&dp_intf->num_active_task);
+
+	return status;
+}
+
+/**
+ * ucfg_dp_rx_packet_cbk() - Deliver one RX packet through the DP RX path
+ * @vdev: objmgr vdev handle
+ * @nbuf: received network buffer
+ *
+ * Return: QDF_STATUS from dp_rx_packet_cbk(), QDF_STATUS_E_INVAL if the
+ *	   DP interface cannot be found
+ */
+QDF_STATUS ucfg_dp_rx_packet_cbk(struct wlan_objmgr_vdev *vdev, qdf_nbuf_t nbuf)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	return dp_rx_packet_cbk(dp_intf, nbuf);
+}
+
+/**
+ * ucfg_dp_tx_timeout() - Handle a TX watchdog timeout on a STA interface
+ * @vdev: objmgr vdev handle
+ *
+ * Return: None
+ */
+void ucfg_dp_tx_timeout(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return;
+	}
+
+	dp_tx_timeout(dp_intf);
+}
+
+/**
+ * ucfg_dp_softap_start_xmit() - Transmit one packet on a SoftAP interface
+ * @nbuf: network buffer to transmit
+ * @vdev: objmgr vdev handle
+ *
+ * Wraps dp_softap_start_xmit() with the num_active_task counter so that
+ * dp_wait_complete_tasks() can wait for in-flight TX to drain.
+ *
+ * Return: QDF_STATUS of the transmit attempt
+ */
+QDF_STATUS
+ucfg_dp_softap_start_xmit(qdf_nbuf_t nbuf, struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+	QDF_STATUS status;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_atomic_inc(&dp_intf->num_active_task);
+	status = dp_softap_start_xmit(nbuf, dp_intf);
+	qdf_atomic_dec(&dp_intf->num_active_task);
+
+	return status;
+}
+
+/**
+ * ucfg_dp_softap_tx_timeout() - Handle a TX watchdog timeout on a SoftAP
+ * @vdev: objmgr vdev handle
+ *
+ * Return: None
+ */
+void ucfg_dp_softap_tx_timeout(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return;
+	}
+
+	dp_softap_tx_timeout(dp_intf);
+}
+
+/**
+ * ucfg_dp_get_dev_stats() - Get the net-device stats of a DP interface
+ * @intf_addr: MAC address identifying the interface
+ *
+ * Return: pointer to the interface's qdf_net_dev_stats, or NULL if no
+ *	   interface matches @intf_addr (also asserts via QDF_BUG in
+ *	   debug builds, since callers expect the interface to exist)
+ */
+qdf_net_dev_stats *ucfg_dp_get_dev_stats(struct qdf_mac_addr *intf_addr)
+{
+	struct wlan_dp_intf *dp_intf;
+	struct wlan_dp_psoc_context *dp_ctx;
+
+	dp_ctx =  dp_get_context();
+
+	dp_intf = dp_get_intf_by_macaddr(dp_ctx, intf_addr);
+	if (!dp_intf) {
+		dp_err("DP interface not found addr:"QDF_MAC_ADDR_FMT,
+		       QDF_MAC_ADDR_REF(intf_addr));
+		QDF_BUG(0);
+		return NULL;
+	}
+
+	return &dp_intf->stats;
+}
+
+/**
+ * ucfg_dp_inc_rx_pkt_stats() - Account one received packet in DP stats
+ * @vdev: objmgr vdev handle
+ * @pkt_len: length of the received packet in bytes
+ * @delivered: true if the packet was delivered to the stack,
+ *	       false if it was refused
+ *
+ * Updates both the per-CPU TX/RX counters and the interface's
+ * net-device statistics.
+ *
+ * NOTE(review): qdf_get_cpu()'s return is used directly to index
+ * per_cpu[] — confirm it is always < the per_cpu array size.
+ *
+ * Return: None
+ */
+void ucfg_dp_inc_rx_pkt_stats(struct wlan_objmgr_vdev *vdev,
+			      uint32_t pkt_len,
+			      bool delivered)
+{
+	struct wlan_dp_intf *dp_intf;
+	struct dp_tx_rx_stats *stats;
+	unsigned int cpu_index;
+
+	dp_intf = dp_get_vdev_priv_obj(vdev);
+	if (unlikely(!dp_intf)) {
+		dp_err_rl("DP interface not found");
+		return;
+	}
+
+	cpu_index = qdf_get_cpu();
+	stats = &dp_intf->dp_stats.tx_rx_stats;
+
+	++stats->per_cpu[cpu_index].rx_packets;
+	QDF_NET_DEV_STATS_INC_RX_PKTS(&dp_intf->stats);
+	QDF_NET_DEV_STATS_RX_BYTES(&dp_intf->stats) += pkt_len;
+
+	if (delivered)
+		++stats->per_cpu[cpu_index].rx_delivered;
+	else
+		++stats->per_cpu[cpu_index].rx_refused;
+}
+
 void ucfg_dp_register_rx_mic_error_ind_handler(void *soc)
 void ucfg_dp_register_rx_mic_error_ind_handler(void *soc)
 {
 {
 	cdp_register_rx_mic_error_ind_handler(soc, dp_rx_mic_error_ind);
 	cdp_register_rx_mic_error_ind_handler(soc, dp_rx_mic_error_ind);
@@ -786,6 +1579,18 @@ void ucfg_dp_set_pkt_type_bitmap_value(struct wlan_objmgr_vdev *vdev,
 	dp_intf->pkt_type_bitmap = value;
 	dp_intf->pkt_type_bitmap = value;
 }
 }
 
 
+/**
+ * ucfg_dp_intf_get_pkt_type_bitmap_value() - Get interface pkt type bitmap
+ * @intf_ctx: opaque DP interface pointer (struct wlan_dp_intf *)
+ *
+ * Return: the interface's pkt_type_bitmap, or 0 if @intf_ctx is NULL
+ */
+uint32_t ucfg_dp_intf_get_pkt_type_bitmap_value(void *intf_ctx)
+{
+	struct wlan_dp_intf *dp_intf = (struct wlan_dp_intf *)intf_ctx;
+
+	if (!dp_intf) {
+		/* Log fixed: the NULL object is the DP interface, not the
+		 * DP context.
+		 */
+		dp_err("DP interface is NULL");
+		return 0;
+	}
+
+	return dp_intf->pkt_type_bitmap;
+}
+
 void ucfg_dp_set_track_dest_ipv4_value(struct wlan_objmgr_vdev *vdev,
 void ucfg_dp_set_track_dest_ipv4_value(struct wlan_objmgr_vdev *vdev,
 				       uint32_t value)
 				       uint32_t value)
 {
 {
@@ -1083,6 +1888,28 @@ void ucfg_dp_register_hdd_callbacks(struct wlan_objmgr_psoc *psoc,
 	dp_ctx->dp_ops.dp_is_link_adapter = cb_obj->dp_is_link_adapter;
 	dp_ctx->dp_ops.dp_is_link_adapter = cb_obj->dp_is_link_adapter;
 	dp_ctx->dp_ops.dp_get_pause_map = cb_obj->dp_get_pause_map;
 	dp_ctx->dp_ops.dp_get_pause_map = cb_obj->dp_get_pause_map;
 	dp_ctx->dp_ops.dp_nud_failure_work = cb_obj->dp_nud_failure_work;
 	dp_ctx->dp_ops.dp_nud_failure_work = cb_obj->dp_nud_failure_work;
+
+	dp_ctx->dp_ops.dp_get_tx_resource = cb_obj->dp_get_tx_resource;
+	dp_ctx->dp_ops.dp_get_tx_flow_low_watermark =
+		cb_obj->dp_get_tx_flow_low_watermark;
+	dp_ctx->dp_ops.dp_get_tsf_time = cb_obj->dp_get_tsf_time;
+	dp_ctx->dp_ops.dp_tsf_timestamp_rx = cb_obj->dp_tsf_timestamp_rx;
+	dp_ctx->dp_ops.dp_gro_rx_legacy_get_napi =
+		cb_obj->dp_gro_rx_legacy_get_napi;
+	dp_ctx->dp_ops.dp_get_nw_intf_mac_by_vdev_mac =
+		cb_obj->dp_get_nw_intf_mac_by_vdev_mac;
+
+	dp_ctx->dp_ops.dp_nbuf_push_pkt = cb_obj->dp_nbuf_push_pkt;
+	dp_ctx->dp_ops.dp_rx_napi_gro_flush = cb_obj->dp_rx_napi_gro_flush;
+	dp_ctx->dp_ops.dp_rx_napi_gro_receive = cb_obj->dp_rx_napi_gro_receive;
+	dp_ctx->dp_ops.dp_lro_rx_cb = cb_obj->dp_lro_rx_cb;
+	dp_ctx->dp_ops.dp_register_rx_offld_flush_cb =
+		cb_obj->dp_register_rx_offld_flush_cb;
+	dp_ctx->dp_ops.dp_rx_check_qdisc_configured =
+		cb_obj->dp_rx_check_qdisc_configured;
+	dp_ctx->dp_ops.dp_is_gratuitous_arp_unsolicited_na =
+		cb_obj->dp_is_gratuitous_arp_unsolicited_na;
+	dp_ctx->dp_ops.dp_send_rx_pkt_over_nl = cb_obj->dp_send_rx_pkt_over_nl;
 }
 }
 
 
 void ucfg_dp_register_event_handler(struct wlan_objmgr_psoc *psoc,
 void ucfg_dp_register_event_handler(struct wlan_objmgr_psoc *psoc,
@@ -1107,5 +1934,142 @@ uint32_t ucfg_dp_get_bus_bw_compute_interval(struct wlan_objmgr_psoc *psoc)
 		dp_err("DP ctx is NULL");
 		dp_err("DP ctx is NULL");
 		return 0;
 		return 0;
 	}
 	}
-	return dp_ctx->dp_cfg.bus_bw_compute_interval;
+	return DP_BUS_BW_CFG(dp_ctx->dp_cfg.bus_bw_compute_interval);
+}
+
+/**
+ * ucfg_dp_get_txrx_stats() - Snapshot the interface's TX/RX statistics
+ * @vdev: objmgr vdev handle
+ * @dp_stats: caller-provided structure receiving the copied counters
+ *
+ * Copies the per-CPU counters, the atomic unsolicited-ARP/mcast drop
+ * counter, and the aggregation/timeout totals into @dp_stats.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
+ */
+QDF_STATUS ucfg_dp_get_txrx_stats(struct wlan_objmgr_vdev *vdev,
+				  struct dp_tx_rx_stats *dp_stats)
+{
+	struct wlan_dp_intf *dp_intf = dp_get_vdev_priv_obj(vdev);
+	struct dp_tx_rx_stats *txrx_stats;
+	int i = 0, rx_mcast_drp = 0;
+
+	if (!dp_intf) {
+		dp_err("Unable to get DP interface");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	txrx_stats = &dp_intf->dp_stats.tx_rx_stats;
+	for (i = 0; i < NUM_CPUS; i++) {
+		dp_stats->per_cpu[i].rx_packets = txrx_stats->per_cpu[i].rx_packets;
+		dp_stats->per_cpu[i].rx_dropped = txrx_stats->per_cpu[i].rx_dropped;
+		dp_stats->per_cpu[i].rx_delivered = txrx_stats->per_cpu[i].rx_delivered;
+		dp_stats->per_cpu[i].rx_refused = txrx_stats->per_cpu[i].rx_refused;
+		dp_stats->per_cpu[i].tx_called = txrx_stats->per_cpu[i].tx_called;
+		dp_stats->per_cpu[i].tx_dropped = txrx_stats->per_cpu[i].tx_dropped;
+		dp_stats->per_cpu[i].tx_orphaned = txrx_stats->per_cpu[i].tx_orphaned;
+	}
+	/* Atomic counter is read/written atomically rather than memcpy'd */
+	rx_mcast_drp = qdf_atomic_read(&txrx_stats->rx_usolict_arp_n_mcast_drp);
+	qdf_atomic_set(&dp_stats->rx_usolict_arp_n_mcast_drp, rx_mcast_drp);
+
+	dp_stats->rx_aggregated = txrx_stats->rx_aggregated;
+	dp_stats->rx_gro_dropped = txrx_stats->rx_gro_dropped;
+	dp_stats->rx_non_aggregated = txrx_stats->rx_non_aggregated;
+	dp_stats->rx_gro_flush_skip = txrx_stats->rx_gro_flush_skip;
+	dp_stats->rx_gro_low_tput_flush = txrx_stats->rx_gro_low_tput_flush;
+	dp_stats->tx_timeout_cnt = txrx_stats->tx_timeout_cnt;
+	dp_stats->cont_txtimeout_cnt = txrx_stats->cont_txtimeout_cnt;
+	dp_stats->last_txtimeout = txrx_stats->last_txtimeout;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * ucfg_dp_reset_cont_txtimeout_cnt() - Reset continuous TX timeout counter
+ * @vdev: objmgr vdev handle
+ *
+ * Return: None
+ */
+void ucfg_dp_reset_cont_txtimeout_cnt(struct wlan_objmgr_vdev *vdev)
+{
+	struct wlan_dp_intf *dp_intf = dp_get_vdev_priv_obj(vdev);
+
+	if (!dp_intf) {
+		dp_err("Unable to get DP interface");
+		return;
+	}
+	dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
+}
+
+/**
+ * ucfg_dp_set_rx_thread_affinity() - Apply configured RX thread CPU masks
+ * @psoc: psoc handle
+ *
+ * Applies the general and uplink RX-thread affinity masks from the DP
+ * config, each only when its mask is non-zero.
+ *
+ * Return: None
+ */
+void ucfg_dp_set_rx_thread_affinity(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+	struct wlan_dp_psoc_cfg *cfg;
+
+	if (!dp_ctx) {
+		dp_err("DP ctx is NULL");
+		return;
+	}
+	cfg = &dp_ctx->dp_cfg;
+
+	if (cfg->rx_thread_affinity_mask)
+		cds_set_rx_thread_cpu_mask(cfg->rx_thread_affinity_mask);
+
+	if (cfg->rx_thread_ul_affinity_mask)
+		cds_set_rx_thread_ul_cpu_mask(cfg->rx_thread_ul_affinity_mask);
+}
+
+/**
+ * ucfg_dp_get_disable_rx_ol_val() - Read RX offload disable flags
+ * @psoc: psoc handle
+ * @disable_conc: out flag, RX offload disabled due to concurrency
+ * @disable_low_tput: out flag, RX offload disabled due to low throughput
+ *
+ * Out parameters are left untouched when the DP context is missing.
+ *
+ * Return: None
+ */
+void ucfg_dp_get_disable_rx_ol_val(struct wlan_objmgr_psoc *psoc,
+				   uint8_t *disable_conc,
+				   uint8_t *disable_low_tput)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+
+	if (!dp_ctx) {
+		dp_err("Unable to get DP context");
+		return;
+	}
+	*disable_conc = qdf_atomic_read(&dp_ctx->disable_rx_ol_in_concurrency);
+	*disable_low_tput = qdf_atomic_read(&dp_ctx->disable_rx_ol_in_low_tput);
+}
+
+/**
+ * ucfg_dp_get_rx_aggregation_val() - Read the RX aggregation enable flag
+ * @psoc: psoc handle
+ *
+ * Return: current rx_aggregation value, 0 when the DP context is missing
+ */
+uint32_t ucfg_dp_get_rx_aggregation_val(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+
+	if (!dp_ctx) {
+		dp_err("Unable to get DP context");
+		return 0;
+	}
+	return qdf_atomic_read(&dp_ctx->dp_agg_param.rx_aggregation);
+}
+
+/**
+ * ucfg_dp_set_rx_aggregation_val() - Set the RX aggregation enable flag
+ * @psoc: psoc handle
+ * @value: any non-zero value enables aggregation (stored normalized to 0/1)
+ *
+ * Return: None
+ */
+void ucfg_dp_set_rx_aggregation_val(struct wlan_objmgr_psoc *psoc,
+				    uint32_t value)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+
+	if (!dp_ctx) {
+		dp_err("Unable to get DP context");
+		return;
+	}
+	qdf_atomic_set(&dp_ctx->dp_agg_param.rx_aggregation, !!value);
+}
+
+/**
+ * ucfg_dp_set_force_gro_enable() - Force-enable/disable GRO
+ * @psoc: psoc handle
+ * @value: true to force GRO on, false otherwise
+ *
+ * Return: None
+ */
+void ucfg_dp_set_force_gro_enable(struct wlan_objmgr_psoc *psoc, bool value)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+
+	if (!dp_ctx) {
+		dp_err("DP ctx is NULL");
+		return;
+	}
+	dp_ctx->dp_agg_param.force_gro_enable = value;
+}
+
+/**
+ * ucfg_dp_runtime_disable_rx_thread() - Toggle runtime RX-thread bypass
+ * @vdev: objmgr vdev handle
+ * @value: true to bypass the RX thread at runtime for this interface
+ *
+ * Return: None
+ */
+void ucfg_dp_runtime_disable_rx_thread(struct wlan_objmgr_vdev *vdev,
+				       bool value)
+{
+	struct wlan_dp_intf *dp_intf = dp_get_vdev_priv_obj(vdev);
+
+	if (!dp_intf) {
+		dp_err("Unable to get DP interface");
+		return;
+	}
+	dp_intf->runtime_disable_rx_thread = value;
+}
+
+/**
+ * ucfg_dp_get_napi_enabled() - Check whether NAPI is enabled
+ * @psoc: psoc handle
+ *
+ * Return: true if NAPI is enabled, false otherwise (including when the
+ *	   DP context cannot be retrieved)
+ */
+bool ucfg_dp_get_napi_enabled(struct wlan_objmgr_psoc *psoc)
+{
+	struct wlan_dp_psoc_context *dp_ctx = dp_psoc_get_priv(psoc);
+
+	if (!dp_ctx) {
+		dp_err("Unable to get DP context");
+		return 0;
+	}
+	return dp_ctx->napi_enable;
 }
 }