Jelajahi Sumber

qcacld-3.0: Add changes for per NAPI or per Rx CE LRO manager

Make changes for a per-Rx-context LRO manager; this addresses
all parallel Rx concurrency issues, thereby removing all the contention.

Change-Id: I3609bcdb67e0046fac5a22bba9cb6a5eb4bcbe84
CRs-Fixed: 1079320
Manjunathappa Prakash 8 tahun lalu
induk
melakukan
04f2644230

+ 4 - 1
core/dp/htt/htt_rx.c

@@ -1994,14 +1994,16 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	qdf_nbuf_t msdu, next, prev = NULL;
 	uint8_t *rx_ind_data;
 	uint32_t *msg_word;
+	uint32_t rx_ctx_id;
 	unsigned int msdu_count = 0;
 	uint8_t offload_ind, frag_ind;
-	struct htt_host_rx_desc_base *rx_desc;
 	uint8_t peer_id;
+	struct htt_host_rx_desc_base *rx_desc;
 
 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
 
 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
+	rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(rx_ind_msg);
 	msg_word = (uint32_t *) rx_ind_data;
 	peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
 					*(u_int32_t *)rx_ind_data);
@@ -2059,6 +2061,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 		QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
 		qdf_dp_trace_set_track(msdu, QDF_RX);
 		QDF_NBUF_CB_TX_PACKET_TRACK(msdu) = QDF_NBUF_TX_PKT_DATA_TRACK;
+		QDF_NBUF_CB_RX_CTX_ID(msdu) = rx_ctx_id;
 		ol_rx_log_packet(pdev, peer_id, msdu);
 		DPTRACE(qdf_dp_trace(msdu,
 			QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD,

+ 53 - 26
core/dp/txrx/ol_txrx.c

@@ -70,6 +70,7 @@
 #include <ol_tx_queue.h>
 #include <ol_tx_sched.h>           /* ol_tx_sched_attach, etc. */
 #include <ol_txrx.h>
+#include <ol_txrx_types.h>
 #include <cdp_txrx_flow_ctrl_legacy.h>
 #include <cdp_txrx_ipa.h>
 #include "wma.h"
@@ -4637,7 +4638,7 @@ void ol_txrx_lro_flush_handler(void *context,
 			       void *rxpkt,
 			       uint16_t staid)
 {
-	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
+	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (qdf_unlikely(!pdev)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
@@ -4647,7 +4648,7 @@ void ol_txrx_lro_flush_handler(void *context,
 	}
 
 	if (pdev->lro_info.lro_flush_cb)
-		pdev->lro_info.lro_flush_cb(pdev->lro_info.lro_data);
+		pdev->lro_info.lro_flush_cb(context);
 	else
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: lro_flush_cb NULL", __func__);
@@ -4666,13 +4667,13 @@ void ol_txrx_lro_flush(void *data)
 {
 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
 	struct cds_ol_rx_pkt *pkt;
-	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)data;
+	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (qdf_unlikely(!sched_ctx))
 		return;
 
 	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
-		ol_txrx_lro_flush_handler((void *)pdev, NULL, 0);
+		ol_txrx_lro_flush_handler(data, NULL, 0);
 	} else {
 		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
 		if (qdf_unlikely(!pkt)) {
@@ -4683,7 +4684,7 @@ void ol_txrx_lro_flush(void *data)
 
 		pkt->callback =
 			 (cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
-		pkt->context = pdev;
+		pkt->context = data;
 		pkt->Rxpkt = NULL;
 		pkt->staId = 0;
 		cds_indicate_rxpkt(sched_ctx, pkt);
@@ -4692,51 +4693,77 @@ void ol_txrx_lro_flush(void *data)
 
 /**
  * ol_register_lro_flush_cb() - register the LRO flush callback
- * @handler: callback function
- * @data: opaque data pointer to be passed back
+ * @lro_flush_cb: flush callback function
+ * @lro_init_cb: Allocate and initialize LRO data structure.
  *
  * Store the LRO flush callback provided and in turn
  * register OL's LRO flush handler with CE
  *
  * Return: none
  */
-void ol_register_lro_flush_cb(void (handler)(void *), void *data)
+void ol_register_lro_flush_cb(void (lro_flush_cb)(void *),
+			      void *(lro_init_cb)(void))
 {
-	struct hif_opaque_softc *hif_device =
-		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
+	struct hif_opaque_softc *hif_device;
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
-	if (pdev != NULL) {
-		pdev->lro_info.lro_flush_cb = handler;
-		pdev->lro_info.lro_data = data;
-	} else
+	if (pdev == NULL) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
+		TXRX_ASSERT2(0);
+		goto out;
+	}
+	if (pdev->lro_info.lro_flush_cb != NULL) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: LRO already initialised\n", __func__);
+		if (pdev->lro_info.lro_flush_cb != lro_flush_cb) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "lro_flush_cb is differ to previously registered callback\n")
+			TXRX_ASSERT2(0);
+			goto out;
+		}
+		qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
+		goto out;
+	}
+	pdev->lro_info.lro_flush_cb = lro_flush_cb;
+	hif_device = (struct hif_opaque_softc *)
+				cds_get_context(QDF_MODULE_ID_HIF);
 
-	hif_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, pdev);
+	hif_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, lro_init_cb);
+	qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
+
+out:
+	return;
 }
 
 /**
- * ol_deregister_lro_flush_cb() - deregister the LRO flush
- * callback
+ * ol_deregister_lro_flush_cb() - deregister the LRO flush callback
+ * @lro_deinit_cb: callback function for deregistration.
  *
  * Remove the LRO flush callback provided and in turn
  * deregister OL's LRO flush handler with CE
  *
  * Return: none
  */
-void ol_deregister_lro_flush_cb(void)
+void ol_deregister_lro_flush_cb(void (lro_deinit_cb)(void *))
 {
-	struct hif_opaque_softc *hif_device =
-		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
+	struct hif_opaque_softc *hif_device;
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
-	hif_lro_flush_cb_deregister(hif_device);
-
-	if (pdev != NULL) {
-		pdev->lro_info.lro_flush_cb = NULL;
-		pdev->lro_info.lro_data = NULL;
-	} else
+	if (pdev == NULL) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev NULL!", __func__);
+		return;
+	}
+	if (qdf_atomic_dec_and_test(&pdev->lro_info.lro_dev_cnt) == 0) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: Other LRO enabled modules still exist, do not unregister the lro_flush_cb\n", __func__);
+		return;
+	}
+	hif_device =
+		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
+
+	hif_lro_flush_cb_deregister(hif_device, lro_deinit_cb);
+
+	pdev->lro_info.lro_flush_cb = NULL;
 }
 #endif /* FEATURE_LRO */
 

+ 1 - 1
core/dp/txrx/ol_txrx_types.h

@@ -948,8 +948,8 @@ struct ol_txrx_pdev_t {
 	ol_tx_pause_callback_fp pause_cb;
 
 	struct {
-		void *lro_data;
 		void (*lro_flush_cb)(void *);
+		qdf_atomic_t lro_dev_cnt;
 	} lro_info;
 	struct ol_txrx_peer_t *self_peer;
 };

+ 0 - 4
core/hdd/inc/wlan_hdd_lro.h

@@ -72,7 +72,6 @@ struct hdd_lro_desc_entry {
 struct hdd_lro_desc_pool {
 	struct hdd_lro_desc_entry *lro_desc_array;
 	struct list_head lro_free_list_head;
-	qdf_spinlock_t lro_pool_lock;
 };
 
 /**
@@ -93,7 +92,6 @@ struct hdd_lro_desc_table {
  */
 struct hdd_lro_desc_info {
 	struct hdd_lro_desc_table *lro_hash_table;
-	qdf_spinlock_t lro_hash_lock;
 	struct hdd_lro_desc_pool lro_desc_pool;
 };
 
@@ -151,8 +149,6 @@ struct hdd_lro_stats {
 struct hdd_lro_s {
 	struct net_lro_mgr *lro_mgr;
 	struct hdd_lro_desc_info lro_desc_info;
-	qdf_spinlock_t lro_mgr_arr_access_lock;
-	struct hdd_lro_stats lro_stats;
 };
 
 int hdd_lro_init(hdd_context_t *hdd_ctx);

+ 90 - 247
core/hdd/src/wlan_hdd_lro.c

@@ -53,14 +53,6 @@
 	(LRO_DESC | LRO_ELIGIBILITY_CHECKED | LRO_TCP_ACK_NUM | \
 	 LRO_TCP_DATA_CSUM | LRO_TCP_SEQ_NUM | LRO_TCP_WIN)
 
-#define LRO_HIST_UPDATE(lro_desc, adapter) \
-	 do { \
-		uint8_t bucket = lro_desc->pkt_aggr_cnt >> 3; \
-		if (unlikely(bucket > HDD_LRO_BUCKET_MAX)) \
-			bucket = HDD_LRO_BUCKET_MAX; \
-		adapter->lro_info.lro_stats.pkt_aggr_hist[bucket]++; \
-	 } while (0);
-
 /**
  * hdd_lro_get_skb_header() - LRO callback function
  * @skb: network buffer
@@ -110,7 +102,6 @@ static void hdd_lro_desc_pool_init(struct hdd_lro_desc_pool *lro_desc_pool,
 		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
 			 &lro_desc_pool->lro_free_list_head);
 	}
-	qdf_spinlock_create(&lro_desc_pool->lro_pool_lock);
 }
 
 /**
@@ -137,41 +128,6 @@ static void hdd_lro_desc_info_init(struct hdd_lro_s *hdd_info)
 			 lro_hash_table[i].lro_desc_list);
 	}
 
-	qdf_spinlock_create(&hdd_info->lro_desc_info.lro_hash_lock);
-	qdf_spinlock_create(&hdd_info->lro_mgr_arr_access_lock);
-}
-
-/**
- * hdd_lro_desc_pool_deinit() - Free the LRO descriptor list
- * @hdd_info: HDD LRO data structure
- *
- * Free the pool of LRO descriptors
- *
- * Return: none
- */
-static void hdd_lro_desc_pool_deinit(struct hdd_lro_desc_pool *lro_desc_pool)
-{
-	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);
-	qdf_spinlock_destroy(&lro_desc_pool->lro_pool_lock);
-}
-
-/**
- * hdd_lro_desc_info_deinit() - Deinitialize the LRO descriptors
- *
- * @hdd_info: HDD LRO data structure
- *
- * Deinitialize the free pool of LRO descriptors and the entries
- * of the hash table
- *
- * Return: none
- */
-static void hdd_lro_desc_info_deinit(struct hdd_lro_s *hdd_info)
-{
-	struct hdd_lro_desc_info *desc_info = &hdd_info->lro_desc_info;
-
-	hdd_lro_desc_pool_deinit(&desc_info->lro_desc_pool);
-	qdf_spinlock_destroy(&desc_info->lro_hash_lock);
-	qdf_spinlock_destroy(&hdd_info->lro_mgr_arr_access_lock);
 }
 
 /**
@@ -214,7 +170,7 @@ static inline bool hdd_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
  *
  * Return: 0 - success, < 0 - failure
  */
-static int hdd_lro_desc_find(hdd_adapter_t *adapter,
+static int hdd_lro_desc_find(struct hdd_lro_s *lro_info,
 	 struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
 	 struct net_lro_desc **lro_desc)
 {
@@ -223,7 +179,7 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 	struct list_head *ptr;
 	struct hdd_lro_desc_entry *entry;
 	struct hdd_lro_desc_pool *free_pool;
-	struct hdd_lro_desc_info *desc_info = &adapter->lro_info.lro_desc_info;
+	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;
 
 	*lro_desc = NULL;
 	i = QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) & LRO_DESC_TABLE_SZ_MASK;
@@ -236,7 +192,6 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 		return -EINVAL;
 	}
 
-	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	/* Check if this flow exists in the descriptor list */
 	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
 		struct net_lro_desc *tmp_lro_desc = NULL;
@@ -245,27 +200,22 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 		if (tmp_lro_desc->active) {
 			if (hdd_lro_tcp_flow_match(tmp_lro_desc, iph, tcph)) {
 				*lro_desc = entry->lro_desc;
-				qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 				return 0;
 			}
 		}
 	}
-	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 
 	/* no existing flow found, a new LRO desc needs to be allocated */
-	free_pool = &adapter->lro_info.lro_desc_info.lro_desc_pool;
-	qdf_spin_lock_bh(&free_pool->lro_pool_lock);
+	free_pool = &lro_info->lro_desc_info.lro_desc_pool;
 	entry = list_first_entry_or_null(
 		 &free_pool->lro_free_list_head,
 		 struct hdd_lro_desc_entry, lro_node);
 	if (NULL == entry) {
 		hdd_err("Could not allocate LRO desc!");
-		qdf_spin_unlock_bh(&free_pool->lro_pool_lock);
 		return -ENOMEM;
 	}
 
 	list_del_init(&entry->lro_node);
-	qdf_spin_unlock_bh(&free_pool->lro_pool_lock);
 
 	if (NULL == entry->lro_desc) {
 		hdd_err("entry->lro_desc is NULL!");
@@ -278,10 +228,8 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
 	 * should be 0 for newly allocated lro descriptors
 	 */
-	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	list_add_tail(&entry->lro_node,
 		 &lro_hash_table->lro_desc_list);
-	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 	*lro_desc = entry->lro_desc;
 
 	return 0;
@@ -327,7 +275,7 @@ static struct net_lro_desc *hdd_lro_get_desc(struct net_lro_mgr *lro_mgr,
  * Return: true - LRO eligible frame, false - frame is not LRO
  * eligible
  */
-static bool hdd_lro_eligible(hdd_adapter_t *adapter, struct sk_buff *skb,
+static bool hdd_lro_eligible(struct hdd_lro_s *lro_info, struct sk_buff *skb,
 	 struct iphdr *iph, struct tcphdr *tcph, struct net_lro_desc **desc)
 {
 	struct net_lro_desc *lro_desc = NULL;
@@ -338,7 +286,7 @@ static bool hdd_lro_eligible(hdd_adapter_t *adapter, struct sk_buff *skb,
 	if (!hw_lro_eligible)
 		return false;
 
-	if (0 != hdd_lro_desc_find(adapter, skb, iph, tcph, desc)) {
+	if (0 != hdd_lro_desc_find(lro_info, skb, iph, tcph, desc)) {
 		hdd_err("finding the LRO desc failed");
 		return false;
 	}
@@ -384,11 +332,12 @@ static bool hdd_lro_eligible(hdd_adapter_t *adapter, struct sk_buff *skb,
  * Return: none
  */
 static void hdd_lro_desc_free(struct net_lro_desc *desc,
-	 hdd_adapter_t *adapter)
+	struct hdd_lro_s *lro_info)
 {
 	struct hdd_lro_desc_entry *entry;
-	struct net_lro_desc *arr_base = adapter->lro_info.lro_mgr->lro_arr;
-	struct hdd_lro_desc_info *desc_info = &adapter->lro_info.lro_desc_info;
+	struct net_lro_mgr *lro_mgr = lro_info->lro_mgr;
+	struct net_lro_desc *arr_base = lro_mgr->lro_arr;
+	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;
 	int i = desc - arr_base;
 
 	if (i >= LRO_DESC_POOL_SZ) {
@@ -398,14 +347,10 @@ static void hdd_lro_desc_free(struct net_lro_desc *desc,
 
 	entry = &desc_info->lro_desc_pool.lro_desc_array[i];
 
-	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	list_del_init(&entry->lro_node);
-	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 
-	qdf_spin_lock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
 	list_add_tail(&entry->lro_node, &desc_info->
 		 lro_desc_pool.lro_free_list_head);
-	qdf_spin_unlock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
 }
 
 /**
@@ -422,7 +367,7 @@ static void hdd_lro_desc_free(struct net_lro_desc *desc,
  */
 static void hdd_lro_flush_pkt(struct net_lro_mgr *lro_mgr,
 			      struct iphdr *iph, struct tcphdr *tcph,
-			      hdd_adapter_t *adapter)
+			      struct hdd_lro_s *lro_info)
 {
 	struct net_lro_desc *lro_desc;
 
@@ -430,9 +375,7 @@ static void hdd_lro_flush_pkt(struct net_lro_mgr *lro_mgr,
 
 	if (lro_desc) {
 		/* statistics */
-		LRO_HIST_UPDATE(lro_desc, adapter);
-
-		hdd_lro_desc_free(lro_desc, adapter);
+		hdd_lro_desc_free(lro_desc, lro_info);
 		lro_flush_desc(lro_mgr, lro_desc);
 	}
 }
@@ -448,51 +391,15 @@ static void hdd_lro_flush_pkt(struct net_lro_mgr *lro_mgr,
  */
 static void hdd_lro_flush(void *data)
 {
-	hdd_adapter_t *adapter = (hdd_adapter_t *)data;
-	struct hdd_lro_s *hdd_lro;
-	struct hdd_context_s *ctx;
-	QDF_STATUS status;
-	hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+	struct hdd_lro_s *hdd_lro = data;
+	struct net_lro_mgr *lro_mgr = hdd_lro->lro_mgr;
 	int i;
 
-	/*
-	 * There is a more comprehensive solution that refactors
-	 * lro_mgr in the adapter into multiple instances, that
-	 * will replace this solution. The following is an interim
-	 * fix.
-	 */
-
-	/* Loop over all adapters and flush them all */
-	ctx = (struct hdd_context_s *)cds_get_context(QDF_MODULE_ID_HDD);
-	if (unlikely(ctx == NULL)) {
-		hdd_err("%s: cannot get hdd_ctx. Flushing failed", __func__);
-		return;
-	}
-
-	status = hdd_get_front_adapter(ctx, &adapter_node);
-	while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
-		adapter = adapter_node->pAdapter;
-		hdd_lro = &adapter->lro_info;
-		if (adapter->dev == NULL) {
-			hdd_err("vdev interface going down");
-		} else if (adapter->dev->features & NETIF_F_LRO) {
-			qdf_spin_lock_bh(&hdd_lro->lro_mgr_arr_access_lock);
-			for (i = 0; i < hdd_lro->lro_mgr->max_desc; i++) {
-				if (hdd_lro->lro_mgr->lro_arr[i].active) {
-					hdd_lro_desc_free(
-						&hdd_lro->lro_mgr->lro_arr[i],
-						(void *)adapter);
-					lro_flush_desc(
-						hdd_lro->lro_mgr,
-						&hdd_lro->lro_mgr->lro_arr[i]);
-					LRO_HIST_UPDATE((&hdd_lro->lro_mgr->lro_arr[i]),
-						 adapter);
-				}
-			}
-			qdf_spin_unlock_bh(&hdd_lro->lro_mgr_arr_access_lock);
+	for (i = 0; i < lro_mgr->max_desc; i++) {
+		if (lro_mgr->lro_arr[i].active) {
+			hdd_lro_desc_free(&lro_mgr->lro_arr[i], hdd_lro);
+			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
 		}
-		status = hdd_get_next_adapter(ctx, adapter_node, &next);
-		adapter_node = next;
 	}
 }
 
@@ -541,53 +448,39 @@ int hdd_lro_init(hdd_context_t *hdd_ctx)
 	return 0;
 }
 
-/**
- * hdd_lro_enable() - enable LRO
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- *
- * This function enables LRO in the network device attached to
- * the HDD adapter. It also allocates the HDD LRO instance for
- * that network device
- *
- * Return: 0 - success, < 0 - failure
- */
-int hdd_lro_enable(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter)
+static void *hdd_init_lro_mgr(void)
 {
 	struct hdd_lro_s *hdd_lro;
-	size_t lro_mgr_sz, desc_arr_sz, desc_pool_sz, hash_table_sz;
+	hdd_context_t *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+	size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
+	size_t hash_table_sz;
 	uint8_t *lro_mem_ptr;
 
-	if (!hdd_ctx->config->lro_enable ||
-		 QDF_STA_MODE != adapter->device_mode) {
-		hdd_info("LRO Disabled");
-		return 0;
+	if (NULL == hdd_ctx) {
+		hdd_err("hdd_ctx is NULL");
+		return NULL;
 	}
-
-	hdd_info("LRO Enabled");
-
-	hdd_lro = &adapter->lro_info;
-	qdf_mem_zero((void *)hdd_lro, sizeof(struct hdd_lro_s));
-
 	/*
 	* Allocate all the LRO data structures at once and then carve
 	* them up as needed
 	*/
+	lro_info_sz = sizeof(struct hdd_lro_s);
 	lro_mgr_sz = sizeof(struct net_lro_mgr);
 	desc_arr_sz = (LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
 	desc_pool_sz = (LRO_DESC_POOL_SZ * sizeof(struct hdd_lro_desc_entry));
 	hash_table_sz = (sizeof(struct hdd_lro_desc_table) * LRO_DESC_TABLE_SZ);
 
-	lro_mem_ptr = qdf_mem_malloc(lro_mgr_sz + desc_arr_sz + desc_pool_sz +
-		 hash_table_sz);
+	lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
+					desc_pool_sz + hash_table_sz);
 
 	if (NULL == lro_mem_ptr) {
 		hdd_err("Unable to allocate memory for LRO");
 		hdd_ctx->config->lro_enable = 0;
-		return -ENOMEM;
+		return NULL;
 	}
 
+	hdd_lro = (struct hdd_lro_s *)lro_mem_ptr;
+	lro_mem_ptr += lro_info_sz;
 	/* LRO manager */
 	hdd_lro->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
 	lro_mem_ptr += lro_mgr_sz;
@@ -606,9 +499,8 @@ int hdd_lro_enable(hdd_context_t *hdd_ctx,
 		 (struct hdd_lro_desc_table *)lro_mem_ptr;
 
 	/* Initialize the LRO descriptors */
-	 hdd_lro_desc_info_init(hdd_lro);
+	hdd_lro_desc_info_init(hdd_lro);
 
-	hdd_lro->lro_mgr->dev = adapter->dev;
 	if (hdd_ctx->enableRxThread)
 		hdd_lro->lro_mgr->features = LRO_F_NI;
 
@@ -621,14 +513,46 @@ int hdd_lro_enable(hdd_context_t *hdd_ctx,
 	hdd_lro->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
 	hdd_lro->lro_mgr->max_desc = LRO_DESC_POOL_SZ;
 
-	adapter->dev->features |= NETIF_F_LRO;
+	return hdd_lro;
+}
+
+/**
+ * hdd_lro_enable() - enable LRO
+ * @hdd_ctx: HDD context
+ * @adapter: HDD adapter
+ *
+ * This function enables LRO in the network device attached to
+ * the HDD adapter. It also allocates the HDD LRO instance for
+ * that network device
+ *
+ * Return: 0 - success, < 0 - failure
+ */
+int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
+{
+
+	if (!hdd_ctx->config->lro_enable ||
+		 QDF_STA_MODE != adapter->device_mode) {
+		hdd_info("LRO Disabled");
+		return 0;
+	}
 
 	/* Register the flush callback */
-	ol_register_lro_flush_cb(hdd_lro_flush, adapter);
+	ol_register_lro_flush_cb(hdd_lro_flush, hdd_init_lro_mgr);
+	adapter->dev->features |= NETIF_F_LRO;
+
+	hdd_info("LRO Enabled");
 
 	return 0;
 }
 
+void hdd_deinit_lro_mgr(void *lro_info)
+{
+	if (lro_info) {
+		hdd_err("LRO instance %p is being freed", lro_info);
+		qdf_mem_free(lro_info);
+	}
+}
+
 /**
  * hdd_lro_disable() - disable LRO
  * @hdd_ctx: HDD context
@@ -646,17 +570,8 @@ void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
 		return;
 
 	/* Deregister the flush callback */
-	ol_deregister_lro_flush_cb();
-
-	if (adapter->lro_info.lro_mgr) {
-		hdd_lro_desc_info_deinit(&adapter->lro_info);
-		qdf_mem_free(adapter->lro_info.lro_mgr);
-		adapter->lro_info.lro_mgr = NULL;
-		adapter->lro_info.lro_desc_info.
-			lro_desc_pool.lro_desc_array = NULL;
-		adapter->lro_info.lro_desc_info.
-			lro_hash_table = NULL;
-	}
+	ol_deregister_lro_flush_cb(hdd_deinit_lro_mgr);
+
 	return;
 }
 
@@ -681,12 +596,26 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 		struct iphdr *iph;
 		struct tcphdr *tcph;
 		struct net_lro_desc *lro_desc = NULL;
-		struct hdd_lro_s *hdd_lro = &adapter->lro_info;
+		struct hdd_lro_s *lro_info;
+		struct hif_opaque_softc *hif_hdl =
+			(struct hif_opaque_softc *)cds_get_context(
+							QDF_MODULE_ID_HIF);
+		if (hif_hdl == NULL) {
+			hdd_err("hif_hdl is NULL");
+			return status;
+		}
+
+		lro_info = hif_get_lro_info(QDF_NBUF_CB_RX_CTX_ID(skb),
+					hif_hdl);
+		if (lro_info == NULL) {
+			hdd_err("LRO mgr is NULL, vdev could be going down");
+			return status;
+		}
+
 		iph = (struct iphdr *)skb->data;
 		tcph = (struct tcphdr *)(skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb));
-		qdf_spin_lock_bh(
-			&hdd_lro->lro_mgr_arr_access_lock);
-		if (hdd_lro_eligible(adapter, skb, iph, tcph, &lro_desc)) {
+		lro_info->lro_mgr->dev = adapter->dev;
+		if (hdd_lro_eligible(lro_info, skb, iph, tcph, &lro_desc)) {
 			struct net_lro_info hdd_lro_info;
 
 			hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
@@ -699,52 +628,22 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 			hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
 			hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
 
-			lro_receive_skb_ext(adapter->lro_info.lro_mgr, skb,
+			lro_receive_skb_ext(lro_info->lro_mgr, skb,
 				 (void *)adapter, &hdd_lro_info);
 
 			if (!hdd_lro_info.lro_desc->active) {
-				hdd_lro_desc_free(lro_desc, adapter);
+				hdd_lro_desc_free(lro_desc, lro_info);
 			}
 
 			status = HDD_LRO_RX;
-			adapter->lro_info.lro_stats.lro_eligible_tcp++;
 		} else {
-			hdd_lro_flush_pkt(adapter->lro_info.lro_mgr,
-				 iph, tcph, adapter);
-			adapter->lro_info.lro_stats.lro_ineligible_tcp++;
+			hdd_lro_flush_pkt(lro_info->lro_mgr,
+				 iph, tcph, lro_info);
 		}
-		qdf_spin_unlock_bh(
-			&hdd_lro->lro_mgr_arr_access_lock);
 	}
 	return status;
 }
 
-/**
- * hdd_lro_bucket_to_string() - return string conversion of
- * bucket
- * @bucket: bucket
- *
- * This utility function helps log string conversion of bucket
- * enum
- *
- * Return: string conversion of the LRO bucket, if match found;
- *        "Invalid" otherwise.
- */
-static const char *hdd_lro_bucket_to_string(enum hdd_lro_pkt_aggr_bucket bucket)
-{
-	switch (bucket) {
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_0_7);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_8_15);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_16_23);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_24_31);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_32_39);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_40_47);
-	CASE_RETURN_STRING(HDD_LRO_BUCKET_48_OR_MORE);
-	default:
-		return "Invalid";
-	}
-}
-
 /**
  * wlan_hdd_display_lro_stats() - display LRO statistics
  * @hdd_ctx: hdd context
@@ -753,61 +652,5 @@ static const char *hdd_lro_bucket_to_string(enum hdd_lro_pkt_aggr_bucket bucket)
  */
 void hdd_lro_display_stats(hdd_context_t *hdd_ctx)
 {
-
-	hdd_adapter_t *adapter = NULL;
-	hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
-	QDF_STATUS status;
-	int i;
-
-	if (!hdd_ctx->config->lro_enable) {
-		hdd_err("LRO Disabled");
-		return;
-	}
-
-	status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
-	while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
-		struct hdd_lro_stats *stats;
-		hdd_err("\nLRO statistics:");
-
-		adapter = adapter_node->pAdapter;
-		if (!adapter) {
-			status = hdd_get_next_adapter(hdd_ctx,
-				 adapter_node, &next);
-			adapter_node = next;
-			continue;
-		}
-
-		stats = &adapter->lro_info.lro_stats;
-		hdd_err("Session_id %d device mode %d",
-			adapter->sessionId, adapter->device_mode);
-
-		if (NL80211_IFTYPE_STATION != adapter->wdev.iftype) {
-			hdd_err("No LRO on interface type %d",
-				 adapter->wdev.iftype);
-			status = hdd_get_next_adapter(hdd_ctx,
-				 adapter_node, &next);
-			adapter_node = next;
-			continue;
-		}
-
-		for (i = 0; i <= HDD_LRO_BUCKET_MAX; i++) {
-			if (stats && stats->pkt_aggr_hist)
-				hdd_err("bucket %s: %d packets",
-					 hdd_lro_bucket_to_string(i),
-					 stats->pkt_aggr_hist[i]);
-		}
-
-		hdd_err("LRO eligible TCP packets %d\n"
-			 "LRO ineligible TCP packets %d",
-			 stats->lro_eligible_tcp, stats->lro_ineligible_tcp);
-
-		if (adapter->lro_info.lro_mgr)
-			hdd_err("LRO manager aggr %lu flushed %lu no desc %lu",
-				 adapter->lro_info.lro_mgr->stats.aggregated,
-				 adapter->lro_info.lro_mgr->stats.flushed,
-				 adapter->lro_info.lro_mgr->stats.no_desc);
-
-		status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next);
-		adapter_node = next;
-	}
+	hdd_err("LRO stats is broken, will fix it");
 }