
qcacld-3.0: Modify legacy LRO implementation to use QDF

Modify the legacy LRO implementation in HDD to use the
implementation in QDF instead. This avoids code duplication
and unifies the LRO implementations for Napier and Helium.

Change-Id: Ie9d3bb6d3dcda5a7d1d5fda6e7cbdef92be30461
CRs-Fixed: 2042812
Dhanashri Atre 8 years ago
parent
commit
1a6a4ce139
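
For orientation, below is a condensed sketch of the receive path that results from this change, assembled from the hdd_lro_rx() and wlan_hdd_get_lro_ctx() hunks further down. It is illustrative only: the function name hdd_lro_rx_sketch() and the trimmed error handling are hypothetical, while the qdf_lro_*, wlan_hdd_*, and lro_receive_skb_ext() calls are taken from the hunks in this commit.

/*
 * Illustrative sketch only (not part of this commit): the shape of the
 * unified hdd_lro_rx() path after the change, with flow eligibility,
 * descriptor lookup, flushing and freeing delegated to the QDF LRO
 * helpers instead of the removed HDD-local code.
 */
static enum hdd_lro_rx_status hdd_lro_rx_sketch(hdd_adapter_t *adapter,
						struct sk_buff *skb)
{
	struct qdf_lro_info info;
	struct net_lro_desc *lro_desc = NULL;
	qdf_lro_ctx_t ctx;

	if (!(adapter->dev->features & NETIF_F_LRO) ||
	    !QDF_NBUF_CB_RX_TCP_PROTO(skb))
		return HDD_LRO_NO_RX;

	/* Per-RX-context LRO state now lives in QDF: looked up via HIF on
	 * Helium, carried in the nbuf control block on Napier.
	 */
	ctx = wlan_hdd_get_lro_ctx(skb);
	if (!ctx)
		return HDD_LRO_NO_RX;

	info.iph = skb->data;
	info.tcph = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
	ctx->lro_mgr->dev = adapter->dev;

	/* QDF decides eligibility and finds or allocates the flow
	 * descriptor, replacing hdd_lro_eligible()/hdd_lro_desc_find().
	 */
	if (qdf_lro_get_info(ctx, skb, &info, (void **)&lro_desc)) {
		struct net_lro_info hdd_lro_info;

		hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
		hdd_lro_info.lro_desc = lro_desc;
		hdd_lro_info.lro_eligible = 1;
		hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
		hdd_lro_info.tcp_data_csum =
			csum_unfold(QDF_NBUF_CB_RX_TCP_CHKSUM(skb));
		hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
		hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);

		lro_receive_skb_ext(ctx->lro_mgr, skb, (void *)adapter,
				    &hdd_lro_info);

		/* descriptor no longer active: return it to the QDF pool */
		if (!hdd_lro_info.lro_desc->active)
			qdf_lro_desc_free(ctx, lro_desc);

		return HDD_LRO_RX;
	}

	/* ineligible frame: flush any aggregation for this flow via QDF */
	qdf_lro_flush_pkt(ctx, &info);
	return HDD_LRO_NO_RX;
}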

+ 1 - 2
core/dp/htt/htt_internal.h

@@ -181,7 +181,7 @@ static inline struct htt_host_rx_desc_base *htt_rx_desc(qdf_nbuf_t msdu)
 		~HTT_RX_DESC_ALIGN_MASK);
 }
 
-#if defined(FEATURE_LRO) && defined(HELIUMPLUS_PADDR64)
+#if defined(FEATURE_LRO) && defined(HELIUMPLUS)
 /**
  * htt_print_rx_desc_lro() - print LRO information in the rx
  * descriptor
@@ -244,7 +244,6 @@ static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
 		QDF_NBUF_CB_RX_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
 		QDF_NBUF_CB_RX_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
 		QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
-		QDF_NBUF_CB_RX_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
 		QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
 		QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(msdu) =
 			 rx_desc->msdu_start.flow_id_toeplitz;

+ 0 - 169
core/dp/txrx/ol_txrx.c

@@ -74,7 +74,6 @@
 #include <cdp_txrx_flow_ctrl_legacy.h>
 #include <cdp_txrx_bus.h>
 #include <cdp_txrx_ipa.h>
-#include <cdp_txrx_lro.h>
 #include <cdp_txrx_pmf.h>
 #include "wma.h"
 #include "hif.h"
@@ -4869,166 +4868,6 @@ static QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
 }
 #endif
 
-#if defined(FEATURE_LRO)
-/**
- * ol_txrx_lro_flush_handler() - LRO flush handler
- * @context: dev handle
- * @rxpkt: rx data
- * @staid: station id
- *
- * This function handles an LRO flush indication.
- * If the rx thread is enabled, it will be invoked by the rx
- * thread else it will be called in the tasklet context
- *
- * Return: none
- */
-static void ol_txrx_lro_flush_handler(void *context,
-				      void *rxpkt,
-				      uint16_t staid)
-{
-	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (qdf_unlikely(!pdev)) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Invalid context", __func__);
-		qdf_assert(0);
-		return;
-	}
-
-	if (pdev->lro_info.lro_flush_cb)
-		pdev->lro_info.lro_flush_cb(context);
-	else
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: lro_flush_cb NULL", __func__);
-}
-
-/**
- * ol_txrx_lro_flush() - LRO flush callback
- * @data: opaque data pointer
- *
- * This is the callback registered with CE to trigger
- * an LRO flush
- *
- * Return: none
- */
-static void ol_txrx_lro_flush(void *data)
-{
-	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
-	struct cds_ol_rx_pkt *pkt;
-	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (qdf_unlikely(!sched_ctx))
-		return;
-
-	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
-		ol_txrx_lro_flush_handler(data, NULL, 0);
-	} else {
-		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
-		if (qdf_unlikely(!pkt)) {
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				  "%s: Not able to allocate context", __func__);
-			return;
-		}
-
-		pkt->callback =
-			 (cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
-		pkt->context = data;
-		pkt->Rxpkt = NULL;
-		pkt->staId = 0;
-		cds_indicate_rxpkt(sched_ctx, pkt);
-	}
-}
-
-/**
- * ol_register_lro_flush_cb() - register the LRO flush callback
- * @lro_flush_cb: flush callback function
- * @lro_init_cb: Allocate and initialize LRO data structure.
- *
- * Store the LRO flush callback provided and in turn
- * register OL's LRO flush handler with CE
- *
- * Return: none
- */
-static void ol_register_lro_flush_cb(void (lro_flush_cb)(void *),
-			      void *(lro_init_cb)(void))
-{
-	struct hif_opaque_softc *hif_device;
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (pdev == NULL) {
-		ol_txrx_err("%s: pdev NULL!", __func__);
-		TXRX_ASSERT2(0);
-		goto out;
-	}
-	if (pdev->lro_info.lro_flush_cb != NULL) {
-		ol_txrx_info(
-			   "%s: LRO already initialised\n", __func__);
-		if (pdev->lro_info.lro_flush_cb != lro_flush_cb) {
-			ol_txrx_err(
-				   "lro_flush_cb is differ to previously registered callback\n")
-			TXRX_ASSERT2(0);
-			goto out;
-		}
-		qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
-		goto out;
-	}
-	pdev->lro_info.lro_flush_cb = lro_flush_cb;
-	hif_device = (struct hif_opaque_softc *)
-				cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (qdf_unlikely(hif_device == NULL)) {
-		ol_txrx_err(
-			"%s: hif_device NULL!", __func__);
-		qdf_assert(0);
-		goto out;
-	}
-
-	hif_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, lro_init_cb);
-	qdf_atomic_inc(&pdev->lro_info.lro_dev_cnt);
-
-out:
-	return;
-}
-
-/**
- * ol_deregister_lro_flush_cb() - deregister the LRO flush callback
- * @lro_deinit_cb: callback function for deregistration.
- *
- * Remove the LRO flush callback provided and in turn
- * deregister OL's LRO flush handler with CE
- *
- * Return: none
- */
-static void ol_deregister_lro_flush_cb(void (lro_deinit_cb)(void *))
-{
-	struct hif_opaque_softc *hif_device;
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (pdev == NULL) {
-		ol_txrx_err("%s: pdev NULL!", __func__);
-		return;
-	}
-	if (qdf_atomic_dec_and_test(&pdev->lro_info.lro_dev_cnt) == 0) {
-		ol_txrx_info(
-			   "%s: Other LRO enabled modules still exist, do not unregister the lro_flush_cb\n", __func__);
-		return;
-	}
-	hif_device =
-		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (qdf_unlikely(hif_device == NULL)) {
-		ol_txrx_err(
-			"%s: hif_device NULL!", __func__);
-		qdf_assert(0);
-		return;
-	}
-
-	hif_lro_flush_cb_deregister(hif_device, lro_deinit_cb);
-
-	pdev->lro_info.lro_flush_cb = NULL;
-}
-#endif /* FEATURE_LRO */
-
 void
 ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
 {
@@ -5418,13 +5257,6 @@ static struct cdp_ipa_ops ol_ops_ipa = {
 #endif /* IPA_OFFLOAD */
 };
 
-static struct cdp_lro_ops ol_ops_lro = {
-#ifdef FEATURE_LRO
-	.register_lro_flush_cb = ol_register_lro_flush_cb,
-	.deregister_lro_flush_cb = ol_deregister_lro_flush_cb
-#endif /* FEATURE_LRO */
-};
-
 static struct cdp_bus_ops ol_ops_bus = {
 	.bus_suspend = ol_txrx_bus_suspend,
 	.bus_resume = ol_txrx_bus_resume
@@ -5541,7 +5373,6 @@ static struct cdp_ops ol_txrx_ops = {
 	.flowctl_ops = &ol_ops_flowctl,
 	.l_flowctl_ops = &ol_ops_l_flowctl,
 	.ipa_ops = &ol_ops_ipa,
-	.lro_ops = &ol_ops_lro,
 	.bus_ops = &ol_ops_bus,
 	.ocb_ops = &ol_ops_ocb,
 	.peer_ops = &ol_ops_peer,

+ 1 - 131
core/hdd/inc/wlan_hdd_lro.h

@@ -43,133 +43,13 @@ enum hdd_lro_rx_status {
 };
 
 #if defined(FEATURE_LRO)
-
-#include <linux/inet_lro.h>
-#include <linux/list.h>
-
-/* LRO_DESC_TABLE_SZ must be a power of 2 */
-#define LRO_DESC_TABLE_SZ 16
-#define LRO_DESC_TABLE_SZ_MASK (LRO_DESC_TABLE_SZ - 1)
-#define LRO_DESC_POOL_SZ 10
-
-/**
- * hdd_lro_desc_entry - defines the LRO descriptor
- * element stored in the list
- * @lro_node: node of the list
- * @lro_desc: the LRO descriptor contained in this list entry
- */
-struct hdd_lro_desc_entry {
-	struct list_head lro_node;
-	struct net_lro_desc *lro_desc;
-};
-
-/**
- * hdd_lro_desc_pool - pool of free LRO descriptors
- * @lro_desc_array: array of LRO descriptors allocated
- * @lro_free_list_head: head of the list
- * @lro_pool_lock: lock to protect access to the list
- */
-struct hdd_lro_desc_pool {
-	struct hdd_lro_desc_entry *lro_desc_array;
-	struct list_head lro_free_list_head;
-};
-
-/**
- * hdd_lro_desc_table - defines each entry of the LRO
- * hash table
- * @lro_desc_list: list of LRO descriptors
- */
-struct hdd_lro_desc_table {
-	struct list_head lro_desc_list;
-};
-
-/**
- * hdd_lro_desc_info - structure containing the LRO
- * descriptor information
- * @lro_hash_table: hash table used for a quick desc. look-up
- * @lro_hash_lock: lock to protect access to the hash table
- * @lro_desc_pool: Free pool of LRO descriptors
- */
-struct hdd_lro_desc_info {
-	struct hdd_lro_desc_table *lro_hash_table;
-	struct hdd_lro_desc_pool lro_desc_pool;
-};
-
-/**
- * enum hdd_lro_pkt_aggr_bucket - idenitifies the bucket holding
- * the count of the aggregated packets
- * @HDD_LRO_BUCKET_0_7: identifies the packet count when the
- * aggregate size is between 0 to 7 packets
- * @HDD_LRO_BUCKET_8_15: identifies the packet count when the
- * aggregate size is between 8 to 15 packets
- * @HDD_LRO_BUCKET_16_23: identifies the packet count when the
- * aggregate size is between 16 to 23 packets
- * @HDD_LRO_BUCKET_24_31: identifies the packet count when the
- * aggregate size is between 24 to 31 packets
- * @HDD_LRO_BUCKET_32_39: identifies the packet count when the
- * aggregate size is between 32 to 39 packets
- * @HDD_LRO_BUCKET_40_47: identifies the packet count when the
- * aggregate size is between 40 to 47 packets
- * @HDD_LRO_BUCKET_48_OR_MORE: identifies the packet count when
- * the aggregate size is 48 or more packets
- * @HDD_LRO_BUCKET_MAX: identifies the packet count when the
- * aggregate size is 48 or more packets
- */
-enum hdd_lro_pkt_aggr_bucket {
-	HDD_LRO_BUCKET_0_7 = 0,
-	HDD_LRO_BUCKET_8_15 = 1,
-	HDD_LRO_BUCKET_16_23 = 2,
-	HDD_LRO_BUCKET_24_31 = 3,
-	HDD_LRO_BUCKET_32_39 = 4,
-	HDD_LRO_BUCKET_40_47 = 5,
-	HDD_LRO_BUCKET_48_OR_MORE = 6,
-	HDD_LRO_BUCKET_MAX = HDD_LRO_BUCKET_48_OR_MORE,
-};
-
-/**
- * hdd_lro_stats - structure containing the LRO statistics
- * information
- * @pkt_aggr_hist: histogram of the number of aggregated packets
- * @lro_eligible_tcp: number of LRO elgible TCP packets
- * @lro_ineligible_tcp: number of LRO inelgible TCP packets
- */
-struct hdd_lro_stats {
-	uint16_t pkt_aggr_hist[HDD_LRO_BUCKET_MAX + 1];
-	uint32_t lro_eligible_tcp;
-	uint32_t lro_ineligible_tcp;
-};
-
-/**
- * hdd_lro_s - LRO information per HDD adapter
- * @lro_mgr: LRO manager
- * @lro_desc_info: LRO descriptor information
- * @lro_mgr_arr_access_lock: Lock to access LRO manager array.
- * @lro_stats: LRO statistics
- */
-struct hdd_lro_s {
-	struct net_lro_mgr *lro_mgr;
-	struct hdd_lro_desc_info lro_desc_info;
-};
-
 int hdd_lro_init(hdd_context_t *hdd_ctx);
 
-int hdd_lro_enable(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter);
-
-void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter);
-
 enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 	 hdd_adapter_t *adapter, struct sk_buff *skb);
-
-void hdd_lro_flush_all(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter);
-
 void hdd_lro_display_stats(hdd_context_t *hdd_ctx);
 #else
-struct hdd_lro_s {};
-
-static inline int hdd_lro_enable(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter)
+static inline int hdd_lro_init(hdd_context_t *hdd_ctx)
 {
 	return 0;
 }
@@ -180,16 +60,6 @@ static inline enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 	return HDD_LRO_NO_RX;
 }
 
-static inline int hdd_lro_init(hdd_context_t *hdd_ctx)
-{
-	return 0;
-}
-
-static inline void hdd_lro_disable(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter)
-{
-}
-
 static inline void hdd_lro_display_stats(hdd_context_t *hdd_ctx)
 {
 }

+ 0 - 1
core/hdd/inc/wlan_hdd_main.h

@@ -1124,7 +1124,6 @@ struct hdd_adapter_s {
 	struct hdd_netif_queue_history
 		 queue_oper_history[WLAN_HDD_MAX_HISTORY_ENTRY];
 	struct hdd_netif_queue_stats queue_oper_stats[WLAN_REASON_TYPE_MAX];
-	struct hdd_lro_s lro_info;
 	ol_txrx_tx_fp tx_fn;
 	/* debugfs entry */
 	struct dentry *debugfs_phy;

+ 25 - 571
core/hdd/src/wlan_hdd_lro.c

@@ -36,7 +36,6 @@
 #include <wlan_hdd_lro.h>
 #include <wlan_hdd_napi.h>
 #include <wma_api.h>
-#include <cdp_txrx_lro.h>
 
 #include <linux/inet_lro.h>
 #include <linux/list.h>
@@ -64,439 +63,11 @@ int hdd_lro_init(hdd_context_t *hdd_ctx)
 	return 0;
 }
 
-/**
- * hdd_lro_enable() - enable LRO
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- *
- * This function enables LRO in the network device attached to
- * the HDD adapter. It also allocates the HDD LRO instance for
- * that network device
- *
- * Return: 0 - success, < 0 - failure
- */
-int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
-{
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * hdd_lro_rx() - LRO receive function
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- * @skb: network buffer
- *
- * Delivers LRO eligible frames to the LRO manager
- *
- * Return: HDD_LRO_RX - frame delivered to LRO manager
- * HDD_LRO_NO_RX - frame not delivered
- */
-enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
-	 hdd_adapter_t *adapter, struct sk_buff *skb)
-{
-	struct net_lro_mgr *lro_mgr;
-	qdf_lro_ctx_t ctx = (qdf_lro_ctx_t)QDF_NBUF_CB_RX_LRO_CTX(skb);
-	/* LRO is not supported or non-TCP packet */
-	if (!ctx)
-		return HDD_LRO_NO_RX;
-
-	lro_mgr = ctx->lro_mgr;
-
-	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb)) {
-		struct net_lro_info hdd_lro_info;
-
-		hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
-
-		hdd_lro_info.lro_desc = QDF_NBUF_CB_RX_LRO_DESC(skb);
-		hdd_lro_info.lro_eligible = 1;
-		hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
-		hdd_lro_info.tcp_data_csum =
-			 csum_unfold(QDF_NBUF_CB_RX_TCP_CHKSUM(skb));
-		hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
-		hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
-
-		lro_receive_skb_ext(lro_mgr, skb,
-			 (void *)adapter, &hdd_lro_info);
-
-		if (!hdd_lro_info.lro_desc->active)
-			qdf_lro_flow_free(skb);
-
-		return HDD_LRO_RX;
-	} else {
-		lro_flush_desc(lro_mgr, QDF_NBUF_CB_RX_LRO_DESC(skb));
-		return HDD_LRO_NO_RX;
-	}
-}
-/**
- * hdd_lro_disable() - disable LRO
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- *
- * This function frees the HDD LRO instance for the network
- * device attached to the HDD adapter
- *
- * Return: none
- */
-void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
+static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
 {
-	return;
+	return (qdf_lro_ctx_t)QDF_NBUF_CB_RX_LRO_CTX(skb);
 }
-
 #else
-
-#define LRO_MAX_AGGR_SIZE 100
-
-/**
- * hdd_lro_get_skb_header() - LRO callback function
- * @skb: network buffer
- * @ip_hdr: contains a pointer to the IP header
- * @tcpudp_hdr: contains a pointer to the TCP header
- * @hdr_flags: indicates if this is a TCP, IPV4 frame
- * @priv: private driver specific opaque pointer
- *
- * Get the IP and TCP headers from the skb
- *
- * Return: 0 - success, < 0 - failure
- */
-static int hdd_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
-	void **tcpudp_hdr, u64 *hdr_flags, void *priv)
-{
-	if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) {
-		hdr_flags = 0;
-		return -EINVAL;
-	}
-
-	*hdr_flags |= (LRO_IPV4 | LRO_TCP);
-	(*ip_hdr) = skb->data;
-	(*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
-	return 0;
-}
-
-/**
- * hdd_lro_desc_pool_init() - Initialize the free pool of LRO
- * descriptors
- * @lro_desc_pool: free pool of the LRO descriptors
- * @lro_mgr: LRO manager
- *
- * Initialize a list that holds the free LRO descriptors
- *
- * Return: none
- */
-static void hdd_lro_desc_pool_init(struct hdd_lro_desc_pool *lro_desc_pool,
-	 struct net_lro_mgr *lro_mgr)
-{
-	int i;
-
-	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);
-
-	for (i = 0; i < LRO_DESC_POOL_SZ; i++) {
-		lro_desc_pool->lro_desc_array[i].lro_desc =
-			 &lro_mgr->lro_arr[i];
-		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
-			 &lro_desc_pool->lro_free_list_head);
-	}
-}
-
-/**
- * hdd_lro_desc_info_init() - Initialize the LRO descriptors
- * @hdd_info: HDD LRO data structure
- *
- * Initialize the free pool of LRO descriptors and the entries
- * of the hash table
- *
- * Return: none
- */
-static void hdd_lro_desc_info_init(struct hdd_lro_s *hdd_info)
-{
-	int i;
-
-	/* Initialize pool of free LRO desc.*/
-	hdd_lro_desc_pool_init(&hdd_info->lro_desc_info.lro_desc_pool,
-		 hdd_info->lro_mgr);
-
-	/* Initialize the hash table of LRO desc.*/
-	for (i = 0; i < LRO_DESC_TABLE_SZ; i++) {
-		/* initialize the flows in the hash table */
-		INIT_LIST_HEAD(&hdd_info->lro_desc_info.
-			 lro_hash_table[i].lro_desc_list);
-	}
-
-}
-
-/**
- * hdd_lro_tcp_flow_match() - function to check for a flow match
- * @iph: IP header
- * @tcph: TCP header
- * @lro_desc: LRO decriptor
- *
- * Checks if the descriptor belongs to the same flow as the one
- * indicated by the TCP and IP header.
- *
- * Return: true - flow match, false - flow does not match
- */
-static inline bool hdd_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
-	 struct iphdr *iph,
-	 struct tcphdr *tcph)
-{
-	if ((lro_desc->tcph->source != tcph->source) ||
-		 (lro_desc->tcph->dest != tcph->dest) ||
-		 (lro_desc->iph->saddr != iph->saddr) ||
-		 (lro_desc->iph->daddr != iph->daddr))
-		return false;
-
-	return true;
-
-}
-
-/**
- * hdd_lro_desc_find() - LRO descriptor look-up function
- *
- * @adapter: HDD adaptor
- * @skb: network buffer
- * @iph: IP header
- * @tcph: TCP header
- * @lro_desc: contains a pointer to the LRO decriptor
- *
- * Look-up the LRO descriptor in the hash table based on the
- * flow ID toeplitz. If the flow is not found, allocates a new
- * LRO descriptor and places it in the hash table
- *
- * Return: 0 - success, < 0 - failure
- */
-static int hdd_lro_desc_find(struct hdd_lro_s *lro_info,
-	 struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
-	 struct net_lro_desc **lro_desc)
-{
-	uint32_t i;
-	struct hdd_lro_desc_table *lro_hash_table;
-	struct list_head *ptr;
-	struct hdd_lro_desc_entry *entry;
-	struct hdd_lro_desc_pool *free_pool;
-	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;
-
-	*lro_desc = NULL;
-	i = QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) & LRO_DESC_TABLE_SZ_MASK;
-
-	lro_hash_table = &desc_info->lro_hash_table[i];
-
-	if (!lro_hash_table) {
-		hdd_err("Invalid hash entry");
-		QDF_ASSERT(0);
-		return -EINVAL;
-	}
-
-	/* Check if this flow exists in the descriptor list */
-	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
-		struct net_lro_desc *tmp_lro_desc = NULL;
-
-		entry = list_entry(ptr, struct hdd_lro_desc_entry, lro_node);
-		tmp_lro_desc = entry->lro_desc;
-		if (tmp_lro_desc->active) {
-			if (hdd_lro_tcp_flow_match(tmp_lro_desc, iph, tcph)) {
-				*lro_desc = entry->lro_desc;
-				return 0;
-			}
-		}
-	}
-
-	/* no existing flow found, a new LRO desc needs to be allocated */
-	free_pool = &lro_info->lro_desc_info.lro_desc_pool;
-	entry = list_first_entry_or_null(
-		 &free_pool->lro_free_list_head,
-		 struct hdd_lro_desc_entry, lro_node);
-	if (NULL == entry) {
-		hdd_debug("Could not allocate LRO desc!");
-		return -ENOMEM;
-	}
-
-	list_del_init(&entry->lro_node);
-
-	if (NULL == entry->lro_desc) {
-		hdd_err("entry->lro_desc is NULL!");
-		return -EINVAL;
-	}
-
-	qdf_mem_zero((void *)entry->lro_desc, sizeof(struct net_lro_desc));
-
-	/*
-	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
-	 * should be 0 for newly allocated lro descriptors
-	 */
-	list_add_tail(&entry->lro_node,
-		 &lro_hash_table->lro_desc_list);
-	*lro_desc = entry->lro_desc;
-
-	return 0;
-}
-
-/**
- * hdd_lro_get_desc() - LRO descriptor look-up function
- * @iph: IP header
- * @tcph: TCP header
- * @lro_arr: Array of LRO decriptors
- * @lro_mgr: LRO manager
- *
- * Looks-up the LRO descriptor for a given flow
- *
- * Return: LRO descriptor
- */
-static struct net_lro_desc *hdd_lro_get_desc(struct net_lro_mgr *lro_mgr,
-	 struct net_lro_desc *lro_arr,
-	 struct iphdr *iph,
-	 struct tcphdr *tcph)
-{
-	int i;
-
-	for (i = 0; i < lro_mgr->max_desc; i++) {
-		if (lro_arr[i].active)
-			if (hdd_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
-				return &lro_arr[i];
-	}
-
-	return NULL;
-}
-
-/**
- * hdd_lro_eligible() - LRO eligibilty check
- * @iph: IP header
- * @tcph: TCP header
- * @adapter: HDD adaptor
- * @desc: LRO descriptor
- * @skb: network buffer
- *
- * Determines if the frame is LRO eligible
- *
- * Return: true - LRO eligible frame, false - frame is not LRO
- * eligible
- */
-static bool hdd_lro_eligible(struct hdd_lro_s *lro_info, struct sk_buff *skb,
-	 struct iphdr *iph, struct tcphdr *tcph, struct net_lro_desc **desc)
-{
-	struct net_lro_desc *lro_desc = NULL;
-	int hw_lro_eligible =
-		 QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) &&
-		 (!QDF_NBUF_CB_RX_TCP_PURE_ACK(skb));
-
-	if (!hw_lro_eligible)
-		return false;
-
-	if (0 != hdd_lro_desc_find(lro_info, skb, iph, tcph, desc)) {
-		hdd_debug("finding the LRO desc failed");
-		return false;
-	}
-
-	lro_desc = *desc;
-	if (!lro_desc)
-		return false;
-
-	/* if this is not the first skb, check the timestamp option */
-	if (lro_desc->tcp_rcv_tsval) {
-		if (tcph->doff == 8) {
-			__be32 *topt = (__be32 *)(tcph + 1);
-
-			if (*topt != htonl((TCPOPT_NOP << 24)
-				 |(TCPOPT_NOP << 16)
-				 | (TCPOPT_TIMESTAMP << 8)
-				 | TCPOLEN_TIMESTAMP))
-				return true;
-
-			/* timestamp should be in right order */
-			topt++;
-			if (after(ntohl(lro_desc->tcp_rcv_tsval),
-					 ntohl(*topt)))
-				return false;
-
-			/* timestamp reply should not be zero */
-			topt++;
-			if (*topt == 0)
-				return false;
-		}
-	}
-
-	return true;
-}
-
-/**
- * hdd_lro_desc_free() - Free the LRO descriptor
- * @adapter: HDD adaptor
- * @desc: LRO descriptor
- *
- * Return the LRO descriptor to the free pool
- *
- * Return: none
- */
-static void hdd_lro_desc_free(struct net_lro_desc *desc,
-	struct hdd_lro_s *lro_info)
-{
-	struct hdd_lro_desc_entry *entry;
-	struct net_lro_mgr *lro_mgr = lro_info->lro_mgr;
-	struct net_lro_desc *arr_base = lro_mgr->lro_arr;
-	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;
-	int i = desc - arr_base;
-
-	if (i >= LRO_DESC_POOL_SZ) {
-		hdd_err("invalid index %d", i);
-		return;
-	}
-
-	entry = &desc_info->lro_desc_pool.lro_desc_array[i];
-
-	list_del_init(&entry->lro_node);
-
-	list_add_tail(&entry->lro_node, &desc_info->
-		 lro_desc_pool.lro_free_list_head);
-}
-
-/**
- * hdd_lro_flush_pkt() - function to flush the LRO flow
- * @iph: IP header
- * @tcph: TCP header
- * @adapter: HDD adaptor
- * @lro_mgr: LRO manager
- *
- * Flush all the packets aggregated in the LRO manager for the
- * flow indicated by the TCP and IP header
- *
- * Return: none
- */
-static void hdd_lro_flush_pkt(struct net_lro_mgr *lro_mgr,
-			      struct iphdr *iph, struct tcphdr *tcph,
-			      struct hdd_lro_s *lro_info)
-{
-	struct net_lro_desc *lro_desc;
-
-	lro_desc = hdd_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
-
-	if (lro_desc) {
-		/* statistics */
-		hdd_lro_desc_free(lro_desc, lro_info);
-		lro_flush_desc(lro_mgr, lro_desc);
-	}
-}
-
-/**
- * hdd_lro_flush() - LRO flush callback
- * @data: opaque pointer containing HDD specific information
- *
- * Callback registered to flush all the packets aggregated in
- * the LRO manager for all the flows
- *
- * Return: none
- */
-static void hdd_lro_flush(void *data)
-{
-	struct hdd_lro_s *hdd_lro = data;
-	struct net_lro_mgr *lro_mgr = hdd_lro->lro_mgr;
-	int i;
-
-	for (i = 0; i < lro_mgr->max_desc; i++) {
-		if (lro_mgr->lro_arr[i].active) {
-			hdd_lro_desc_free(&lro_mgr->lro_arr[i], hdd_lro);
-			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
-		}
-	}
-}
-
 /**
  * hdd_lro_init() - initialization for LRO
  * @hdd_ctx: HDD context
@@ -541,133 +112,17 @@ int hdd_lro_init(hdd_context_t *hdd_ctx)
 	return 0;
 }
 
-static void *hdd_init_lro_mgr(void)
+static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
 {
-	struct hdd_lro_s *hdd_lro;
-	hdd_context_t *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
-	size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
-	size_t hash_table_sz;
-	uint8_t *lro_mem_ptr;
-
-	if (NULL == hdd_ctx) {
-		hdd_err("hdd_ctx is NULL");
-		return NULL;
+	struct hif_opaque_softc *hif_hdl =
+		(struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
+	if (hif_hdl == NULL) {
+		hdd_err("hif_hdl is NULL");
 	}
-	/*
-	 * Allocate all the LRO data structures at once and then carve
-	 * them up as needed
-	 */
-	lro_info_sz = sizeof(struct hdd_lro_s);
-	lro_mgr_sz = sizeof(struct net_lro_mgr);
-	desc_arr_sz = (LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
-	desc_pool_sz = (LRO_DESC_POOL_SZ * sizeof(struct hdd_lro_desc_entry));
-	hash_table_sz = (sizeof(struct hdd_lro_desc_table) * LRO_DESC_TABLE_SZ);
-
-	lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
-					desc_pool_sz + hash_table_sz);
-
-	if (NULL == lro_mem_ptr) {
-		hdd_err("Unable to allocate memory for LRO");
-		hdd_ctx->config->lro_enable = 0;
-		return NULL;
-	}
-
-	hdd_lro = (struct hdd_lro_s *)lro_mem_ptr;
-	lro_mem_ptr += lro_info_sz;
-	/* LRO manager */
-	hdd_lro->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
-	lro_mem_ptr += lro_mgr_sz;
-
-	/* LRO decriptor array */
-	hdd_lro->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr;
-	lro_mem_ptr += desc_arr_sz;
-
-	/* LRO descriptor pool */
-	hdd_lro->lro_desc_info.lro_desc_pool.lro_desc_array =
-		 (struct hdd_lro_desc_entry *)lro_mem_ptr;
-	lro_mem_ptr += desc_pool_sz;
-
-	/* hash table to store the LRO descriptors */
-	hdd_lro->lro_desc_info.lro_hash_table =
-		 (struct hdd_lro_desc_table *)lro_mem_ptr;
 
-	/* Initialize the LRO descriptors */
-	hdd_lro_desc_info_init(hdd_lro);
-
-	if (hdd_ctx->enableRxThread)
-		hdd_lro->lro_mgr->features = LRO_F_NI;
-
-	if (hdd_napi_enabled(HDD_NAPI_ANY))
-		hdd_lro->lro_mgr->features |= LRO_F_NAPI;
-
-	hdd_lro->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	hdd_lro->lro_mgr->max_aggr = LRO_MAX_AGGR_SIZE;
-	hdd_lro->lro_mgr->get_skb_header = hdd_lro_get_skb_header;
-	hdd_lro->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-	hdd_lro->lro_mgr->max_desc = LRO_DESC_POOL_SZ;
-
-	return hdd_lro;
-}
-
-/**
- * hdd_lro_enable() - enable LRO
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- *
- * This function enables LRO in the network device attached to
- * the HDD adapter. It also allocates the HDD LRO instance for
- * that network device
- *
- * Return: 0 - success, < 0 - failure
- */
-int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
-{
-	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
-
-	if (!hdd_ctx->config->lro_enable ||
-		 QDF_STA_MODE != adapter->device_mode) {
-		hdd_debug("LRO Disabled");
-		return 0;
-	}
-
-	/* Register the flush callback */
-	cdp_register_lro_flush_cb(soc, hdd_lro_flush, hdd_init_lro_mgr);
-	adapter->dev->features |= NETIF_F_LRO;
-
-	hdd_debug("LRO Enabled");
-
-	return 0;
-}
-
-static void hdd_deinit_lro_mgr(void *lro_info)
-{
-	if (lro_info) {
-		hdd_debug("LRO instance %p is being freed", lro_info);
-		qdf_mem_free(lro_info);
-	}
-}
-
-/**
- * hdd_lro_disable() - disable LRO
- * @hdd_ctx: HDD context
- * @adapter: HDD adapter
- *
- * This function frees the HDD LRO instance for the network
- * device attached to the HDD adapter
- *
- * Return: none
- */
-void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
-{
-	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
-
-	if (!hdd_ctx->config->lro_enable ||
-		 QDF_STA_MODE != adapter->device_mode)
-		return;
-
-	/* Deregister the flush callback */
-	cdp_deregister_lro_flush_cb(soc, hdd_deinit_lro_mgr);
+	return hif_get_lro_info(QDF_NBUF_CB_RX_CTX_ID(skb), hif_hdl);
 }
+#endif
 
 /**
  * hdd_lro_rx() - LRO receive function
@@ -683,14 +138,13 @@ void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
 enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 	 hdd_adapter_t *adapter, struct sk_buff *skb)
 {
+	qdf_lro_ctx_t ctx;
 	enum hdd_lro_rx_status status = HDD_LRO_NO_RX;
 
 	if ((adapter->dev->features & NETIF_F_LRO) &&
 		 QDF_NBUF_CB_RX_TCP_PROTO(skb)) {
-		struct iphdr *iph;
-		struct tcphdr *tcph;
+		struct qdf_lro_info info;
 		struct net_lro_desc *lro_desc = NULL;
-		struct hdd_lro_s *lro_info;
 		struct hif_opaque_softc *hif_hdl =
 			(struct hif_opaque_softc *)cds_get_context(
 							QDF_MODULE_ID_HIF);
@@ -699,17 +153,20 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 			return status;
 		}
 
-		lro_info = hif_get_lro_info(QDF_NBUF_CB_RX_CTX_ID(skb),
-					hif_hdl);
-		if (lro_info == NULL) {
+		ctx = wlan_hdd_get_lro_ctx(skb);
+
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			 "%s %d: ctx %p\n", __func__, __LINE__, ctx);
+
+		if (ctx == NULL) {
 			hdd_err("LRO mgr is NULL, vdev could be going down");
 			return status;
 		}
 
-		iph = (struct iphdr *)skb->data;
-		tcph = (struct tcphdr *)(skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb));
-		lro_info->lro_mgr->dev = adapter->dev;
-		if (hdd_lro_eligible(lro_info, skb, iph, tcph, &lro_desc)) {
+		info.iph = skb->data;
+		info.tcph = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
+		ctx->lro_mgr->dev = adapter->dev;
+		if (qdf_lro_get_info(ctx, skb, &info, (void **)&lro_desc)) {
 			struct net_lro_info hdd_lro_info;
 
 			hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
@@ -722,25 +179,22 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 			hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
 			hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
 
-			lro_receive_skb_ext(lro_info->lro_mgr, skb,
+			lro_receive_skb_ext(ctx->lro_mgr, skb,
 				 (void *)adapter, &hdd_lro_info);
 
 			if (!hdd_lro_info.lro_desc->active)
-				hdd_lro_desc_free(lro_desc, lro_info);
+				qdf_lro_desc_free(ctx, lro_desc);
 
 			status = HDD_LRO_RX;
 		} else {
-			hdd_lro_flush_pkt(lro_info->lro_mgr,
-				 iph, tcph, lro_info);
+			qdf_lro_flush_pkt(ctx, &info);
 		}
 	}
 	return status;
 }
 
-#endif
-
 /**
- * wlan_hdd_display_lro_stats() - display LRO statistics
+ * hdd_lro_display_stats() - display LRO statistics
  * @hdd_ctx: hdd context
  *
  * Return: none

+ 1 - 7
core/hdd/src/wlan_hdd_main.c

@@ -3164,14 +3164,9 @@ QDF_STATUS hdd_init_station_mode(hdd_adapter_t *adapter)
 	if (status != QDF_STATUS_SUCCESS)
 		goto error_tdls_init;
 
-	status = hdd_lro_enable(hdd_ctx, adapter);
-	if (status != QDF_STATUS_SUCCESS)
-		goto error_lro_enable;
-
+	adapter->dev->features |= NETIF_F_LRO;
 	return QDF_STATUS_SUCCESS;
 
-error_lro_enable:
-	wlan_hdd_tdls_exit(adapter);
 error_tdls_init:
 	clear_bit(WMM_INIT_DONE, &adapter->event_flags);
 	hdd_wmm_adapter_close(adapter);
@@ -3990,7 +3985,6 @@ QDF_STATUS hdd_stop_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
 		if (scan_info != NULL && scan_info->mScanPending)
 			wlan_hdd_scan_abort(adapter);
 
-		hdd_lro_disable(hdd_ctx, adapter);
 		wlan_hdd_cleanup_remain_on_channel_ctx(adapter);
 
 #ifdef WLAN_OPEN_SOURCE