
qcacmn: Modify legacy LRO implementation to use QDF

Modify the legacy LRO implementation to use the QDF implementation
instead. This avoids code duplication and unifies the LRO
implementations for Napier and Helium.

CRs-Fixed: 2042812
Change-Id: I38e9da3b54392a1c5781133916361aac3875d43d
Dhanashri Atre, 8 years ago
Parent commit: 991ee4defc

+ 0 - 82
dp/inc/cdp_txrx_lro.h

@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
- /**
- * @file cdp_txrx_lro.h
- * @brief Define the host data path Large Receive Offload API
- * functions
- */
-#ifndef _CDP_TXRX_LRO_H_
-#define _CDP_TXRX_LRO_H_
-/**
- * cdp_register_lro_flush_cb() - register lro flsu cb function pointer
- * @soc - data path soc handle
- * @pdev - device instance pointer
- *
- * register lro flush callback function pointer
- *
- * return none
- */
-static inline void cdp_register_lro_flush_cb(ol_txrx_soc_handle soc,
-		void (lro_flush_cb)(void *), void *(lro_init_cb)(void))
-{
-	if (!soc || !soc->ops || !soc->ops->lro_ops) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
-			"%s invalid instance", __func__);
-		return;
-	}
-
-	if (soc->ops->lro_ops->register_lro_flush_cb)
-		return soc->ops->lro_ops->register_lro_flush_cb(lro_flush_cb,
-			lro_init_cb);
-
-	return;
-}
-/**
- * cdp_deregister_lro_flush_cb() - deregister lro flsu cb function pointer
- * @soc - data path soc handle
- *
- * deregister lro flush callback function pointer
- *
- * return none
- */
-static inline void cdp_deregister_lro_flush_cb(ol_txrx_soc_handle soc,
-		void (lro_deinit_cb)(void *))
-{
-	if (!soc || !soc->ops || !soc->ops->lro_ops) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
-			"%s invalid instance", __func__);
-		return;
-	}
-
-	if (soc->ops->lro_ops->deregister_lro_flush_cb)
-		return soc->ops->lro_ops->deregister_lro_flush_cb(
-			lro_deinit_cb);
-
-	return;
-}
-
-#endif /* _CDP_TXRX_LRO_H_ */

+ 0 - 12
dp/inc/cdp_txrx_ops.h

@@ -782,17 +782,6 @@ struct cdp_ipa_ops {
 		uint64_t quota_bytes);
 };
 
-/**
- * struct cdp_lro_ops - mcl large receive offload ops
- * @register_lro_flush_cb:
- * @deregister_lro_flush_cb:
- */
-struct cdp_lro_ops {
-	void (*register_lro_flush_cb)(void (lro_flush_cb)(void *),
-			void *(lro_init_cb)(void));
-	void (*deregister_lro_flush_cb)(void (lro_deinit_cb)(void *));
-};
-
 /**
  * struct cdp_bus_ops - mcl bus suspend/resume ops
  * @bus_suspend:
@@ -924,7 +913,6 @@ struct cdp_ops {
 	struct cdp_flowctl_ops      *flowctl_ops;
 	struct cdp_lflowctl_ops     *l_flowctl_ops;
 	struct cdp_ipa_ops          *ipa_ops;
-	struct cdp_lro_ops          *lro_ops;
 	struct cdp_bus_ops          *bus_ops;
 	struct cdp_ocb_ops          *ocb_ops;
 	struct cdp_peer_ops         *peer_ops;

+ 0 - 5
dp/wifi3.0/dp_main.c

@@ -3851,10 +3851,6 @@ static struct cdp_ipa_ops dp_ops_ipa = {
 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
 };
 
-static struct cdp_lro_ops dp_ops_lro = {
-	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
-};
-
 /**
  * dp_dummy_bus_suspend() - dummy bus suspend op
  *
@@ -3937,7 +3933,6 @@ static struct cdp_ops dp_txrx_ops = {
 	.flowctl_ops = &dp_ops_flowctl,
 	.l_flowctl_ops = &dp_ops_l_flowctl,
 	.ipa_ops = &dp_ops_ipa,
-	.lro_ops = &dp_ops_lro,
 	.bus_ops = &dp_ops_bus,
 	.ocb_ops = &dp_ops_ocb,
 	.peer_ops = &dp_ops_peer,

+ 61 - 40
dp/wifi3.0/dp_rx.c

@@ -636,6 +636,33 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
 }
 #endif
 
+#if defined(FEATURE_LRO)
+static void dp_rx_print_lro_info(uint8_t *rx_tlv)
+{
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+	FL("----------------------RX DESC LRO----------------------\n"));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+	FL("---------------------------------------------------------\n"));
+}
+
 /**
  * dp_rx_lro() - LRO related processing
  * @rx_tlv: TLV data extracted from the rx packet
@@ -647,52 +674,47 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
  *
  * Return: true: LRO enabled false: LRO is not enabled
  */
-#if defined(FEATURE_LRO)
-static bool dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
+static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
 	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
 {
-	qdf_assert(rx_tlv);
-	if (peer->vdev->lro_enable &&
-	 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv)) {
-		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
-			 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv) &&
-			 !HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
-
-		if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)) {
-			QDF_NBUF_CB_RX_LRO_CTX(msdu) = ctx;
-			QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
-				 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
-			QDF_NBUF_CB_RX_TCP_WIN(msdu) =
-				 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
-			QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
-				 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
-			QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
-				 HAL_RX_TLV_GET_TCP_CHKSUM
-					(rx_tlv);
-			QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(msdu) =
-				 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ
-					(rx_tlv);
-			QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
-				 HAL_RX_TLV_GET_TCP_OFFSET
-					(rx_tlv);
-			QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
-				 HAL_RX_TLV_GET_IPV6(rx_tlv);
-
-			QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
-				 qdf_lro_update_info(ctx, msdu);
-		}
-		/* LRO 'enabled' packet, it may not be LRO 'eligible' */
-		return true;
+	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			 FL("no peer, no vdev or LRO disabled"));
+		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
+		return;
 	}
+	qdf_assert(rx_tlv);
+	dp_rx_print_lro_info(rx_tlv);
+
+	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
+		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
+
+	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
+			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
+
+	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
+			 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
+	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
+			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
+	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
+			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
+	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
+			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
+	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
+			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
+	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
+			 HAL_RX_TLV_GET_IPV6(rx_tlv);
+	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
+			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
+	QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(msdu) =
+			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
+	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;
 
-	/* LRO not supported on this vdev or a non-TCP packet */
-	return false;
 }
 #else
-static bool dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
+static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
 	 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
 {
-	return false;
 }
 #endif
 
@@ -1099,8 +1121,7 @@ done:
 
 			rx_bufs_used++;
 
-			if (!dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx))
-				QDF_NBUF_CB_RX_LRO_CTX(nbuf) = NULL;
+			dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx);
 
 			DP_RX_LIST_APPEND(deliver_list_head,
 						deliver_list_tail,

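Note: with this patch dp_rx_lro() no longer decides LRO eligibility itself; it only copies the HAL RX TLV fields (eligibility, pure ACK, checksum, sequence/ACK numbers, window, protocol, offset, Toeplitz hash) into the nbuf control block and records the LRO context, deferring the decision to QDF. A one-function sketch of the test QDF then applies, mirroring the hw_lro_eligible computation in the qdf_lro.c hunk further below; the helper name is illustrative and not part of the patch:

#include "qdf_types.h"
#include "qdf_nbuf.h"

/* True when the fields stashed by dp_rx_lro() mark the frame as an LRO
 * candidate: HW flagged the flow as eligible and it is not a pure TCP ACK.
 * This mirrors hw_lro_eligible in qdf_lro_get_info(). */
static bool rx_nbuf_lro_candidate(qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) &&
	       !QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf);
}
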
+ 3 - 2
hif/inc/hif.h

@@ -35,6 +35,7 @@ extern "C" {
 /* Header files */
 #include <qdf_status.h>
 #include "qdf_nbuf.h"
+#include "qdf_lro.h"
 #include "ol_if_athvar.h"
 #include <linux/platform_device.h>
 #ifdef HIF_PCI
@@ -211,8 +212,8 @@ struct qca_napi_info {
 	int                  irq;
 	struct qca_napi_stat stats[NR_CPUS];
 	/* will only be present for data rx CE's */
-	void (*lro_flush_cb)(void *arg);
-	void                 *lro_ctx;
+	void (*lro_flush_cb)(void *);
+	qdf_lro_ctx_t        lro_ctx;
 	qdf_spinlock_t lro_unloading_lock;
 };
 

+ 0 - 7
hif/inc/hif_napi.h

@@ -96,13 +96,6 @@ enum qca_napi_event {
 #define NAPI_ID2PIPE(i) ((i)-1)
 #define NAPI_PIPE2ID(p) ((p)+1)
 
-int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
-				   void (lro_flush_handler)(void *arg),
-				   void *(lro_init_handler)(void));
-
-void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
-				      void (lro_deinit_cb)(void *arg));
-
 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id);
 #ifdef FEATURE_NAPI
 

+ 0 - 7
hif/src/ce/ce_api.h

@@ -487,13 +487,6 @@ static inline void ce_pkt_error_count_incr(
 
 bool ce_check_rx_pending(struct CE_state *CE_state);
 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
-#if defined(FEATURE_LRO)
-int ce_lro_flush_cb_register(struct hif_opaque_softc *scn,
-			     void (handler)(void *arg),
-			     void *(lro_init_handler)(void));
-int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
-			       void (lro_deinit_cb)(void *arg));
-#endif
 struct ce_ops *ce_services_srng(void);
 struct ce_ops *ce_services_legacy(void);
 bool ce_srng_based(struct hif_softc *scn);

+ 1 - 2
hif/src/ce/ce_internal.h

@@ -150,8 +150,7 @@ struct CE_state {
 	/* datapath - for faster access, use bools instead of a bitmap */
 	bool htt_tx_data;
 	bool htt_rx_data;
-	void (*lro_flush_cb)(void *);
-	void *lro_data;
+	qdf_lro_ctx_t lro_data;
 	qdf_spinlock_t lro_unloading_lock;
 };
 

+ 4 - 84
hif/src/ce/ce_main.c

@@ -1292,6 +1292,8 @@ void ce_fini(struct CE_handle *copyeng)
 
 	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);
 
+	qdf_lro_deinit(CE_state->lro_data);
+
 	if (CE_state->src_ring) {
 		/* Cleanup the datapath Tx ring */
 		ce_h2t_tx_ce_cleanup(copyeng);
@@ -2467,6 +2469,8 @@ int hif_config_ce(struct hif_softc *scn)
 			goto err;
 		}
 
+		ce_state->lro_data = qdf_lro_init();
+
 		if (attr->flags & CE_ATTR_DIAG) {
 			/* Reserve the ultimate CE for
 			 * Diagnostic Window support
@@ -2742,90 +2746,6 @@ void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
 
 	return ce_state->lro_data;
 }
-
-/**
- * ce_lro_flush_cb_register() - register the LRO flush
- * callback
- * @scn: HIF context
- * @handler: callback function
- * @data: opaque data pointer to be passed back
- *
- * Store the LRO flush callback provided
- *
- * Return: Number of instances the callback is registered for
- */
-int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
-			     void (handler)(void *),
-			     void *(lro_init_handler)(void))
-{
-	int rc = 0;
-	int i;
-	struct CE_state *ce_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
-	void *data = NULL;
-
-	QDF_ASSERT(scn != NULL);
-
-	if (scn != NULL) {
-		for (i = 0; i < scn->ce_count; i++) {
-			ce_state = scn->ce_id_to_state[i];
-			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
-				data = lro_init_handler();
-				if (data == NULL) {
-					HIF_ERROR("%s: Failed to init LRO for CE %d",
-						  __func__, i);
-					continue;
-				}
-				ce_state->lro_flush_cb = handler;
-				ce_state->lro_data = data;
-				rc++;
-			}
-		}
-	} else {
-		HIF_ERROR("%s: hif_state NULL!", __func__);
-	}
-	return rc;
-}
-
-/**
- * ce_lro_flush_cb_deregister() - deregister the LRO flush
- * callback
- * @scn: HIF context
- *
- * Remove the LRO flush callback
- *
- * Return: Number of instances the callback is de-registered
- */
-int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
-			       void (lro_deinit_cb)(void *))
-{
-	int rc = 0;
-	int i;
-	struct CE_state *ce_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
-
-	QDF_ASSERT(scn != NULL);
-	if (scn != NULL) {
-		for (i = 0; i < scn->ce_count; i++) {
-			ce_state = scn->ce_id_to_state[i];
-			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
-				qdf_spin_lock_bh(
-					&ce_state->lro_unloading_lock);
-				ce_state->lro_flush_cb = NULL;
-				lro_deinit_cb(ce_state->lro_data);
-				ce_state->lro_data = NULL;
-				qdf_spin_unlock_bh(
-					&ce_state->lro_unloading_lock);
-				qdf_spinlock_destroy(
-					&ce_state->lro_unloading_lock);
-				rc++;
-			}
-		}
-	} else {
-		HIF_ERROR("%s: hif_state NULL!", __func__);
-	}
-	return rc;
-}
 #endif
 
 /**

+ 2 - 2
hif/src/ce/ce_tasklet.c

@@ -174,8 +174,8 @@ static void ce_tasklet(unsigned long data)
 	qdf_spin_lock_bh(&CE_state->lro_unloading_lock);
 	ce_per_engine_service(scn, tasklet_entry->ce_id);
 
-	if (CE_state->lro_flush_cb != NULL)
-		CE_state->lro_flush_cb(CE_state->lro_data);
+	qdf_lro_flush(CE_state->lro_data);
+
 	qdf_spin_unlock_bh(&CE_state->lro_unloading_lock);
 
 	if (ce_check_rx_pending(CE_state)) {

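Note: taken together, the ce_main.c and ce_tasklet.c hunks above make the copy-engine path own its LRO instance directly: the context is created when the CE is configured, flushed from the tasklet under the existing unloading lock, and released in ce_fini(). The hif_napi.c hunks further below apply the same pattern to the NAPI path (init in hif_napi_create(), flush in hif_napi_poll(), deinit in hif_napi_destroy()). A condensed sketch of that lifecycle, using a stand-in struct rather than the driver's CE_state; where the spinlock is created is not shown in these hunks, so its placement here is illustrative:

#include "qdf_lro.h"
#include "qdf_lock.h"

/* Stand-in for the per-engine RX state (CE_state / qca_napi_info). */
struct rx_engine {
	qdf_lro_ctx_t  lro_data;
	qdf_spinlock_t lro_unloading_lock;
};

static void rx_engine_config(struct rx_engine *eng)
{
	qdf_spinlock_create(&eng->lro_unloading_lock);   /* placement illustrative */
	eng->lro_data = qdf_lro_init();                  /* as in hif_config_ce() */
}

static void rx_engine_service(struct rx_engine *eng)
{
	qdf_spin_lock_bh(&eng->lro_unloading_lock);
	/* ... process RX completions for this engine ... */
	qdf_lro_flush(eng->lro_data);                    /* as in ce_tasklet() */
	qdf_spin_unlock_bh(&eng->lro_unloading_lock);
}

static void rx_engine_fini(struct rx_engine *eng)
{
	qdf_spinlock_destroy(&eng->lro_unloading_lock);
	qdf_lro_deinit(eng->lro_data);                   /* as in ce_fini() */
}
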
+ 0 - 34
hif/src/hif_main.c

@@ -847,25 +847,6 @@ struct hif_target_info *hif_get_target_info_handle(
 }
 
 #if defined(FEATURE_LRO)
-/**
- * hif_lro_flush_cb_register - API to register for LRO Flush Callback
- * @scn: HIF Context
- * @handler: Function pointer to be called by HIF
- * @data: Private data to be used by the module registering to HIF
- *
- * Return: void
- */
-void hif_lro_flush_cb_register(struct hif_opaque_softc *scn,
-			       void (lro_flush_handler)(void *),
-			       void *(lro_init_handler)(void))
-{
-	if (hif_napi_enabled(scn, -1))
-		hif_napi_lro_flush_cb_register(scn, lro_flush_handler,
-					       lro_init_handler);
-	else
-		ce_lro_flush_cb_register(scn, lro_flush_handler,
-					lro_init_handler);
-}
 
 /**
  * hif_get_lro_info - Returns LRO instance for instance ID
@@ -901,21 +882,6 @@ int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
 		return ctx_id;
 }
 
-/**
- * hif_lro_flush_cb_deregister - API to deregister for LRO Flush Callbacks
- * @hif_hdl: HIF Context
- * @lro_deinit_cb: LRO deinit callback
- *
- * Return: void
- */
-void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
-				 void (lro_deinit_cb)(void *))
-{
-	if (hif_napi_enabled(hif_hdl, -1))
-		hif_napi_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb);
-	else
-		ce_lro_flush_cb_deregister(hif_hdl, lro_deinit_cb);
-}
 #else /* !defined(FEATURE_LRO) */
 int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
 {

+ 6 - 90
hif/src/hif_napi.c

@@ -182,6 +182,10 @@ int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
 			   napii->netdev.napi_list.prev,
 			   napii->netdev.napi_list.next);
 
+		napii->lro_ctx = qdf_lro_init();
+		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %p\n",
+				i, napii->id, napii->lro_ctx);
+
 		/* It is OK to change the state variable below without
 		 * protection as there should be no-one around yet
 		 */
@@ -261,6 +265,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
 				   napii->netdev.napi_list.next);
 
 			qdf_spinlock_destroy(&napii->lro_unloading_lock);
+			qdf_lro_deinit(napii->lro_ctx);
 			netif_napi_del(&(napii->napi));
 
 			napid->ce_map &= ~(0x01 << ce);
@@ -287,94 +292,6 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
 	return rc;
 }
 
-/**
- * hif_napi_lro_flush_cb_register() - init and register flush callback for LRO
- * @hif_hdl: pointer to hif context
- * @lro_flush_handler: register LRO flush callback
- * @lro_init_handler: Callback for initializing LRO
- *
- * Return: positive value on success and 0 on failure
- */
-int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
-				   void (lro_flush_handler)(void *),
-				   void *(lro_init_handler)(void))
-{
-	int rc = 0;
-	int i;
-	struct CE_state *ce_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
-	void *data = NULL;
-	struct qca_napi_data *napid;
-	struct qca_napi_info *napii;
-
-	QDF_ASSERT(scn != NULL);
-
-	napid = hif_napi_get_all(hif_hdl);
-	if (scn != NULL) {
-		for (i = 0; i < scn->ce_count; i++) {
-			ce_state = scn->ce_id_to_state[i];
-			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
-				data = lro_init_handler();
-				if (data == NULL) {
-					HIF_ERROR("%s: Failed to init LRO for CE %d",
-						  __func__, i);
-					continue;
-				}
-				napii = &(napid->napis[i]);
-				napii->lro_flush_cb = lro_flush_handler;
-				napii->lro_ctx = data;
-				HIF_DBG("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
-					i, napii->id, napii->lro_flush_cb,
-					napii->lro_ctx);
-				rc++;
-			}
-		}
-	} else {
-		HIF_ERROR("%s: hif_state NULL!", __func__);
-	}
-	return rc;
-}
-
-/**
- * hif_napi_lro_flush_cb_deregister() - Degregister and free LRO.
- * @hif: pointer to hif context
- * @lro_deinit_cb: LRO deinit callback
- *
- * Return: NONE
- */
-void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
-				     void (lro_deinit_cb)(void *))
-{
-	int i;
-	struct CE_state *ce_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
-	struct qca_napi_data *napid;
-	struct qca_napi_info *napii;
-
-	QDF_ASSERT(scn != NULL);
-
-	napid = hif_napi_get_all(hif_hdl);
-	if (scn != NULL) {
-		for (i = 0; i < scn->ce_count; i++) {
-			ce_state = scn->ce_id_to_state[i];
-			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
-				napii = &(napid->napis[i]);
-				HIF_DBG("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
-					i, napii->id, napii->lro_flush_cb,
-					napii->lro_ctx);
-				qdf_spin_lock_bh(&napii->lro_unloading_lock);
-				napii->lro_flush_cb = NULL;
-				lro_deinit_cb(napii->lro_ctx);
-				napii->lro_ctx = NULL;
-				qdf_spin_unlock_bh(
-					&napii->lro_unloading_lock);
-			}
-		}
-	} else {
-		HIF_ERROR("%s: hif_state NULL!", __func__);
-	}
-}
-
 /**
  * hif_napi_get_lro_info() - returns the address LRO data for napi_id
  * @hif: pointer to hif context
@@ -809,8 +726,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
 		    __func__, rc);
 
-	if (napi_info->lro_flush_cb)
-		napi_info->lro_flush_cb(napi_info->lro_ctx);
+	qdf_lro_flush(napi_info->lro_ctx);
 	qdf_spin_unlock_bh(&napi_info->lro_unloading_lock);
 
 	/* do not return 0, if there was some work done,

+ 70 - 11
qdf/inc/qdf_lro.h

@@ -39,21 +39,86 @@
  */
 typedef __qdf_lro_ctx_t qdf_lro_ctx_t;
 
+/**
+ * qdf_lro_info_s - LRO information
+ * @iph: IP header
+ * @tcph: TCP header
+ */
+struct qdf_lro_info {
+	uint8_t *iph;
+	uint8_t *tcph;
+};
+
 #if defined(FEATURE_LRO)
 
+/**
+ * qdf_lro_init() - LRO initialization function
+ *
+ * Return: LRO context
+ */
 qdf_lro_ctx_t qdf_lro_init(void);
 
+/**
+ * qdf_lro_deinit() - LRO deinitialization function
+ * @lro_ctx: LRO context
+ *
+ * Return: nothing
+ */
 void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx);
 
-bool qdf_lro_update_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf);
-
-void qdf_lro_flow_free(qdf_nbuf_t nbuf);
+/**
+ *  qdf_lro_get_info() - Update the LRO information
+ *
+ * @lro_ctx: LRO context
+ * @nbuf: network buffer
+ * @info: LRO related information passed in by the caller
+ * @plro_desc: lro information returned as output
+ *
+ * Look-up the LRO descriptor based on the LRO information and
+ * the network buffer provided. Update the skb cb with the
+ * descriptor found
+ *
+ * Return: true: LRO eligible false: LRO ineligible
+ */
+bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf,
+						 struct qdf_lro_info *info,
+						 void **plro_desc);
 
-void qdf_lro_flush_pkt(struct iphdr *iph,
-	 struct tcphdr *tcph, qdf_lro_ctx_t lro_ctx);
+/**
+ * qdf_lro_flush_pkt() - function to flush the LRO flow
+ * @info: LRO related information passed by the caller
+ * @lro_ctx: LRO context
+ *
+ * Flush all the packets aggregated in the LRO manager for the
+ * flow indicated by the TCP and IP header
+ *
+ * Return: none
+ */
+void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx,
+	 struct qdf_lro_info *info);
 
+/**
+ * qdf_lro_flush() - LRO flush API
+ * @lro_ctx: LRO context
+ *
+ * Flush all the packets aggregated in the LRO manager for all
+ * the flows
+ *
+ * Return: none
+ */
 void qdf_lro_flush(qdf_lro_ctx_t lro_ctx);
 
+/**
+ * qdf_lro_desc_free() - Free the LRO descriptor
+ * @desc: LRO descriptor
+ * @lro_ctx: LRO context
+ *
+ * Return the LRO descriptor to the free pool
+ *
+ * Return: none
+ */
+void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx, void *desc);
+
 #else
 
 static inline qdf_lro_ctx_t qdf_lro_init(void)
@@ -65,14 +130,8 @@ static inline void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx)
 {
 }
 
-static inline void qdf_lro_flush_pkt(struct iphdr *iph,
-	 struct tcphdr *tcph, qdf_lro_ctx_t lro_ctx)
-{
-}
-
 static inline void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
 {
 }
-
 #endif /* FEATURE_LRO */
 #endif

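Note: a minimal sketch of how a receive path is expected to use the reworked API above, assuming an Ethernet II frame and reusing the ETH_HLEN + QDF_NBUF_CB_RX_TCP_OFFSET header offsets that qdf_lro_update_info() used to compute internally (see the qdf_lro.c hunks below). The rx_try_lro() helper name is illustrative and not part of this change:

#include <linux/if_ether.h>     /* ETH_HLEN */
#include "qdf_lro.h"
#include "qdf_nbuf.h"

static void rx_try_lro(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf)
{
	struct qdf_lro_info info;
	void *lro_desc = NULL;

	info.iph  = qdf_nbuf_data(nbuf) + ETH_HLEN;
	info.tcph = qdf_nbuf_data(nbuf) + ETH_HLEN +
		    QDF_NBUF_CB_RX_TCP_OFFSET(nbuf);

	if (qdf_lro_get_info(lro_ctx, nbuf, &info, &lro_desc)) {
		/* A flow descriptor was found or allocated; hand the frame
		 * to the aggregation layer, and later either return the
		 * descriptor with qdf_lro_desc_free(lro_ctx, lro_desc) or
		 * flush the flow with qdf_lro_flush_pkt(lro_ctx, &info). */
	} else {
		/* Not LRO eligible; deliver the frame through the normal
		 * RX path. */
	}
}
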
+ 2 - 2
qdf/linux/src/i_qdf_nbuf.h

@@ -100,6 +100,7 @@ typedef union {
  *   @rx.tcp_seq_num     : TCP sequence number
  *   @rx.tcp_ack_num     : TCP ACK number
  *   @rx.flow_id_toeplitz: 32-bit 5-tuple Toeplitz hash
+ *   @rx.lro_ctx         : LRO context
  * @tx.extra_frag  : represent HTC/HTT header
  * @tx.efrag.vaddr       : virtual address of ~
  * @tx.efrag.paddr       : physical/DMA address of ~
@@ -146,8 +147,7 @@ struct qdf_nbuf_cb {
 			uint32_t tcp_ack_num;
 			uint32_t flow_id_toeplitz;
 			uint32_t map_index;
-			void *lro_desc;
-			void *lro_ctx;
+			unsigned char *lro_ctx;
 			union {
 				uint8_t packet_state;
 				uint8_t dp_trace:1,

+ 58 - 50
qdf/linux/src/qdf_lro.c

@@ -170,7 +170,7 @@ qdf_lro_ctx_t qdf_lro_init(void)
 	qdf_lro_desc_info_init(lro_ctx);
 
 	/* LRO TODO - NAPI or RX thread */
-	/* lro_ctx->lro_mgr->features = LRO_F_NI */
+	lro_ctx->lro_mgr->features |= LRO_F_NAPI;
 
 	lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
 	lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE;
@@ -189,7 +189,7 @@ qdf_lro_ctx_t qdf_lro_init(void)
  */
 void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx)
 {
-	if (unlikely(lro_ctx)) {
+	if (likely(lro_ctx)) {
 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			 "LRO instance %p is being freed", lro_ctx);
 		qdf_mem_free(lro_ctx);
@@ -221,23 +221,25 @@ static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
 
 }
 
-/**qdf_lro_desc_find() - LRO descriptor look-up function
+/**
+ * qdf_lro_desc_find() - LRO descriptor look-up function
  *
  * @lro_ctx: LRO context
  * @skb: network buffer
  * @iph: IP header
  * @tcph: TCP header
- * @flow_id: toeplitz hash
+ * @flow_hash: toeplitz hash
+ * @lro_desc: LRO descriptor to be returned
  *
  * Look-up the LRO descriptor in the hash table based on the
  * flow ID toeplitz. If the flow is not found, allocates a new
  * LRO descriptor and places it in the hash table
  *
- * Return: lro descriptor
+ * Return: 0 - success, < 0 - failure
  */
-static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
+static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 	 struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
-	 uint32_t flow_hash)
+	 uint32_t flow_hash, struct net_lro_desc **lro_desc)
 {
 	uint32_t i;
 	struct qdf_lro_desc_table *lro_hash_table;
@@ -246,6 +248,7 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 	struct qdf_lro_desc_pool *free_pool;
 	struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info;
 
+	*lro_desc = NULL;
 	i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK;
 
 	lro_hash_table = &desc_info->lro_hash_table[i];
@@ -254,7 +257,7 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			 "Invalid hash entry");
 		QDF_ASSERT(0);
-		return NULL;
+		return -EINVAL;
 	}
 
 	/* Check if this flow exists in the descriptor list */
@@ -263,11 +266,10 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 
 		entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node);
 		tmp_lro_desc = entry->lro_desc;
-		if (tmp_lro_desc->active) {
-			if (qdf_lro_tcp_flow_match(tmp_lro_desc, iph, tcph)) {
-				return entry->lro_desc;
+			if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) {
+				*lro_desc = entry->lro_desc;
+				return 0;
 			}
-		}
 	}
 
 	/* no existing flow found, a new LRO desc needs to be allocated */
@@ -278,7 +280,7 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 	if (unlikely(!entry)) {
 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			 "Could not allocate LRO desc!");
-		return NULL;
+		return -ENOMEM;
 	}
 
 	list_del_init(&entry->lro_node);
@@ -286,7 +288,7 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 	if (unlikely(!entry->lro_desc)) {
 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			 "entry->lro_desc is NULL!");
-		return NULL;
+		return -EINVAL;
 	}
 
 	memset(entry->lro_desc, 0, sizeof(struct net_lro_desc));
@@ -298,13 +300,17 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
 	list_add_tail(&entry->lro_node,
 		 &lro_hash_table->lro_desc_list);
 
-	return entry->lro_desc;
+	*lro_desc = entry->lro_desc;
+	return 0;
 }
 
-/**qdf_lro_update_info() - Update the LRO information
+/**
+ *  qdf_lro_get_info() - Update the LRO information
  *
  * @lro_ctx: LRO context
  * @nbuf: network buffer
+ * @info: LRO related information passed in by the caller
+ * @plro_desc: lro information returned as output
  *
  * Look-up the LRO descriptor based on the LRO information and
  * the network buffer provided. Update the skb cb with the
@@ -312,19 +318,37 @@ static struct net_lro_desc *qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
  *
  * Return: true: LRO eligible false: LRO ineligible
  */
-bool qdf_lro_update_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf)
+bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf,
+						 struct qdf_lro_info *info,
+						 void **plro_desc)
 {
+	struct net_lro_desc *lro_desc;
 	struct iphdr *iph;
 	struct tcphdr *tcph;
-	struct net_lro_desc *lro_desc = NULL;
+	int hw_lro_eligible =
+		 QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) &&
+		 (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf));
 
-	iph = (struct iphdr *)((char *)nbuf->data + ETH_HLEN);
-	tcph = (struct tcphdr *)((char *)nbuf->data + ETH_HLEN +
-			 QDF_NBUF_CB_RX_TCP_OFFSET(nbuf));
-	QDF_NBUF_CB_RX_LRO_DESC(nbuf) = NULL;
+	if (unlikely(!lro_ctx)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			 "Invalid LRO context");
+		return false;
+	}
 
-	lro_desc = qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph,
-		 QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(nbuf));
+	if (!hw_lro_eligible)
+		return false;
+
+	iph = (struct iphdr *)info->iph;
+	tcph = (struct tcphdr *)info->tcph;
+	if (0 != qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph,
+		 QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(nbuf),
+		 (struct net_lro_desc **)plro_desc)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			 "finding the LRO desc failed");
+		return false;
+	}
+
+	lro_desc = (struct net_lro_desc *)(*plro_desc);
 	if (unlikely(!lro_desc)) {
 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			 "finding the LRO desc failed");
@@ -355,7 +379,6 @@ bool qdf_lro_update_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf)
 		}
 	}
 
-	QDF_NBUF_CB_RX_LRO_DESC(nbuf) = (void *)lro_desc;
 	return true;
 }
 
@@ -368,14 +391,15 @@ bool qdf_lro_update_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf)
  *
  * Return: none
  */
-static void qdf_lro_desc_free(struct net_lro_desc *desc,
-	 qdf_lro_ctx_t lro_ctx)
+void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx,
+	 void *data)
 {
 	struct qdf_lro_desc_entry *entry;
 	struct net_lro_mgr *lro_mgr;
 	struct net_lro_desc *arr_base;
 	struct qdf_lro_desc_info *desc_info;
 	int i;
+	struct net_lro_desc *desc = (struct net_lro_desc *)data;
 
 	qdf_assert(desc);
 	qdf_assert(lro_ctx);
@@ -405,22 +429,6 @@ static void qdf_lro_desc_free(struct net_lro_desc *desc,
 		 lro_desc_pool.lro_free_list_head);
 }
 
-/**
- * qdf_lro_flow_free() - Free the LRO flow resources
- * @nbuf: network buffer
- *
- * Return the LRO descriptor to the free pool
- *
- * Return: none
- */
-void qdf_lro_flow_free(qdf_nbuf_t nbuf)
-{
-	struct net_lro_desc *desc = QDF_NBUF_CB_RX_LRO_DESC(nbuf);
-	qdf_lro_ctx_t ctx = QDF_NBUF_CB_RX_LRO_CTX(nbuf);
-
-	qdf_lro_desc_free(desc, ctx);
-}
-
 /**
  * qdf_lro_flush() - LRO flush API
  * @lro_ctx: LRO context
@@ -437,12 +445,11 @@ void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
 
 	for (i = 0; i < lro_mgr->max_desc; i++) {
 		if (lro_mgr->lro_arr[i].active) {
-			qdf_lro_desc_free(&lro_mgr->lro_arr[i], lro_ctx);
+			qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]);
 			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
 		}
 	}
 }
-
 /**
  * qdf_lro_get_desc() - LRO descriptor look-up function
  * @iph: IP header
@@ -472,8 +479,7 @@ static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
 
 /**
  * qdf_lro_flush_pkt() - function to flush the LRO flow
- * @iph: IP header
- * @tcph: TCP header
+ * @info: LRO related information passed by the caller
  * @lro_ctx: LRO context
  *
  * Flush all the packets aggregated in the LRO manager for the
@@ -481,17 +487,19 @@ static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
  *
  * Return: none
  */
-void qdf_lro_flush_pkt(struct iphdr *iph,
-	 struct tcphdr *tcph, qdf_lro_ctx_t lro_ctx)
+void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx,
+	 struct qdf_lro_info *info)
 {
 	struct net_lro_desc *lro_desc;
 	struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
+	struct iphdr *iph = (struct iphdr *) info->iph;
+	struct tcphdr *tcph = (struct tcphdr *) info->tcph;
 
 	lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
 
 	if (lro_desc) {
 		/* statistics */
-		qdf_lro_desc_free(lro_desc, lro_ctx);
+		qdf_lro_desc_free(lro_ctx, lro_desc);
 		lro_flush_desc(lro_mgr, lro_desc);
 	}
 }