Browse Source

qcacld-3.0: Add GRO support for STA mode

Add GRO support and make it configurable through an INI option (GROEnable).
GRO and LRO are mutually exclusive; only one of them can be enabled.
Also disable GRO under the following conditions:
1) Low throughput (TPUT) scenarios
2) Concurrency cases
3) For Rx packets when the peer is not connected

Change-Id: I15535827a03953231670d4138235c4876b16e045
CRs-Fixed: 2098772
Manjunathappa Prakash 7 years ago
parent
commit
7b0ad46796

+ 3 - 0
Kbuild

@@ -277,6 +277,7 @@ CONFIG_FEATURE_TSO := y
 CONFIG_TSO_DEBUG_LOG_ENABLE := y
 CONFIG_DP_LFR := y
 CONFIG_HTT_PADDR64 := y
+CONFIG_RX_OL := y
 endif
 
 # As per target team, build is done as follows:
@@ -526,6 +527,7 @@ ifeq ($(CONFIG_HELIUMPLUS), y)
 ifneq ($(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE), y)
 CONFIG_ENABLE_DEBUG_ADDRESS_MARKING := y
 endif
+CONFIG_RX_OL := y
 endif
 
 ifeq ($(CONFIG_SLUB_DEBUG_ON), y)
@@ -2352,6 +2354,7 @@ cppflags-$(CONFIG_ATH_PROCFS_DIAG_SUPPORT) += -DCONFIG_ATH_PROCFS_DIAG_SUPPORT
 cppflags-$(CONFIG_11AC_TXCOMPACT) += -DATH_11AC_TXCOMPACT
 
 cppflags-$(CONFIG_HELIUMPLUS) += -DHELIUMPLUS
+cppflags-$(CONFIG_RX_OL) += -DRECEIVE_OFFLOAD
 cppflags-$(CONFIG_AR900B) += -DAR900B
 cppflags-$(CONFIG_HTT_PADDR64) += -DHTT_PADDR64
 cppflags-$(CONFIG_OL_RX_INDICATION_RECORD) += -DOL_RX_INDICATION_RECORD

+ 2 - 2
core/dp/htt/htt_internal.h

@@ -179,7 +179,7 @@ static inline struct htt_host_rx_desc_base *htt_rx_desc(qdf_nbuf_t msdu)
 		~HTT_RX_DESC_ALIGN_MASK);
 }
 
-#if defined(FEATURE_LRO) && defined(HELIUMPLUS)
+#if defined(HELIUMPLUS)
 /**
  * htt_print_rx_desc_lro() - print LRO information in the rx
  * descriptor
@@ -265,7 +265,7 @@ static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
 {}
 static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
 	 struct htt_host_rx_desc_base *rx_desc) {}
-#endif /* FEATURE_LRO */
+#endif /* HELIUMPLUS */
 
 static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
 {

+ 22 - 0
core/hdd/inc/wlan_hdd_cfg.h

@@ -8628,6 +8628,27 @@ enum hdd_link_speed_rpt_type {
 #define CFG_LRO_ENABLED_MAX            (1)
 #define CFG_LRO_ENABLED_DEFAULT        (0)
 
+/*
+ * <ini>
+ * GROEnable - Control to enable gro feature
+ *
+ * @Disable: 0
+ * @Enable: 1
+ * @Default: 0
+ *
+ * This ini is used to enable GRO feature
+ *
+ * Supported Feature: GRO
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_GRO_ENABLED_NAME           "GROEnable"
+#define CFG_GRO_ENABLED_MIN            (0)
+#define CFG_GRO_ENABLED_MAX            (1)
+#define CFG_GRO_ENABLED_DEFAULT        (0)
+
 /*
  * Enable Rx traffic flow steering to enable Rx interrupts on multiple CEs based
  * on the flows. Different CEs<==>different IRQs<==>probably different CPUs.
@@ -14794,6 +14815,7 @@ struct hdd_config {
 	bool send_deauth_before_con;
 	bool tso_enable;
 	bool lro_enable;
+	bool gro_enable;
 	bool flow_steering_enable;
 	uint8_t max_msdus_per_rxinorderind;
 	bool active_mode_offload;

+ 11 - 40
core/hdd/inc/wlan_hdd_lro.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -26,22 +26,15 @@
 
 struct hdd_context;
 
+#if defined(FEATURE_LRO)
 /**
- * enum hdd_lro_rx_status - LRO receive frame status
- * @HDD_LRO_RX: frame sent over the LRO interface
- * @HDD_LRO_NO_RX: frame not sent over the LRO interface
+ * hdd_lro_rx() - Handle Rx processing via LRO
+ * @adapter: pointer to adapter context
+ * @skb: pointer to sk_buff
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via LRO or non zero return code
  */
-enum hdd_lro_rx_status {
-	HDD_LRO_RX = 0,
-	HDD_LRO_NO_RX = 1,
-};
-
-#if defined(FEATURE_LRO)
-int hdd_lro_init(struct hdd_context *hdd_ctx);
-
-enum hdd_lro_rx_status hdd_lro_rx(struct hdd_context *hdd_ctx,
-				  struct hdd_adapter *adapter,
-				  struct sk_buff *skb);
+QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter, struct sk_buff *skb);
 
 void hdd_lro_display_stats(struct hdd_context *hdd_ctx);
 
@@ -57,29 +50,16 @@ QDF_STATUS hdd_lro_set_reset(struct hdd_context *hdd_ctx,
 			     struct hdd_adapter *adapter,
 			     uint8_t enable_flag);
 
-void hdd_disable_lro_in_concurrency(bool disable);
-
-/**
- * hdd_disable_lro_for_low_tput() - enable/disable LRO based on tput
- * hdd_ctx: hdd context
- * disable: boolean to enable/disable LRO
- *
- * This API enables/disables LRO based on tput.
- *
- * Return: void
- */
-void hdd_disable_lro_for_low_tput(struct hdd_context *hdd_ctx, bool disable);
 #else
 static inline int hdd_lro_init(struct hdd_context *hdd_ctx)
 {
 	return 0;
 }
 
-static inline enum hdd_lro_rx_status hdd_lro_rx(struct hdd_context *hdd_ctx,
-						struct hdd_adapter *adapter,
-						struct sk_buff *skb)
+static inline QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter,
+				    struct sk_buff *skb)
 {
-	return HDD_LRO_NO_RX;
+	return QDF_STATUS_E_NOSUPPORT;
 }
 
 static inline void hdd_lro_display_stats(struct hdd_context *hdd_ctx)
@@ -92,14 +72,5 @@ static inline QDF_STATUS hdd_lro_set_reset(struct hdd_context *hdd_ctx,
 {
 	return 0;
 }
-
-static inline void hdd_disable_lro_in_concurrency(bool disable)
-{
-}
-
-static inline void
-hdd_disable_lro_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
-{
-}
 #endif /* FEATURE_LRO */
 #endif /* __WLAN_HDD_LRO_H__ */

+ 13 - 0
core/hdd/inc/wlan_hdd_main.h

@@ -1622,6 +1622,16 @@ enum tos {
 #define HDD_AC_BIT_INDX                 0
 #define HDD_DWELL_TIME_INDX             1
 
+/**
+ * enum RX_OFFLOAD - Receive offload modes
+ * @CFG_LRO_ENABLED: Large Rx offload
+ * @CFG_GRO_ENABLED: Generic Rx Offload
+ */
+enum RX_OFFLOAD {
+	CFG_LRO_ENABLED = 1,
+	CFG_GRO_ENABLED,
+};
+
 /* One per STA: 1 for BCMC_STA_ID, 1 for each SAP_SELF_STA_ID,
  * 1 for WDS_STAID
  */
@@ -1821,6 +1831,7 @@ struct hdd_context {
 	qdf_work_t sap_pre_cac_work;
 	bool hbw_requested;
 	uint32_t last_nil_scan_bug_report_timestamp;
+	enum RX_OFFLOAD ol_enable;
 #ifdef WLAN_FEATURE_NAN_DATAPATH
 	bool nan_datapath_enabled;
 #endif
@@ -1876,6 +1887,8 @@ struct hdd_context {
 	bool imps_enabled;
 	int user_configured_pkt_filter_rules;
 	bool is_fils_roaming_supported;
+	QDF_STATUS (*receive_offload_cb)(struct hdd_adapter *,
+					 struct sk_buff *);
 	qdf_atomic_t vendor_disable_lro_flag;
 	qdf_atomic_t disable_lro_in_concurrency;
 	qdf_atomic_t disable_lro_in_low_tput;

+ 25 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -57,6 +57,31 @@ QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter);
 QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter);
 QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf);
 
+/**
+ * hdd_rx_ol_init() - Initialize Rx mode (LRO or GRO) method
+ * @hdd_ctx: pointer to HDD Station Context
+ *
+ * Return: 0 on success and non zero on failure.
+ */
+int hdd_rx_ol_init(struct hdd_context *hdd_ctx);
+
+/**
+ * hdd_disable_rx_ol_in_concurrency() - Disable Rx offload due to concurrency
+ * @disable: true/false to disable/enable the Rx offload
+ *
+ * Return: none
+ */
+void hdd_disable_rx_ol_in_concurrency(bool disable);
+
+/**
+ * hdd_disable_rx_ol_for_low_tput() - Disable Rx offload in low TPUT scenario
+ * @hdd_ctx: hdd context
+ * @disable: true/false to disable/enable the Rx offload
+ *
+ * Return: none
+ */
+void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable);
+
 QDF_STATUS hdd_get_peer_sta_id(struct hdd_station_ctx *sta_ctx,
 				struct qdf_mac_addr *peer_mac_addr,
 				uint8_t *sta_id);

+ 7 - 0
core/hdd/src/wlan_hdd_cfg.c

@@ -3457,6 +3457,13 @@ struct reg_table_entry g_registry_table[] = {
 		     CFG_LRO_ENABLED_MIN,
 		     CFG_LRO_ENABLED_MAX),
 
+	REG_VARIABLE(CFG_GRO_ENABLED_NAME, WLAN_PARAM_Integer,
+		     struct hdd_config, gro_enable,
+		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+		     CFG_GRO_ENABLED_DEFAULT,
+		     CFG_GRO_ENABLED_MIN,
+		     CFG_GRO_ENABLED_MAX),
+
 	REG_VARIABLE(CFG_BPF_PACKET_FILTER_OFFLOAD, WLAN_PARAM_Integer,
 		     struct hdd_config, bpf_packet_filter_enable,
 		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,

+ 35 - 160
core/hdd/src/wlan_hdd_lro.c

@@ -39,75 +39,11 @@
 	 LRO_TCP_DATA_CSUM | LRO_TCP_SEQ_NUM | LRO_TCP_WIN)
 
 #if defined(QCA_WIFI_QCA6290)
-/**
- * hdd_lro_init() - initialization for LRO
- * @hdd_ctx: HDD context
- *
- * This function sends the LRO configuration to the firmware
- * via WMA
- * Make sure that this function gets called after NAPI
- * instances have been created.
- *
- * Return: 0 - success, < 0 - failure
- */
-int hdd_lro_init(struct hdd_context *hdd_ctx)
-{
-	return 0;
-}
-
 static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
 {
 	return (qdf_lro_ctx_t)QDF_NBUF_CB_RX_LRO_CTX(skb);
 }
 #else
-/**
- * hdd_lro_init() - initialization for LRO
- * @hdd_ctx: HDD context
- *
- * This function sends the LRO configuration to the firmware
- * via WMA
- * Make sure that this function gets called after NAPI
- * instances have been created.
- *
- * Return: 0 - success, < 0 - failure
- */
-int hdd_lro_init(struct hdd_context *hdd_ctx)
-{
-	struct cdp_lro_hash_config lro_config;
-
-	if ((!hdd_ctx->config->lro_enable) &&
-	    (hdd_napi_enabled(HDD_NAPI_ANY) == 0)) {
-		hdd_warn("LRO and NAPI are both disabled");
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	lro_config.lro_enable = 1;
-	lro_config.tcp_flag = TCPHDR_ACK;
-	lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
-		TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE | TCPHDR_CWR;
-
-	get_random_bytes(lro_config.toeplitz_hash_ipv4,
-		 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
-		 LRO_IPV4_SEED_ARR_SZ));
-
-	get_random_bytes(lro_config.toeplitz_hash_ipv6,
-		 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
-		 LRO_IPV6_SEED_ARR_SZ));
-
-	hdd_debug("sending the LRO configuration to the fw");
-	if (0 != wma_lro_init(&lro_config)) {
-		hdd_err("Failed to send LRO configuration!");
-		hdd_ctx->config->lro_enable = 0;
-		return -EAGAIN;
-	}
-
-	if (hdd_ctx->config->enable_tcp_delack) {
-		hdd_ctx->en_tcp_delack_no_lro = 0;
-		hdd_reset_tcp_delack(hdd_ctx);
-	}
-	return 0;
-}
-
 static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
 {
 	struct hif_opaque_softc *hif_hdl =
@@ -123,71 +59,55 @@ static qdf_lro_ctx_t wlan_hdd_get_lro_ctx(struct sk_buff *skb)
 
 /**
  * hdd_lro_rx() - LRO receive function
- * @hdd_ctx: HDD context
  * @adapter: HDD adapter
  * @skb: network buffer
  *
  * Delivers LRO eligible frames to the LRO manager
  *
- * Return: HDD_LRO_RX - frame delivered to LRO manager
- * HDD_LRO_NO_RX - frame not delivered
+ * Return: QDF_STATUS_SUCCESS - frame delivered to LRO manager
+ * QDF_STATUS_E_FAILURE - frame not delivered
  */
-enum hdd_lro_rx_status hdd_lro_rx(struct hdd_context *hdd_ctx,
-	 struct hdd_adapter *adapter, struct sk_buff *skb)
+QDF_STATUS hdd_lro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
 {
 	qdf_lro_ctx_t ctx;
-	enum hdd_lro_rx_status status = HDD_LRO_NO_RX;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	struct qdf_lro_info info;
+	struct net_lro_desc *lro_desc = NULL;
 
-	if (((adapter->dev->features & NETIF_F_LRO) != NETIF_F_LRO) ||
-		!QDF_NBUF_CB_RX_TCP_PROTO(skb) ||
-		QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) ||
-		qdf_atomic_read(&hdd_ctx->disable_lro_in_concurrency) ||
-		qdf_atomic_read(&hdd_ctx->disable_lro_in_low_tput))
-		return HDD_LRO_NO_RX;
+	if ((adapter->dev->features & NETIF_F_LRO) != NETIF_F_LRO)
+		return QDF_STATUS_E_NOSUPPORT;
 
-	{
-		struct qdf_lro_info info;
-		struct net_lro_desc *lro_desc = NULL;
-		struct hif_opaque_softc *hif_hdl =
-			(struct hif_opaque_softc *)cds_get_context(
-							QDF_MODULE_ID_HIF);
-		if (hif_hdl == NULL) {
-			hdd_err("hif_hdl is NULL");
-			return status;
-		}
-
-		ctx = wlan_hdd_get_lro_ctx(skb);
-		if (ctx == NULL) {
-			hdd_err("LRO mgr is NULL, vdev could be going down");
-			return status;
-		}
+	ctx = wlan_hdd_get_lro_ctx(skb);
+	if (!ctx) {
+		hdd_err("LRO mgr is NULL");
+		return status;
+	}
 
-		info.iph = skb->data;
-		info.tcph = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
-		ctx->lro_mgr->dev = adapter->dev;
-		if (qdf_lro_get_info(ctx, skb, &info, (void **)&lro_desc)) {
-			struct net_lro_info hdd_lro_info;
+	info.iph = skb->data;
+	info.tcph = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
+	ctx->lro_mgr->dev = adapter->dev;
+	if (qdf_lro_get_info(ctx, skb, &info, (void **)&lro_desc)) {
+		struct net_lro_info hdd_lro_info;
 
-			hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
+		hdd_lro_info.valid_fields = LRO_VALID_FIELDS;
 
-			hdd_lro_info.lro_desc = lro_desc;
-			hdd_lro_info.lro_eligible = 1;
-			hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
-			hdd_lro_info.tcp_data_csum =
-				 csum_unfold(htons(QDF_NBUF_CB_RX_TCP_CHKSUM(skb)));
-			hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
-			hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
+		hdd_lro_info.lro_desc = lro_desc;
+		hdd_lro_info.lro_eligible = 1;
+		hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
+		hdd_lro_info.tcp_data_csum =
+			 csum_unfold(htons(QDF_NBUF_CB_RX_TCP_CHKSUM(skb)));
+		hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
+		hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);
 
-			lro_receive_skb_ext(ctx->lro_mgr, skb,
-				 (void *)adapter, &hdd_lro_info);
+		lro_receive_skb_ext(ctx->lro_mgr, skb, (void *)adapter,
+				    &hdd_lro_info);
 
-			if (!hdd_lro_info.lro_desc->active)
-				qdf_lro_desc_free(ctx, lro_desc);
+		if (!hdd_lro_info.lro_desc->active)
+			qdf_lro_desc_free(ctx, lro_desc);
 
-			status = HDD_LRO_RX;
-		} else {
-			qdf_lro_flush_pkt(ctx, &info);
-		}
+		status = QDF_STATUS_SUCCESS;
+	} else {
+		qdf_lro_flush_pkt(ctx, &info);
 	}
 	return status;
 }
@@ -207,8 +127,8 @@ QDF_STATUS
 hdd_lro_set_reset(struct hdd_context *hdd_ctx, struct hdd_adapter *adapter,
 			       uint8_t enable_flag)
 {
-	if (!hdd_ctx->config->lro_enable ||
-		 QDF_STA_MODE != adapter->device_mode) {
+	if ((hdd_ctx->ol_enable != CFG_LRO_ENABLED) ||
+	    (adapter->device_mode != QDF_STA_MODE)) {
 		hdd_debug("LRO is already Disabled");
 		return 0;
 	}
@@ -238,49 +158,4 @@ hdd_lro_set_reset(struct hdd_context *hdd_ctx, struct hdd_adapter *adapter,
 	return 0;
 }
 
-/**
- * hdd_disable_lro_in_concurrency() - Disable LRO due to concurrency
- * @disable: bool value
- *
- * Return: none
- */
-void hdd_disable_lro_in_concurrency(bool disable)
-{
-	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
-
-	if (hdd_ctx == NULL) {
-		hdd_err("hdd_ctx is NULL");
-		return;
-	}
-
-	if (disable) {
-		if (hdd_ctx->en_tcp_delack_no_lro) {
-			struct wlan_rx_tp_data rx_tp_data;
-
-			hdd_info("Enable TCP delack as LRO disabled in concurrency");
-			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
-			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
-			wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
-						WLAN_SVC_WLAN_TP_IND,
-						&rx_tp_data,
-						sizeof(rx_tp_data));
-			hdd_ctx->en_tcp_delack_no_lro = 1;
-		}
-		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 1);
-	} else {
-		if (hdd_ctx->en_tcp_delack_no_lro) {
-			hdd_info("Disable TCP delack as LRO is enabled");
-			hdd_ctx->en_tcp_delack_no_lro = 0;
-			hdd_reset_tcp_delack(hdd_ctx);
-		}
-		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 0);
-	}
-}
 
-void hdd_disable_lro_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
-{
-	if (disable)
-		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 1);
-	else
-		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 0);
-}

+ 9 - 7
core/hdd/src/wlan_hdd_main.c

@@ -4005,7 +4005,7 @@ QDF_STATUS hdd_init_station_mode(struct hdd_adapter *adapter)
 	 * the LRO again. Keep the LRO state same as before SSR.
 	 */
 	if (!(qdf_atomic_read(&hdd_ctx->vendor_disable_lro_flag)))
-	adapter->dev->features |= NETIF_F_LRO;
+		adapter->dev->features |= NETIF_F_LRO;
 
 	/* rcpi info initialization */
 	qdf_mem_zero(&adapter->rcpi, sizeof(adapter->rcpi));
@@ -7114,9 +7114,9 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 	hdd_ctx->prev_rx = rx_packets;
 
 	if (temp_rx < hdd_ctx->config->busBandwidthLowThreshold)
-		hdd_disable_lro_for_low_tput(hdd_ctx, true);
+		hdd_disable_rx_ol_for_low_tput(hdd_ctx, true);
 	else
-		hdd_disable_lro_for_low_tput(hdd_ctx, false);
+		hdd_disable_rx_ol_for_low_tput(hdd_ctx, false);
 
 	if (temp_rx > hdd_ctx->config->tcpDelackThresholdHigh) {
 		if ((hdd_ctx->cur_rx_level != WLAN_SVC_TP_HIGH) &&
@@ -9955,8 +9955,8 @@ static int hdd_features_init(struct hdd_context *hdd_ctx, struct hdd_adapter *ad
 	if (sme_set_vc_mode_config(hdd_ctx->config->vc_mode_cfg_bitmap))
 		hdd_warn("Error in setting Voltage Corner mode config to FW");
 
-	if (hdd_lro_init(hdd_ctx))
-		hdd_err("Unable to initialize LRO in fw");
+	if (hdd_rx_ol_init(hdd_ctx))
+		hdd_err("Unable to initialize Rx LRO/GRO in fw");
 
 	if (hdd_adaptive_dwelltime_init(hdd_ctx))
 		hdd_err("Unable to send adaptive dwelltime setting to FW");
@@ -10094,7 +10094,7 @@ int hdd_configure_cds(struct hdd_context *hdd_ctx, struct hdd_adapter *adapter)
 	uint32_t num_abg_tx_chains = 0;
 	uint32_t num_11b_tx_chains = 0;
 	uint32_t num_11ag_tx_chains = 0;
-	struct policy_mgr_dp_cbacks dp_cbs;
+	struct policy_mgr_dp_cbacks dp_cbs = {0};
 
 	if (hdd_ctx->config->sifs_burst_duration) {
 		set_value = (SIFS_BURST_DUR_MULTIPLIER) *
@@ -10189,7 +10189,9 @@ int hdd_configure_cds(struct hdd_context *hdd_ctx, struct hdd_adapter *adapter)
 	if (ret)
 		goto cds_disable;
 
-	dp_cbs.hdd_disable_lro_in_concurrency = hdd_disable_lro_in_concurrency;
+	if (hdd_ctx->ol_enable)
+		dp_cbs.hdd_disable_rx_ol_in_concurrency =
+				hdd_disable_rx_ol_in_concurrency;
 	dp_cbs.hdd_set_rx_mode_rps_cb = hdd_set_rx_mode_rps;
 	dp_cbs.hdd_ipa_set_mcc_mode_cb = hdd_ipa_set_mcc_mode;
 	status = policy_mgr_register_dp_cb(hdd_ctx->hdd_psoc, &dp_cbs);

+ 218 - 28
core/hdd/src/wlan_hdd_tx_rx.c

@@ -55,6 +55,8 @@
 #include "wlan_hdd_power.h"
 #include "wlan_hdd_cfg80211.h"
 #include <wlan_hdd_tsf.h>
+#include <net/tcp.h>
+#include "wma_api.h"
 
 #include "wlan_hdd_nud_tracking.h"
 
@@ -1522,6 +1524,201 @@ static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb)
 	return false;
 }
 
+#ifdef RECEIVE_OFFLOAD
+/**
+ * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
+ * @hdd_ctx: pointer to HDD Station Context
+ *
+ * Return: None
+ */
+static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
+{
+	if (!(hdd_ctx->config->lro_enable ^
+	    hdd_ctx->config->gro_enable)) {
+		hdd_ctx->config->lro_enable && hdd_ctx->config->gro_enable ?
+		hdd_err("Can't enable both LRO and GRO, disabling Rx offload") :
+		hdd_debug("LRO and GRO both are disabled");
+		hdd_ctx->ol_enable = 0;
+	} else if (hdd_ctx->config->lro_enable) {
+		hdd_debug("Rx offload LRO is enabled");
+		hdd_ctx->ol_enable = CFG_LRO_ENABLED;
+	} else {
+		hdd_debug("Rx offload GRO is enabled");
+		hdd_ctx->ol_enable = CFG_GRO_ENABLED;
+	}
+}
+
+/**
+ * hdd_gro_rx() - Handle Rx procesing via GRO
+ * @adapter: pointer to adapter context
+ * @skb: pointer to sk_buff
+ *
+ * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
+ */
+static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
+{
+	struct napi_struct *napi;
+	struct qca_napi_data *napid;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+
+	/* Only enabling it for STA mode like LRO today */
+	if (QDF_STA_MODE != adapter->device_mode)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	napid = hdd_napi_get_all();
+	napi = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
+	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
+
+	if (GRO_DROP != napi_gro_receive(napi, skb))
+		status = QDF_STATUS_SUCCESS;
+
+	return status;
+}
+
+/**
+ * hdd_register_rx_ol() - Register LRO/GRO rx processing callbacks
+ *
+ * Return: none
+ */
+static void hdd_register_rx_ol(void)
+{
+	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+
+	if  (!hdd_ctx)
+		hdd_err("HDD context is NULL");
+
+	if (hdd_ctx->ol_enable == CFG_LRO_ENABLED) {
+		/* Register the flush callback */
+		hdd_ctx->receive_offload_cb = hdd_lro_rx;
+		hdd_debug("LRO is enabled");
+	} else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
+		hdd_ctx->receive_offload_cb = hdd_gro_rx;
+		hdd_debug("GRO is enabled");
+	}
+}
+
+int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
+{
+	struct cdp_lro_hash_config lro_config = {0};
+
+	hdd_resolve_rx_ol_mode(hdd_ctx);
+
+	hdd_register_rx_ol();
+
+	/*
+	 * This will enable flow steering and Toeplitz hash
+	 * So enable it for LRO or GRO processing.
+	 */
+	if (hdd_napi_enabled(HDD_NAPI_ANY) == 0) {
+		hdd_warn("NAPI is disabled");
+		return 0;
+	}
+
+	lro_config.lro_enable = 1;
+	lro_config.tcp_flag = TCPHDR_ACK;
+	lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
+		TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE | TCPHDR_CWR;
+
+	get_random_bytes(lro_config.toeplitz_hash_ipv4,
+			 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
+			  LRO_IPV4_SEED_ARR_SZ));
+
+	get_random_bytes(lro_config.toeplitz_hash_ipv6,
+			 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
+			  LRO_IPV6_SEED_ARR_SZ));
+
+	if (0 != wma_lro_init(&lro_config)) {
+		hdd_err("Failed to send LRO configuration!");
+		hdd_ctx->ol_enable = 0;
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+void hdd_disable_rx_ol_in_concurrency(bool disable)
+{
+	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+
+	if (!hdd_ctx) {
+		hdd_err("hdd_ctx is NULL");
+		return;
+	}
+
+	if (disable) {
+		if (hdd_ctx->en_tcp_delack_no_lro) {
+			struct wlan_rx_tp_data rx_tp_data;
+
+			hdd_info("Enable TCP delack as LRO disabled in concurrency");
+			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
+			rx_tp_data.level = GET_CUR_RX_LVL(hdd_ctx);
+			wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
+						    WLAN_SVC_WLAN_TP_IND,
+						    &rx_tp_data,
+						    sizeof(rx_tp_data));
+			hdd_ctx->en_tcp_delack_no_lro = 1;
+		}
+		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 1);
+	} else {
+		if (hdd_ctx->en_tcp_delack_no_lro) {
+			hdd_info("Disable TCP delack as LRO is enabled");
+			hdd_ctx->en_tcp_delack_no_lro = 0;
+			hdd_reset_tcp_delack(hdd_ctx);
+		}
+		qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 0);
+	}
+}
+
+void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
+{
+	if (disable)
+		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 1);
+	else
+		qdf_atomic_set(&hdd_ctx->disable_lro_in_low_tput, 0);
+}
+
+/**
+ * hdd_can_handle_receive_offload() - Check for dynamic disablement
+ * @hdd_ctx: hdd context
+ * @skb: pointer to sk_buff which will be processed by Rx OL
+ *
+ * Check for dynamic disablement of Rx offload
+ *
+ * Return: false if we cannot process otherwise true
+ */
+static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
+					   struct sk_buff *skb)
+{
+	if (!QDF_NBUF_CB_RX_TCP_PROTO(skb) ||
+	    qdf_atomic_read(&hdd_ctx->disable_lro_in_concurrency) ||
+	    QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) ||
+	    qdf_atomic_read(&hdd_ctx->disable_lro_in_low_tput))
+		return false;
+	else
+		return true;
+}
#else /* RECEIVE_OFFLOAD */
/* Rx offload compiled out: never claim a packet for LRO/GRO processing */
static bool hdd_can_handle_receive_offload(struct hdd_context *hdd_ctx,
					   struct sk_buff *skb)
{
	return false;
}

/* Stub: report that LRO/GRO support is not built into this driver */
int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
{
	hdd_err("Rx_OL, LRO/GRO not supported");
	return -EPERM;
}

/* Stub: no-op when RECEIVE_OFFLOAD is not defined */
void hdd_disable_rx_ol_in_concurrency(bool disable)
{
}

/* Stub: no-op when RECEIVE_OFFLOAD is not defined */
void hdd_disable_rx_ol_for_low_tput(struct hdd_context *hdd_ctx, bool disable)
{
}
#endif /* RECEIVE_OFFLOAD */
+
 #ifdef WLAN_FEATURE_TSF_PLUS
 static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
 					qdf_nbuf_t netbuf,
@@ -1556,7 +1753,8 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
 {
 	struct hdd_adapter *adapter = NULL;
 	struct hdd_context *hdd_ctx = NULL;
-	int rxstat;
+	int rxstat = 0;
+	QDF_STATUS rx_ol_status = QDF_STATUS_E_FAILURE;
 	struct sk_buff *skb = NULL;
 	struct sk_buff *next = NULL;
 	struct hdd_station_ctx *sta_ctx = NULL;
@@ -1712,40 +1910,21 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
 
 		hdd_tsf_timestamp_rx(hdd_ctx, skb, ktime_to_us(skb->tstamp));
 
-		if (HDD_LRO_NO_RX ==
-			 hdd_lro_rx(hdd_ctx, adapter, skb)) {
+		if (hdd_can_handle_receive_offload(hdd_ctx, skb) &&
+		    hdd_ctx->receive_offload_cb)
+			rx_ol_status = hdd_ctx->receive_offload_cb(adapter,
+								   skb);
+
+		if (rx_ol_status != QDF_STATUS_SUCCESS) {
 			if (hdd_napi_enabled(HDD_NAPI_ANY) &&
 				!hdd_ctx->enable_rxthread &&
 				!QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb))
 				rxstat = netif_receive_skb(skb);
 			else
 				rxstat = netif_rx_ni(skb);
+		}
 
-			if (NET_RX_SUCCESS == rxstat) {
-				++adapter->hdd_stats.tx_rx_stats.
-					 rx_delivered[cpu_index];
-				if (track_arp)
-					++adapter->hdd_stats.hdd_arp_stats.
-						rx_delivered;
-				/* track connectivity stats */
-				if (adapter->pkt_type_bitmap)
-					hdd_tx_rx_collect_connectivity_stats_info(
-						skb, adapter,
-						PKT_TYPE_RX_DELIVERED,
-						&pkt_type);
-			} else {
-				++adapter->hdd_stats.tx_rx_stats.
-					 rx_refused[cpu_index];
-				if (track_arp)
-					++adapter->hdd_stats.hdd_arp_stats.
-						rx_refused;
-				/* track connectivity stats */
-				if (adapter->pkt_type_bitmap)
-					hdd_tx_rx_collect_connectivity_stats_info(
-						skb, adapter,
-						PKT_TYPE_RX_REFUSED, &pkt_type);
-			}
-		} else {
+		if (!rxstat) {
 			++adapter->hdd_stats.tx_rx_stats.
 						rx_delivered[cpu_index];
 			if (track_arp)
@@ -1756,6 +1935,17 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
 				hdd_tx_rx_collect_connectivity_stats_info(
 					skb, adapter,
 					PKT_TYPE_RX_DELIVERED, &pkt_type);
+		} else {
+			++adapter->hdd_stats.tx_rx_stats.rx_refused[cpu_index];
+			if (track_arp)
+				++adapter->hdd_stats.hdd_arp_stats.rx_refused;
+
+			/* track connectivity stats */
+			if (adapter->pkt_type_bitmap)
+				hdd_tx_rx_collect_connectivity_stats_info(
+					skb, adapter,
+					PKT_TYPE_RX_REFUSED, &pkt_type);
+
 		}
 	}
 

+ 8 - 2
core/wma/inc/wma_api.h

@@ -190,9 +190,15 @@ int wma_unified_radio_tx_mem_free(void *handle);
 QDF_STATUS wma_form_unit_test_cmd_and_send(uint32_t vdev_id,
 		uint32_t module_id, uint32_t arg_count, uint32_t *arg);
 
-#if defined(FEATURE_LRO)
+/**
+ * wma_lro_init() - sends LRO configuration to FW
+ * @lro_config:         pointer to the config parameters
+ *
+ * This function ends LRO configuration to FW.
+ *
+ * Return: 0 for success or reasons for failure
+ */
 int wma_lro_init(struct cdp_lro_hash_config *lro_config);
-#endif
 
 QDF_STATUS wma_remove_beacon_filter(WMA_HANDLE wma,
 				struct beacon_filter_param *filter_params);

+ 1 - 9
core/wma/src/wma_main.c

@@ -8766,15 +8766,7 @@ QDF_STATUS wma_crash_inject(WMA_HANDLE wma_handle, uint32_t type,
 	return wmi_crash_inject(wma->wmi_handle, &param);
 }
 
-#if defined(FEATURE_LRO)
-/**
- * wma_lro_init() - sends LRO configuration to FW
- * @lro_config:         pointer to the config parameters
- *
- * This function ends LRO configuration to FW.
- *
- * Return: 0 for success or reasons for failure
- */
+#ifdef RECEIVE_OFFLOAD
 int wma_lro_init(struct cdp_lro_hash_config *lro_config)
 {
 	struct scheduler_msg msg = {0};