Răsfoiți Sursa

Fastforwarding dataipa CRT:data-kernel.lnx.2.0-221001 to data-kernel.lnx.3.0

Arnav Sharma 2 ani în urmă
părinte
comite
ff1ca3fa02

+ 5 - 0
config/dataipa_GKI_ipav4.conf

@@ -0,0 +1,5 @@
+export CONFIG_GSI=m
+export CONFIG_IPA_CLIENTS_MANAGER=m
+export CONFIG_IPA_WDI_UNIFIED_API=y
+export CONFIG_RMNET_IPA3=y
+export CONFIG_RNDIS_IPA=m

+ 10 - 0
config/dataipa_vendor_ipav4.h

@@ -0,0 +1,10 @@
+/*
+* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+*/
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#define CONFIG_GSI 1
+#define CONFIG_RMNET_IPA3 1
+#define CONFIG_RNDIS_IPA 1
+#define CONFIG_IPA_WDI_UNIFIED_API 1
+#define CONFIG_IPA_VENDOR_DLKM 1

+ 1 - 1
dataipa_dlkm_vendor_board.mk

@@ -9,7 +9,7 @@ else
 endif
 
 ifeq ($(TARGET_DATAIPA_DLKM_ENABLE), true)
-DATA_DLKM_BOARD_PLATFORMS_LIST := taro kalama
+DATA_DLKM_BOARD_PLATFORMS_LIST := taro kalama bengal
 ifneq ($(TARGET_BOARD_AUTO),true)
 ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/gsim.ko

+ 1 - 1
drivers/platform/msm/Android.mk

@@ -1,5 +1,5 @@
 ifneq ($(TARGET_BOARD_PLATFORM),qssi)
-GSI_DLKM_PLATFORMS_LIST := taro kalama
+GSI_DLKM_PLATFORMS_LIST := taro kalama bengal
 
 ifeq ($(call is-board-platform-in-list, $(GSI_DLKM_PLATFORMS_LIST)),true)
 #Make file to create GSI DLKM

+ 10 - 0
drivers/platform/msm/Kbuild

@@ -36,6 +36,16 @@ LINUXINCLUDE    += -include $(srctree)/../../vendor/qcom/opensource/dataipa/conf
 endif
 endif
 
+ifeq ($(CONFIG_ARCH_KHAJE), y)
+DATAIPADRVTOP = $(srctree)/../../vendor/qcom/opensource/dataipa/drivers/platform/msm
+LINUXINCLUDE    += -include $(srctree)/../../vendor/qcom/opensource/dataipa/config/dataipa_vendor_ipav4.h
+include $(srctree)/../../vendor/qcom/opensource/dataipa/config/dataipa_GKI_ipav4.conf
+ifeq ($(CONFIG_LOCALVERSION), "-gki-consolidate")
+include $(srctree)/../../vendor/qcom/opensource/dataipa/config/dataipa_GKI_consolidate.conf
+LINUXINCLUDE    += -include $(srctree)/../../vendor/qcom/opensource/dataipa/config/dataipa_debug.h
+endif
+endif
+
 #MDMs
 ifeq ($(CONFIG_ARCH_SDXLEMUR), y)
 LINUXINCLUDE    += -include $(srctree)/techpack/dataipa/config/dataipa.h

+ 0 - 9
drivers/platform/msm/gsi/gsi.c

@@ -52,15 +52,6 @@
 #define GSI_MSB(num) ((u32)((num & GSI_MSB_MASK) >> 32))
 #define GSI_LSB(num) ((u32)(num & GSI_LSB_MASK))
 
-#define GSI_INST_RAM_FW_VER_OFFSET			(0)
-#define GSI_INST_RAM_FW_VER_GSI_3_0_OFFSET	(64)
-#define GSI_INST_RAM_FW_VER_HW_MASK			(0xFC00)
-#define GSI_INST_RAM_FW_VER_HW_SHIFT		(10)
-#define GSI_INST_RAM_FW_VER_FLAVOR_MASK		(0x380)
-#define GSI_INST_RAM_FW_VER_FLAVOR_SHIFT	(7)
-#define GSI_INST_RAM_FW_VER_FW_MASK			(0x7f)
-#define GSI_INST_RAM_FW_VER_FW_SHIFT		(0)
-
 #define GSI_FC_NUM_WORDS_PER_CHNL_SHRAM		(20)
 #define GSI_FC_STATE_INDEX_SHRAM			(7)
 #define GSI_FC_PENDING_MASK					(0x00080000)

+ 12 - 0
drivers/platform/msm/gsi/gsi.h

@@ -36,6 +36,16 @@
 #define MAX_CHANNELS_SHARING_EVENT_RING 2
 #define MINIDUMP_MASK 0x10000
 
+#define GSI_INST_RAM_FW_VER_OFFSET                      (0)
+#define GSI_INST_RAM_FW_VER_GSI_3_0_OFFSET      (64)
+#define GSI_INST_RAM_FW_VER_GSI_5_5_OFFSET      (66)
+#define GSI_INST_RAM_FW_VER_HW_MASK                     (0xFC00)
+#define GSI_INST_RAM_FW_VER_HW_SHIFT            (10)
+#define GSI_INST_RAM_FW_VER_FLAVOR_MASK         (0x380)
+#define GSI_INST_RAM_FW_VER_FLAVOR_SHIFT        (7)
+#define GSI_INST_RAM_FW_VER_FW_MASK                     (0x7f)
+#define GSI_INST_RAM_FW_VER_FW_SHIFT            (0)
+
 #define GSI_IPC_LOGGING(buf, fmt, args...) \
 	do { \
 		if (buf) \
@@ -328,6 +338,8 @@ struct gsi_per_props {
 	void *user_data;
 	int (*clk_status_cb)(void);
 	void (*enable_clk_bug_on)(void);
+	void (*vote_clk_cb)(void);
+	void (*unvote_clk_cb)(void);
 	bool skip_ieob_mask_wa;
 	bool tx_poll;
 };

+ 24 - 5
drivers/platform/msm/gsi/gsi_dbg.c

@@ -71,6 +71,8 @@ static ssize_t gsi_dump_evt(struct file *file,
 		return -EINVAL;
 	}
 
+	gsi_ctx->per.vote_clk_cb();
+
 	val = gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_0,
 		gsi_ctx->per.ee, arg1);
 	TERR("EV%2d CTX0  0x%x\n", arg1, val);
@@ -120,6 +122,8 @@ static ssize_t gsi_dump_evt(struct file *file,
 		gsi_ctx->per.ee, arg1);
 	TERR("EV%2d SCR1  0x%x\n", arg1, val);
 
+	gsi_ctx->per.unvote_clk_cb();
+
 	if (arg2) {
 		ctx = &gsi_ctx->evtr[arg1];
 
@@ -183,7 +187,9 @@ static ssize_t gsi_dump_ch(struct file *file,
 		return -EINVAL;
 	}
 
+	gsi_ctx->per.vote_clk_cb();
 	gsi_dump_ch_info(arg1);
+	gsi_ctx->per.unvote_clk_cb();
 
 	if (arg2) {
 		ctx = &gsi_ctx->chan[arg1];
@@ -354,7 +360,6 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
 	unsigned long missing;
 	char *sptr, *token;
 
-
 	if (count >= sizeof(dbg_buff))
 		goto error;
 
@@ -432,6 +437,8 @@ static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx)
 	int ee = gsi_ctx->per.ee;
 	uint16_t used_hw;
 
+	gsi_ctx->per.vote_clk_cb();
+
 	rp_hw = gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_4,
 		ee, ctx->props.ch_id);
 	rp_hw |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_5,
@@ -442,6 +449,8 @@ static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx)
 	wp_hw |= ((uint64_t)gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_7,
 		ee, ctx->props.ch_id)) << 32;
 
+	gsi_ctx->per.unvote_clk_cb();
+
 	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
 	end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw);
 
@@ -590,7 +599,7 @@ static ssize_t gsi_read_gsi_hw_profiling_stats(struct file *file,
 	char __user *buf, size_t count, loff_t *ppos)
 {
 	struct gsi_hw_profiling_data stats;
-	int nbytes, cnt = 0;
+	int ret, nbytes, cnt = 0;
 	u64 totalCycles = 0, util = 0;
 
 	if (gsi_ctx->per.ver < GSI_VER_2_9) {
@@ -599,7 +608,12 @@ static ssize_t gsi_read_gsi_hw_profiling_stats(struct file *file,
 		cnt += nbytes;
 		goto done;
 	}
-	if (!gsi_get_hw_profiling_stats(&stats)) {
+
+	gsi_ctx->per.vote_clk_cb();
+	ret = gsi_get_hw_profiling_stats(&stats);
+	gsi_ctx->per.unvote_clk_cb();
+
+	if (!ret) {
 		totalCycles = stats.mcs_busy_cnt + stats.mcs_idle_cnt +
 			stats.bp_and_pending_cnt;
 		if (totalCycles != 0)
@@ -636,7 +650,7 @@ static ssize_t gsi_read_gsi_fw_version(struct file *file,
 	char __user *buf, size_t count, loff_t *ppos)
 {
 	struct gsi_fw_version ver;
-	int nbytes, cnt = 0;
+	int ret, nbytes, cnt = 0;
 
 	if (gsi_ctx->per.ver < GSI_VER_2_9) {
 		nbytes = scnprintf(dbg_buff, GSI_MAX_MSG_LEN,
@@ -644,7 +658,12 @@ static ssize_t gsi_read_gsi_fw_version(struct file *file,
 		cnt += nbytes;
 		goto done;
 	}
-	if (!gsi_get_fw_version(&ver)) {
+
+	gsi_ctx->per.vote_clk_cb();
+	ret = gsi_get_fw_version(&ver);
+	gsi_ctx->per.unvote_clk_cb();
+
+	if (!ret) {
 		nbytes = scnprintf(dbg_buff, GSI_MAX_MSG_LEN,
 			"hw=%d\nflavor=%d\nfw=%d\n",
 			ver.hw,

+ 5 - 3
drivers/platform/msm/ipa/ipa_clients/ipa_eth.c

@@ -429,6 +429,7 @@ static enum ipa_client_type
 	return ipa_client_type;
 }
 
+#if IPA_ETH_API_VER < 2
 static struct ipa_eth_client_pipe_info
 	*ipa_eth_get_pipe_from_hdl(ipa_eth_hdl_t hdl)
 {
@@ -440,6 +441,7 @@ static struct ipa_eth_client_pipe_info
 
 	return pipe;
 }
+#endif
 
 
 static int ipa_eth_client_connect_pipe(
@@ -859,19 +861,19 @@ static int ipa_eth_client_reg_intf_internal(struct ipa_eth_intf_info *intf)
 	IPA_ETH_DBG("register interface for netdev %s\n", intf->net_dev->name);
 	/* multiple attach support */
 	if (strnstr(intf->net_dev->name, STR_ETH0_IFACE, strlen(intf->net_dev->name))) {
-		ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
+		ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
 		if (ret) {
 			IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
 			return ret;
 		}
 	} else if (strnstr(intf->net_dev->name, STR_ETH1_IFACE, strlen(intf->net_dev->name))) {
-		ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
+		ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
 		if (ret) {
 			IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
 			return ret;
 		}
 	} else {
-		ret = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
+		ret = ipa_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
 		if (ret) {
 			IPA_ETH_ERR("Could not determine IPA VLAN mode\n");
 			return ret;

Fișier diff suprimat deoarece este prea mare
+ 617 - 212
drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c


+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_hw_common_ex.h

@@ -445,7 +445,7 @@ enum ipa_hw_irq_srcs_e {
 /*
  * Total number of channel contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          20
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          27
 
 /*
  * Total number of channel contexts that need to be saved for UC

+ 12 - 2
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_reg_dump.c

@@ -1075,15 +1075,25 @@ static void out_dword(
  */
 void ipa_save_gsi_ver(void)
 {
+	u32 gsi_fw_ver;
+
 	if (!ipa3_ctx->do_register_collection_on_crash)
 		return;
 
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
-		ipa_reg_save.gsi.fw_ver =
+		gsi_fw_ver =
 		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0);
 	if (ipa3_ctx->ipa_hw_type == IPA_HW_v5_0)
-		ipa_reg_save.gsi.fw_ver =
+		gsi_fw_ver =
 		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 64);
+
+	ipa_reg_save.gsi.fw_ver.raw_version = gsi_fw_ver;
+	ipa_reg_save.gsi.fw_ver.hw_version = (gsi_fw_ver & GSI_INST_RAM_FW_VER_HW_MASK) >>
+					GSI_INST_RAM_FW_VER_HW_SHIFT;
+	ipa_reg_save.gsi.fw_ver.flavor = (gsi_fw_ver & GSI_INST_RAM_FW_VER_FLAVOR_MASK) >>
+					GSI_INST_RAM_FW_VER_FLAVOR_SHIFT;
+	ipa_reg_save.gsi.fw_ver.fw_version = (gsi_fw_ver & GSI_INST_RAM_FW_VER_FW_MASK) >>
+					GSI_INST_RAM_FW_VER_FW_SHIFT;
 }
 
 /*

+ 56 - 3
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_reg_dump.h

@@ -470,10 +470,31 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 19), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[19].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 20), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[20].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 21), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[21].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 22), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[22].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 23), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[23].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[24].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 25), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[25].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 26), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[26].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 1),	\
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
@@ -628,6 +649,30 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 19), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[19].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 20), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[20].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 21), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[21].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 22), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[22].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 23), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[23].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[24].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 25), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[25].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 26), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[26].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
@@ -1565,6 +1610,14 @@ struct ipa_reg_save_gsi_gen_s {
 	  ipa_gsi_top_gsi_inst_ram_n;
 };
 
+/* GSI fw version data */
+struct ipa_reg_save_gsi_fw_version_s {
+	u32 raw_version;
+	u32 hw_version;
+	u32 flavor;
+	u32 fw_version;
+};
+
 /* GSI General EE register save data struct */
 struct ipa_reg_save_gsi_gen_ee_s {
 	struct gsi_hwio_def_gsi_manager_ee_qos_n_s
@@ -1997,7 +2050,7 @@ struct ipa_regs_save_hierarchy_s {
 
 /* Top level GSI register save data struct */
 struct gsi_regs_save_hierarchy_s {
-	u32 fw_ver;
+	struct ipa_reg_save_gsi_fw_version_s	fw_ver;
 	struct ipa_reg_save_gsi_gen_s		gen;
 	struct ipa_reg_save_gsi_gen_ee_s	gen_ee[IPA_REG_SAVE_GSI_NUM_EE];
 	struct ipa_reg_save_gsi_ch_cntxt_s	ch_cntxt;

+ 2 - 2
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.5/ipa_hw_common_ex.h

@@ -444,7 +444,7 @@ enum ipa_hw_irq_srcs_e {
 /*
  * Total number of channel contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          20
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          25
 
 /*
  * Total number of channel contexts that need to be saved for UC
@@ -459,7 +459,7 @@ enum ipa_hw_irq_srcs_e {
 /*
  * Total number of event ring contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         27
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         25
 
 /*
  * Total number of event ring contexts that need to be saved for UC

+ 11 - 2
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.5/ipa_reg_dump.c

@@ -985,12 +985,21 @@ static void out_dword(
  */
 void ipa_save_gsi_ver(void)
 {
+	u32 gsi_fw_ver;
+
 	if (!ipa3_ctx->do_register_collection_on_crash)
 		return;
 
 	/* IPA_HW_v5_5 */
-	ipa_reg_save.gsi.fw_ver =
-	IPA_READ_1xVECTOR_REG(IPA_0_GSI_TOP_GSI_INST_RAM_n, 66);
+	gsi_fw_ver = IPA_READ_1xVECTOR_REG(IPA_0_GSI_TOP_GSI_INST_RAM_n, 66);
+
+	ipa_reg_save.gsi.fw_ver.raw_version = gsi_fw_ver;
+	ipa_reg_save.gsi.fw_ver.hw_version = (gsi_fw_ver & GSI_INST_RAM_FW_VER_HW_MASK) >>
+					GSI_INST_RAM_FW_VER_HW_SHIFT;
+	ipa_reg_save.gsi.fw_ver.flavor = (gsi_fw_ver & GSI_INST_RAM_FW_VER_FLAVOR_MASK) >>
+					GSI_INST_RAM_FW_VER_FLAVOR_SHIFT;
+	ipa_reg_save.gsi.fw_ver.fw_version = (gsi_fw_ver & GSI_INST_RAM_FW_VER_FW_MASK) >>
+					GSI_INST_RAM_FW_VER_FW_SHIFT;
 }
 
 /*

+ 44 - 3
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.5/ipa_reg_dump.h

@@ -443,10 +443,25 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 19), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[19].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1),	\
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 20), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[20].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 21), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[21].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 22), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[22].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 23), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[23].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[24].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 0),	\
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_UC_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
@@ -601,6 +616,24 @@ struct map_src_dst_addr_s {
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 19), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[19].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 20), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[20].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 21), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[21].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 22), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[22].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 23), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[23].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 24), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[24].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name, \
 		GEN_REG_ATTR(reg_name) }, \
@@ -1465,6 +1498,14 @@ struct ipa_reg_save_dst_rsrc_cnt_s {
 	  ipa_dst_rsrc_grp_4567_rsrc_type_cnt_n;
 };
 
+/* GSI fw version data */
+struct ipa_reg_save_gsi_fw_version_s {
+	u32 raw_version;
+	u32 hw_version;
+	u32 flavor;
+	u32 fw_version;
+};
+
 /* GSI General register save data struct */
 struct ipa_reg_save_gsi_gen_s {
 	struct gsi_hwio_def_gsi_cfg_s
@@ -1867,7 +1908,7 @@ struct ipa_regs_save_hierarchy_s {
 
 /* Top level GSI register save data struct */
 struct gsi_regs_save_hierarchy_s {
-	u32 fw_ver;
+	struct ipa_reg_save_gsi_fw_version_s	fw_ver;
 	struct ipa_reg_save_gsi_gen_s		gen;
 	struct ipa_reg_save_gsi_gen_ee_s	gen_ee[IPA_REG_SAVE_GSI_NUM_EE];
 	struct ipa_reg_save_gsi_ch_cntxt_s	ch_cntxt;

+ 63 - 7
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -507,7 +507,7 @@ static int ipa_pm_notify(struct notifier_block *b, unsigned long event, void *p)
 	switch (event) {
 		case PM_POST_SUSPEND:
 #ifdef CONFIG_DEEPSLEEP
-			if (mem_sleep_current == PM_SUSPEND_MEM && ipa3_ctx->deepsleep) {
+			if (pm_suspend_via_firmware() && ipa3_ctx->deepsleep) {
 				IPADBG("Enter deepsleep resume\n");
 				ipa3_deepsleep_resume();
 				IPADBG("Exit deepsleep resume\n");
@@ -979,6 +979,7 @@ struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type)
 {
 	return &smmu_cb[cb_type];
 }
+EXPORT_SYMBOL(ipa3_get_smmu_ctx);
 
 static int ipa3_open(struct inode *inode, struct file *filp)
 {
@@ -2815,7 +2816,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int retval = 0;
 	u32 pyld_sz;
-	u8 header[256] = { 0 };
+	u8 header[512] = { 0 };
 	u8 *param = NULL;
 	bool is_vlan_mode;
 	struct ipa_ioc_coal_evict_policy evict_pol;
@@ -6924,17 +6925,16 @@ void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
 }
 
 /**
- * ipa3_inc_client_enable_clks() - Increase active clients counter, and
+ * ipa3_inc_client_enable_clks_no_log() - Increase active clients counter, and
  * enable ipa clocks if necessary
  *
  * Return codes:
  * None
  */
-void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+static void ipa3_inc_client_enable_clks_no_log(void)
 {
 	int ret;
 
-	ipa3_active_clients_log_inc(id, false);
 	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
 	if (ret) {
 		IPADBG_LOW("active clients = %d\n",
@@ -6960,6 +6960,19 @@ void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
 		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
 	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
 }
+
+/**
+ * ipa3_inc_client_enable_clks() - Increase active clients counter and
+ * enable ipa clocks if necessary, log the caller
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	ipa3_active_clients_log_inc(id, false);
+	ipa3_inc_client_enable_clks_no_log();
+}
 EXPORT_SYMBOL(ipa3_inc_client_enable_clks);
 
 static void ipa3_handle_gsi_differ_irq(void)
@@ -7064,7 +7077,23 @@ bail:
 }
 
 /**
- * ipa3_dec_client_disable_clks() - Decrease active clients counter
+ * ipa3_dec_client_disable_clks_no_log() - Decrease active clients counter
+ *
+ * In case that there are no active clients this function also starts
+ * TAG process. When TAG progress ends ipa clocks will be gated.
+ * start_tag_process_again flag is set during this function to signal TAG
+ * process to start again as there was another client that may send data to ipa
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_dec_client_disable_clks_no_log(void)
+{
+	__ipa3_dec_client_disable_clks();
+}
+
+/**
+ * ipa3_dec_client_disable_clks() - Decrease active clients counter and log caller
  *
  * In case that there are no active clients this function also starts
  * TAG process. When TAG progress ends ipa clocks will be gated.
@@ -8102,6 +8131,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	gsi_props.rel_clk_cb = NULL;
 	gsi_props.clk_status_cb = ipa3_active_clks_status;
 	gsi_props.enable_clk_bug_on = ipa3_handle_gsi_differ_irq;
+	gsi_props.vote_clk_cb = ipa3_inc_client_enable_clks_no_log;
+	gsi_props.unvote_clk_cb = ipa3_dec_client_disable_clks_no_log;
 
 	if (ipa3_ctx->ipa_config_is_mhi) {
 		gsi_props.mhi_er_id_limits_valid = true;
@@ -8397,6 +8428,11 @@ static int ipa_firmware_load(const char *sub_sys)
 	scnprintf(fw_name, ARRAY_SIZE(fw_name), "%s.mdt", sub_sys);
 	ret = of_property_read_u32_index(dev->of_node, "pas-ids", index,
 					  &pas_id);
+	if (ret) {
+		dev_err(dev, "error %d getting \"pas-ids\" property\n",
+			ret);
+		return ret;
+	}
 
 	ret = request_firmware(&fw, fw_name, dev);
 	if (ret) {
@@ -9485,6 +9521,11 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		result = -ENOMEM;
 		goto fail_gsi_map;
 	}
+	mutex_init(&ipa3_ctx->recycle_stats_collection_lock);
+	memset(&ipa3_ctx->recycle_stats, 0, sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+	memset(&ipa3_ctx->prev_coal_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+	memset(&ipa3_ctx->prev_default_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+	memset(&ipa3_ctx->prev_low_lat_data_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
 
 	ipa3_ctx->transport_power_mgmt_wq =
 		create_singlethread_workqueue("transport_power_mgmt");
@@ -9494,6 +9535,17 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		goto fail_create_transport_wq;
 	}
 
+	/* Create workqueue for recycle stats collection */
+	ipa3_ctx->collect_recycle_stats_wq =
+			create_singlethread_workqueue("page_recycle_stats_collection");
+	if (!ipa3_ctx->collect_recycle_stats_wq) {
+		IPAERR("failed to create page recycling stats collection wq\n");
+		result = -ENOMEM;
+		goto fail_create_recycle_stats_wq;
+	}
+	memset(&ipa3_ctx->recycle_stats, 0,
+		   sizeof(ipa3_ctx->recycle_stats));
+
 	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
 
 	/* init the lookaside cache */
@@ -9828,6 +9880,8 @@ fail_hdr_cache:
 fail_rt_rule_cache:
 	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
 fail_flt_rule_cache:
+	destroy_workqueue(ipa3_ctx->collect_recycle_stats_wq);
+fail_create_recycle_stats_wq:
 	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
 fail_create_transport_wq:
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
@@ -11180,6 +11234,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 		IPADBG("ipa q6 smem size = %u\n", ipa_smem_size);
 	}
 
+	ipa3_ctx->ipa_smem_size = ipa_smem_size;
 	if (ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ) {
 		/* map SMEM memory for IPA table accesses */
 		ret = qcom_smem_alloc(SMEM_MODEM,
@@ -11702,7 +11757,7 @@ int ipa3_ap_suspend(struct device *dev)
 	}
 
 #ifdef CONFIG_DEEPSLEEP
-	if (mem_sleep_current == PM_SUSPEND_MEM) {
+	if (pm_suspend_via_firmware()) {
 		IPADBG("Enter deepsleep suspend\n");
 		ipa3_deepsleep_suspend();
 		IPADBG("Exit deepsleep suspend\n");
@@ -11896,6 +11951,7 @@ int ipa3_iommu_map(struct iommu_domain *domain,
 
 	return iommu_map(domain, iova, paddr, size, prot);
 }
+EXPORT_SYMBOL(ipa3_iommu_map);
 
 /**
  * ipa3_get_smmu_params()- Return the ipa3 smmu related params.

+ 160 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -26,6 +26,7 @@
 #include "ipa_trace.h"
 #include "ipahal.h"
 #include "ipahal_fltrt.h"
+#include "ipa_stats.h"
 
 #define IPA_GSI_EVENT_RP_SIZE 8
 #define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
@@ -155,6 +156,156 @@ static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget);
 
 struct gsi_chan_xfer_notify g_lan_rx_notify[IPA_LAN_NAPI_MAX_FRAMES];
 
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_default_coal_recycle_stats_wq_work,
+	ipa3_collect_default_coal_recycle_stats_wq);
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_low_lat_data_recycle_stats_wq_work,
+	ipa3_collect_low_lat_data_recycle_stats_wq);
+
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	int stat_interval_index;
+	int ep_idx = -1;
+
+	/* For targets which don't require coalescing pipe */
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == -1)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+	if (ep_idx == -1)
+		sys = NULL;
+	else
+		sys = ipa3_ctx->ep[ep_idx].sys;
+
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+	stat_interval_index = ipa3_ctx->recycle_stats.default_coal_stats_index;
+	ipa3_ctx->recycle_stats.interval_time_in_ms = IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME;
+
+	/* Coalescing pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[0].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[0].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+			- ipa3_ctx->prev_coal_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+			- ipa3_ctx->prev_coal_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+			- ipa3_ctx->prev_coal_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_coal_recycle_stats.total_replenished
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_coal_recycle_stats.page_recycled
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_coal_recycle_stats.tmp_alloc
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative;
+
+	/* Default pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[1].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[1].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+			- ipa3_ctx->prev_default_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+			- ipa3_ctx->prev_default_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+			- ipa3_ctx->prev_default_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_default_recycle_stats.total_replenished
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_default_recycle_stats.page_recycled
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_default_recycle_stats.tmp_alloc
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].valid = 1;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].valid = 1;
+
+	/* Single Indexing for coalescing and default pipe */
+	ipa3_ctx->recycle_stats.default_coal_stats_index =
+			(ipa3_ctx->recycle_stats.default_coal_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+	if (sys && atomic_read(&sys->curr_polling_state))
+		queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+				&ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	return;
+
+}
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	int stat_interval_index;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+	if (ep_idx == -1)
+		sys = NULL;
+	else
+		sys = ipa3_ctx->ep[ep_idx].sys;
+
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+	stat_interval_index = ipa3_ctx->recycle_stats.low_lat_stats_index;
+
+	/* Low latency data pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[2].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[2].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+			= ipa3_ctx->stats.page_recycle_stats[2].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+			- ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+			- ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_diff
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+			- ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc
+			= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].valid = 1;
+
+	/* Indexing for low lat data stats pipe */
+	ipa3_ctx->recycle_stats.low_lat_stats_index =
+			(ipa3_ctx->recycle_stats.low_lat_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+	if (sys && atomic_read(&sys->curr_polling_state))
+		queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+				&ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	return;
+}
+
 /**
  * ipa3_write_done_common() - this function is responsible on freeing
  * all tx_pkt_wrappers related to a skb
@@ -5467,7 +5618,9 @@ static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in,
 	/* disable ipa_status */
 	sys->ep->status.status_en = false;
 
-	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+	if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
+		(in->client == IPA_CLIENT_APPS_WAN_CONS &&
+			ipa3_ctx->ipa_hw_type <= IPA_HW_v4_2))
 		in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
 
 	IPADBG("set aggr_limit %lu\n", (unsigned long) *aggr_byte_limit);
@@ -7001,6 +7154,9 @@ start_poll:
 	/* call repl_hdlr before napi_reschedule / napi_complete */
 	ep->sys->repl_hdlr(ep->sys);
 	wan_def_sys->repl_hdlr(wan_def_sys);
+	/* Scheduling WAN and COAL collect stats work queue */
+	queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+		&ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
 	/* When not able to replenish enough descriptors, keep in polling
 	 * mode, wait for napi-poll and replenish again.
 	 */
@@ -7189,7 +7345,6 @@ static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget)
 
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI_LL");
 
-
 	remain_aggr_weight = budget / ipa3_ctx->ipa_wan_aggr_pkt_cnt;
 	if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
 		IPAERR("NAPI weight is higher than expected\n");
@@ -7229,6 +7384,9 @@ start_poll:
 	cnt += budget - remain_aggr_weight * ipa3_ctx->ipa_wan_aggr_pkt_cnt;
 	/* call repl_hdlr before napi_reschedule / napi_complete */
 	sys->repl_hdlr(sys);
+	/* Scheduling RMNET LOW LAT DATA collect stats work queue */
+	queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+		&ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
 	/* When not able to replenish enough descriptors, keep in polling
 	 * mode, wait for napi-poll and replenish again.
 	 */

+ 13 - 9
drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c

@@ -719,7 +719,7 @@ fail_get_gsi_ep_info:
 	return result;
 }
 
-static int ipa_eth_setup_ntn3_gsi_channel(
+static int ipa_eth_setup_ntn_gsi_channel(
 	struct ipa_eth_client_pipe_info *pipe,
 	struct ipa3_ep_context *ep)
 {
@@ -751,11 +751,8 @@ static int ipa_eth_setup_ntn3_gsi_channel(
 	gsi_evt_ring_props.int_modt = IPA_ETH_NTN_MODT;
 	/* len / RE_SIZE == len in counts (convert from bytes) */
 	len = pipe->info.transfer_ring_size;
-	/*
-	 * int_modc = 2 is experiments based best value for tput.
-	 * we shall use a framework setup in the future.
-	 */
-	gsi_evt_ring_props.int_modc = 2;
+	gsi_evt_ring_props.int_modc = len * IPA_ETH_AQC_MODC_FACTOR /
+		(100 * GSI_EVT_RING_RE_SIZE_16B);
 	gsi_evt_ring_props.exclusive = true;
 	gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb;
 	gsi_evt_ring_props.user_data = NULL;
@@ -836,8 +833,15 @@ static int ipa_eth_setup_ntn3_gsi_channel(
 			(u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32);
 	}
 
-	if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
-		ch_scratch.ntn.ioc_mod_threshold = IPA_ETH_NTN_MODT;
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+		if (pipe->info.client_info.ntn.ioc_mod_threshold &&
+		    pipe->info.client_info.ntn.ioc_mod_threshold < len / GSI_EVT_RING_RE_SIZE_16B) {
+			ch_scratch.ntn.ioc_mod_threshold =
+				pipe->info.client_info.ntn.ioc_mod_threshold;
+		} else {
+			ch_scratch.ntn.ioc_mod_threshold = IPA_ETH_NTN_MODT;
+		}
+	}
 
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
@@ -1018,7 +1022,7 @@ int ipa3_eth_connect(
 		result = ipa_eth_setup_aqc_gsi_channel(pipe, ep);
 		break;
 	case IPA_HW_PROTOCOL_NTN3:
-		result = ipa_eth_setup_ntn3_gsi_channel(pipe, ep);
+		result = ipa_eth_setup_ntn_gsi_channel(pipe, ep);
 		break;
 	default:
 		IPAERR("unknown protocol %d\n", prot);

+ 8 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -2584,6 +2584,14 @@ struct ipa3_context {
 	struct list_head minidump_list_head;
 	phys_addr_t per_stats_smem_pa;
 	void *per_stats_smem_va;
+	u32 ipa_smem_size;
+	bool is_dual_pine_config;
+	struct workqueue_struct *collect_recycle_stats_wq;
+	struct ipa_lnx_pipe_page_recycling_stats recycle_stats;
+	struct ipa3_page_recycle_stats prev_coal_recycle_stats;
+	struct ipa3_page_recycle_stats prev_default_recycle_stats;
+	struct ipa3_page_recycle_stats prev_low_lat_data_recycle_stats;
+	struct mutex recycle_stats_collection_lock;
 };
 
 struct ipa3_plat_drv_res {

+ 6 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c

@@ -718,6 +718,10 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
 	req.hw_filter_stats_info.hw_filter_stats_size = IPA_Q6_FNR_STATS_SIZE;
 	req.hw_filter_stats_info.hw_filter_stats_start_index = IPA_Q6_FNR_START_IDX;
 	req.hw_filter_stats_info.hw_filter_stats_end_index = IPA_Q6_FNR_END_IDX;
+
+	req.smem_info_valid = true;
+	req.smem_info.size = ipa3_ctx->ipa_smem_size;
+
 	IPAWANDBG("hw_flt stats: hw_filter_start_address = %u", req.hw_filter_stats_info.hw_filter_stats_start_addr);
 	IPAWANDBG("hw_flt stats: hw_filter_stats_size = %u", req.hw_filter_stats_info.hw_filter_stats_size);
 	IPAWANDBG("hw_flt stats: hw_filter_stats_start_index  = %u", req.hw_filter_stats_info.hw_filter_stats_start_index);
@@ -758,6 +762,8 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
 		req.v4_hash_filter_tbl_start_addr);
 	IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n",
 		req.v6_hash_filter_tbl_start_addr);
+	IPAWANDBG("ipa_smem_info.size %d\n",
+			req.smem_info.size);
 
 	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;

+ 76 - 30
drivers/platform/msm/ipa/ipa_v3/ipa_stats.c

@@ -926,7 +926,7 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 						IPA_CLIENT_AQC_ETHERNET_CONS;
 #if IPA_ETH_API_VER >= 2
 				/* Get the client pipe info[0] from the allocation info context only if it is NTN3 */
-				if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3)) {
+				if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) {
 						tx_instance_ptr_local->tx_client =
 							ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
 							i].pipes_client_type[0];
@@ -1025,7 +1025,7 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 						IPA_CLIENT_AQC_ETHERNET_PROD;
 #if IPA_ETH_API_VER >= 2
 				/* Get the client pipe info[1] from the allocation info context only if it is NTN3 */
-				if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3)) {
+				if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) {
 						rx_instance_ptr_local->rx_client =
 							ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
 							i].pipes_client_type[1];
@@ -1472,6 +1472,42 @@ success:
 }
 #endif
 
+static int ipa_get_page_recycle_stats(unsigned long arg)
+{
+	int ret = 0;
+
+	/* Copy the accumulated page recycling stats (default, coal and
+	 * low lat data pipes) to user space, then clear them so that
+	 * the next query starts a fresh collection interval.
+	 *
+	 * The stats are cleared only after the copy-out succeeds, so a
+	 * faulting user buffer no longer destroys the accumulated data.
+	 * The intermediate kernel buffer (previously obtained through
+	 * memdup_user() although its contents were never read) is
+	 * dropped entirely: the snapshot is copied straight from the
+	 * context while holding the collection lock.
+	 */
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	/* copy_to_user() may fault and sleep; safe under a mutex. */
+	if (copy_to_user((void __user *)arg,
+		(u8 *)&ipa3_ctx->recycle_stats,
+		sizeof(struct ipa_lnx_pipe_page_recycling_stats))) {
+		IPA_STATS_ERR("copy to user failed");
+		ret = -EFAULT;
+		goto unlock;
+	}
+
+	/* Clear all the data and valid bits */
+	memset(&ipa3_ctx->recycle_stats, 0,
+		sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+unlock:
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	return ret;
+}
+
 static int ipa_stats_get_alloc_info(unsigned long arg)
 {
 	int i = 0;
@@ -1665,41 +1701,44 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 #if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
 		if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
 			ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
-			goto success;
+		} else {
+			if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
+				ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
+			else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
+				ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
+			else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
+			ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
+				IPA_CLIENT_MHI_PRIME_TETH_CONS;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
+				IPA_CLIENT_MHI_PRIME_TETH_PROD;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
+				IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
+				IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
+				= IPA_CLIENT_MHI_PRIME_TETH_CONS;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
+				= IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
+				= IPA_CLIENT_MHI_PRIME_TETH_PROD;
+			ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
+				= IPA_CLIENT_MHI_PRIME_RMNET_PROD;
 		}
-		if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
-			ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
-		else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
-			ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
-		else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
-		ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
-			IPA_CLIENT_MHI_PRIME_TETH_CONS;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
-			IPA_CLIENT_MHI_PRIME_TETH_PROD;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
-			IPA_CLIENT_MHI_PRIME_RMNET_CONS;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
-			IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
-			= IPA_CLIENT_MHI_PRIME_TETH_CONS;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
-			= IPA_CLIENT_MHI_PRIME_RMNET_CONS;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
-			= IPA_CLIENT_MHI_PRIME_TETH_PROD;
-		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
-			= IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-
-success:
 #else
 		/* MHI Prime is not enabled */
 		ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
 #endif
 	}
 
+	/* For Page recycling stats for default, coal and Low lat pipes */
+	if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS)
+		ipa_lnx_agent_ctx.alloc_info.num_page_rec_interval =
+			IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
 	if(copy_to_user((u8 *)arg,
 		&ipa_lnx_agent_ctx,
 		sizeof(struct ipa_lnx_stats_spearhead_ctx))) {
@@ -1818,6 +1857,13 @@ static long ipa_lnx_stats_ioctl(struct file *filp,
 			}
 #endif
 		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS) {
+			retval = ipa_get_page_recycle_stats((unsigned long) consolidated_stats->recycle_stats);
+			if (retval) {
+				IPA_STATS_ERR("ipa get page recycle stats fail\n");
+				break;
+			}
+		}
 		break;
 	default:
 		retval = -ENOTTY;

+ 39 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_stats.h

@@ -56,6 +56,9 @@
 
 #define SPEARHEAD_NUM_MAX_INSTANCES 2
 
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT 5
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME 10 /* In milliseconds */
+
 /**
  * This is used to indicate which set of logs is enabled from IPA
  * These bitmapped macros are copied from
@@ -67,6 +70,7 @@
 #define SPRHD_IPA_LOG_TYPE_ETH_STATS       0x00008
 #define SPRHD_IPA_LOG_TYPE_USB_STATS       0x00010
 #define SPRHD_IPA_LOG_TYPE_MHIP_STATS      0x00020
+#define SPRHD_IPA_LOG_TYPE_RECYCLE_STATS   0x00040
 
 
 /**
@@ -340,7 +344,6 @@ struct ipa_lnx_mhip_inst_stats {
 };
 #define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT (8 + 248)
 
-
 struct ipa_lnx_consolidated_stats {
 	uint64_t log_type_mask;
 	struct ipa_lnx_generic_stats *generic_stats;
@@ -349,9 +352,43 @@ struct ipa_lnx_consolidated_stats {
 	struct ipa_lnx_eth_inst_stats *eth_stats;
 	struct ipa_lnx_usb_inst_stats *usb_stats;
 	struct ipa_lnx_mhip_inst_stats *mhip_stats;
+	struct ipa_lnx_pipe_page_recycling_stats *recycle_stats;
 };
 #define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT (8 + 48)
 
+enum rx_channel_type {
+	RX_WAN_COALESCING,
+	RX_WAN_DEFAULT,
+	RX_WAN_LOW_LAT_DATA,
+	RX_CHANNEL_MAX,
+};
+
+struct ipa_lnx_recycling_stats {
+	uint64_t total_cumulative;
+	uint64_t recycle_cumulative;
+	uint64_t temp_cumulative;
+	uint64_t total_diff;
+	uint64_t recycle_diff;
+	uint64_t temp_diff;
+	uint64_t valid;
+};
+
+/**
+ * The consolidated stats will be in the 0th index.
+ * Differences between each interval's values will be in
+ * indices 1 to (IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT - 1),
+ * tracked via default_coal_stats_index and low_lat_stats_index.
+ * @interval_time_in_ms: Interval time in milliseconds
+ */
+struct ipa_lnx_pipe_page_recycling_stats {
+	uint32_t interval_time_in_ms;
+	uint32_t default_coal_stats_index;
+	uint32_t low_lat_stats_index;
+	uint32_t sequence_id;
+	uint64_t reserved;
+	struct ipa_lnx_recycling_stats rx_channel[RX_CHANNEL_MAX][IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT];
+};
+
 /* Explain below structures */
 struct ipa_lnx_each_inst_alloc_info {
 	uint32_t pipes_client_type[SPEARHEAD_NUM_MAX_PIPES];
@@ -372,7 +409,7 @@ struct ipa_lnx_stats_alloc_info {
 	uint32_t num_eth_instances;
 	uint32_t num_usb_instances;
 	uint32_t num_mhip_instances;
-	uint32_t reserved;
+	uint32_t num_page_rec_interval;
 	struct ipa_lnx_each_inst_alloc_info wlan_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
 	struct ipa_lnx_each_inst_alloc_info eth_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
 	struct ipa_lnx_each_inst_alloc_info usb_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];

+ 19 - 13
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -989,6 +989,8 @@ static const struct ipa_qmb_outstanding ipa3_qmb_outstanding
 	[IPA_4_7][IPA_QMB_INSTANCE_DDR]	        = {13, 12, 120},
 	[IPA_4_9][IPA_QMB_INSTANCE_DDR]	        = {16, 8, 120},
 	[IPA_4_11][IPA_QMB_INSTANCE_DDR] = {13, 12, 120},
+	[IPA_5_5][IPA_QMB_INSTANCE_DDR]		= {16, 12, 0},
+	[IPA_5_5][IPA_QMB_INSTANCE_PCIE]	= {16, 8, 0},
 };
 
 enum ipa_tx_instance {
@@ -4184,7 +4186,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_TX_INSTANCE_NA },
 	[IPA_5_0][IPA_CLIENT_WLAN3_PROD] = {
 			true,   IPA_v5_0_GROUP_UL,
-			false,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1 , 0, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2},
@@ -7531,15 +7533,17 @@ int ipa3_init_hw(void)
 	}
 
 	/* Configure COAL_MASTER_CFG */
-	memset(&master_cfg, 0, sizeof(master_cfg));
-	ipahal_read_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
-	master_cfg.coal_ipv4_id_ignore = ipa3_ctx->coal_ipv4_id_ignore;
-	ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+	if(ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5) {
+		memset(&master_cfg, 0, sizeof(master_cfg));
+		ipahal_read_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
+		master_cfg.coal_ipv4_id_ignore = ipa3_ctx->coal_ipv4_id_ignore;
+		ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
 
-	IPADBG(
-		": coal-ipv4-id-ignore = %s\n",
-		master_cfg.coal_ipv4_id_ignore ?
-		"True" : "False");
+		IPADBG(
+			": coal-ipv4-id-ignore = %s\n",
+			master_cfg.coal_ipv4_id_ignore ?
+			"True" : "False");
+	}
 
 	ipa_comp_cfg();
 
@@ -9358,11 +9362,13 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
 		param_in->client == IPA_CLIENT_RTK_ETHERNET_PROD) {
 		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
 	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
-			   param_in->client == IPA_CLIENT_WLAN2_PROD) {
+			   param_in->client == IPA_CLIENT_WLAN2_PROD ||
+				param_in->client == IPA_CLIENT_WLAN3_PROD) {
 		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
-		if (param_in->client == IPA_CLIENT_WLAN2_PROD)
-			result = ipa3_write_qmapid_wdi3_gsi_pipe(
-				ipa_ep_idx, meta.qmap_id);
+		if (param_in->client == IPA_CLIENT_WLAN2_PROD ||
+			param_in->client == IPA_CLIENT_WLAN3_PROD)
+				result = ipa3_write_qmapid_wdi3_gsi_pipe(
+					ipa_ep_idx, meta.qmap_id);
 		else
 			result = ipa3_write_qmapid_wdi_pipe(
 				ipa_ep_idx, meta.qmap_id);

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c

@@ -2223,6 +2223,7 @@ static int handle3_ingress_format_v2(struct net_device *dev,
 
 			if (rc == -EFAULT) {
 				IPAWANERR("Failed to setup wan/coal cons pipes\n");
+				mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
 				return rc;
 			}
 
@@ -2300,6 +2301,7 @@ static int handle3_ingress_format_v2(struct net_device *dev,
 		sizeof(struct rmnet_ingress_param) *
 			ingress_ioctl_v2_data.number_of_eps)) {
 		IPAWANERR("Ingress copy to user failed\n");
+		mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
 		return -EFAULT;
 	}
 
@@ -2523,6 +2525,7 @@ static int handle3_egress_format_v2(struct net_device *dev,
 
 			if (rc == -EFAULT) {
 				IPAWANERR("Failed to setup wan prod pipes\n");
+				mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
 				return rc;
 			}
 

+ 76 - 30
drivers/platform/msm/ipa/test/ipa_test_ntn.c

@@ -105,11 +105,12 @@ struct rx_event_ring_ele
 }__packed;
 
 static inline void ipa_test_ntn_set_client_params(enum ipa_client_type cons_type,
-	enum ipa_client_type prod_type, int inst_id)
+	enum ipa_client_type prod_type, int inst_id, enum ipa_eth_client_type eth_client_type)
 {
 	test_ntn_ctx->cons_client_type = cons_type;
 	test_ntn_ctx->prod_client_type = prod_type;
 	test_ntn_ctx->eth_client_inst_id = inst_id;
+	test_ntn_ctx->client.client_type = eth_client_type;
 }
 
 static void ipa_test_ntn_free_dma_buff(struct ipa_mem_buffer *mem)
@@ -408,7 +409,8 @@ static int ipa_test_ntn_suite_setup(void **priv)
 		return -ENOMEM;
 	}
 
-	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0);
+	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0,
+			IPA_ETH_CLIENT_NTN3);
 
 	init_completion(&test_ntn_ctx->init_completion_obj);
 
@@ -546,13 +548,20 @@ static int ipa_ntn_test_setup_pipes(void)
 	struct ipa_eth_client *client;
 	int ret, i;
 #if IPA_ETH_API_VER >= 2
-	struct net_device dummy_net_dev;
+	struct net_device *dummy_net_dev;
 	unsigned char dummy_dev_addr = 1;
 
-	memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
-	dummy_net_dev.dev_addr = &dummy_dev_addr;
+	dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+	if (dummy_net_dev == NULL){
+		IPA_UT_ERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+	dummy_net_dev->dev_addr = &dummy_dev_addr;
+
 
-	test_ntn_ctx->client.client_type = IPA_ETH_CLIENT_NTN3;
+	/* client_type is set in ipa_test_ntn_set_client_params */
 	test_ntn_ctx->client.inst_id = test_ntn_ctx->eth_client_inst_id;
 #else
 	test_ntn_ctx->client.client_type = IPA_ETH_CLIENT_NTN;
@@ -560,7 +569,7 @@ static int ipa_ntn_test_setup_pipes(void)
 #endif
 	test_ntn_ctx->client.traffic_type = IPA_ETH_PIPE_BEST_EFFORT;
 #if IPA_ETH_API_VER >= 2
-	test_ntn_ctx->client.net_dev = &dummy_net_dev;
+	test_ntn_ctx->client.net_dev = dummy_net_dev;
 #endif
 
 	/* RX pipe */
@@ -688,21 +697,22 @@ static int ipa_ntn_test_setup_pipes(void)
 	ret = ipa_eth_client_conn_pipes(client);
 	if(ret) {
 		IPA_UT_ERR("ipa_eth_client_conn_pipes failed ret %d\n", ret);
-		goto conn_failed;
+		ipa_ntn_test_del_client_list();
 	}
 
-	return 0;
+#if IPA_ETH_API_VER >= 2
+	kfree(dummy_net_dev);
+#endif
 
-conn_failed:
-	ipa_ntn_test_del_client_list();
 	return ret;
+
 }
 
 static int ipa_ntn_test_reg_intf(void)
 {
 	struct ipa_eth_intf_info intf;
 #if IPA_ETH_API_VER >= 2
-	struct net_device dummy_net_dev;
+	struct net_device *dummy_net_dev;
 	unsigned char dummy_dev_addr[ETH_ALEN] = { 0 };
 #else
 	char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
@@ -712,11 +722,17 @@ static int ipa_ntn_test_reg_intf(void)
 
 	memset(&intf, 0, sizeof(intf));
 #if IPA_ETH_API_VER >= 2
-	memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
+	dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+	if (dummy_net_dev == NULL){
+		IPA_UT_ERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
 
-	intf.net_dev = &dummy_net_dev;
+	memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+	intf.net_dev = dummy_net_dev;
 	intf.net_dev->dev_addr = (unsigned char *)dummy_dev_addr;
 	intf.is_conn_evt = true;
+	intf.client = &test_ntn_ctx->client;
 
 	snprintf(intf.net_dev->name, sizeof(intf.net_dev->name), "ntn_test");
 	IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.net_dev->name, strlen(intf.net_dev->name));
@@ -756,6 +772,7 @@ static int ipa_ntn_test_reg_intf(void)
 	}
 
 #if IPA_ETH_API_VER >= 2
+	kfree(dummy_net_dev);
 #else
 	kfree(intf.pipe_hdl_list);
 #endif
@@ -766,17 +783,24 @@ static int ipa_ntn_test_reg_intf(void)
 static int ipa_ntn_test_unreg_intf(void)
 {
 	struct ipa_eth_intf_info intf;
+	int ret = 0;
 #if IPA_ETH_API_VER >= 2
-	struct net_device dummy_net_dev;
+	struct net_device *dummy_net_dev;
 #else
 	char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
 #endif
 
 	memset(&intf, 0, sizeof(intf));
 #if IPA_ETH_API_VER >= 2
-	memset(dummy_net_dev.name, 0, sizeof(dummy_net_dev.name));
+	dummy_net_dev = kzalloc(sizeof(*dummy_net_dev), GFP_KERNEL);
+	if (dummy_net_dev == NULL){
+		IPA_UT_ERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
 
-	intf.net_dev = &dummy_net_dev;
+	memset(dummy_net_dev->name, 0, sizeof(dummy_net_dev->name));
+	intf.net_dev = dummy_net_dev;
+	intf.client = &test_ntn_ctx->client;
 
 	snprintf(intf.net_dev->name, sizeof(intf.net_dev->name), "ntn_test");
 	IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.net_dev->name, strlen(intf.net_dev->name));
@@ -787,7 +811,14 @@ static int ipa_ntn_test_unreg_intf(void)
 		strlen(intf.netdev_name));
 #endif
 
-	return (ipa_eth_client_unreg_intf(&intf));
+	ret = ipa_eth_client_unreg_intf(&intf);
+
+#if IPA_ETH_API_VER >= 2
+	kfree(dummy_net_dev);
+#endif
+
+	return ret;
+
 }
 
 static void ipa_ntn_test_advance_db(u32 *db, int steps,
@@ -970,7 +1001,7 @@ static int ipa_ntn_teardown_pipes(void)
 }
 static int ipa_ntn_test_prepare_test(void)
 {
-	struct ipa_ep_cfg ep_cfg = { { 0 } };
+	struct ipa_ep_cfg *ep_cfg;
 	int offset = 0;
 	int ret = 0;
 
@@ -1013,21 +1044,29 @@ static int ipa_ntn_test_prepare_test(void)
 	}
 
 	/* configure NTN RX EP in DMA mode */
-	ep_cfg.mode.mode = IPA_DMA;
-	ep_cfg.mode.dst = test_ntn_ctx->cons_client_type;
+	ep_cfg = kzalloc(sizeof(*ep_cfg), GFP_KERNEL);
+	if (ep_cfg == NULL){
+		IPA_UT_ERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
 
-	ep_cfg.seq.set_dynamic = true;
+	ep_cfg->mode.mode = IPA_DMA;
+	ep_cfg->mode.dst = test_ntn_ctx->cons_client_type;
+	ep_cfg->seq.set_dynamic = true;
 
-	if (ipa3_cfg_ep(ipa_get_ep_mapping(test_ntn_ctx->prod_client_type),
-		&ep_cfg)) {
+
+	if (ipa3_cfg_ep(ipa_get_ep_mapping(test_ntn_ctx->prod_client_type), ep_cfg)) {
 		IPA_UT_ERR("fail to configure DMA mode.\n");
 		ret = -EFAULT;
 		goto unreg;
 	}
 
+	kfree(ep_cfg);
+
 	return 0;
 
 unreg:
+	kfree(ep_cfg);
 	if (ipa_ntn_test_unreg_intf()) {
 		IPA_UT_ERR("fail to unregister interface.\n");
 		ret = -EFAULT;
@@ -1418,6 +1457,7 @@ static int ipa_ntn_test_multi_transfer_burst(void *priv)
 	}
 	IPA_UT_INFO("sent the last packet succesfully!\n");
 
+
 	ipa_ntn_test_print_stats();
 
 fail:
@@ -1431,16 +1471,20 @@ fail:
 	return ret;
 }
 
-static int ipa_ntn_test_clients2_multi_transfer_burst(void *priv)
+#if IPA_ETH_API_VER >= 2
+static int ipa_ntn_test_eth1_multi_transfer_burst(void *priv)
 {
 	int ret;
 
-	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET2_CONS, IPA_CLIENT_ETHERNET2_PROD, 1);
+	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET2_CONS, IPA_CLIENT_ETHERNET2_PROD, 1,
+		IPA_ETH_CLIENT_NTN3);
 	ret = ipa_ntn_test_multi_transfer_burst(priv);
-	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0);
+	ipa_test_ntn_set_client_params(IPA_CLIENT_ETHERNET_CONS, IPA_CLIENT_ETHERNET_PROD, 0,
+		IPA_ETH_CLIENT_NTN3);
 
 	return ret;
 }
+#endif
 
 /* Suite definition block */
 IPA_UT_DEFINE_SUITE_START(ntn, "NTN3 tests",
@@ -1471,10 +1515,12 @@ IPA_UT_DEFINE_SUITE_START(ntn, "NTN3 tests",
 			ipa_ntn_test_multi_transfer_burst,
 			true, IPA_HW_v5_0, IPA_HW_MAX),
 
-	IPA_UT_ADD_TEST(clients2_multi_transfer_burst,
-			"Clients pair 2 send entire ring in one shot",
-			ipa_ntn_test_clients2_multi_transfer_burst,
+#if IPA_ETH_API_VER >= 2
+	IPA_UT_ADD_TEST(eth1_multi_transfer_burst,
+			"eth1: send entire ring in one shot",
+			ipa_ntn_test_eth1_multi_transfer_burst,
 			true, IPA_HW_v5_0, IPA_HW_MAX),
+#endif
 } IPA_UT_DEFINE_SUITE_END(ntn);
 
 

Unele fișiere nu au fost afișate deoarece prea multe fișiere au fost modificate în acest diff