Browse Source

Fast-forwarding dataipa CRT:data-kernel.lnx.1.2-211122 to data-kernel.lnx.2.0

Arnav Sharma 3 years ago
parent
commit
9d2143602c
35 changed files with 2106 additions and 1004 deletions
  1. 10 0
      config/sa410mdataipa.h
  2. 6 0
      config/sa410mdataipa_QGKI.conf
  3. 9 0
      drivers/platform/msm/Kbuild
  4. 367 47
      drivers/platform/msm/gsi/gsi.c
  5. 34 1
      drivers/platform/msm/gsi/gsi.h
  6. 3 0
      drivers/platform/msm/ipa/ipa_clients/ipa_eth.c
  7. 47 11
      drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
  8. 161 0
      drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/gsi_hwio_def.h
  9. 14 5
      drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_hw_common_ex.h
  10. 43 0
      drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_hwio_def.h
  11. 113 22
      drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
  12. 273 82
      drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
  13. 34 89
      drivers/platform/msm/ipa/ipa_v3/ipa.c
  14. 4 2
      drivers/platform/msm/ipa/ipa_v3/ipa_client.c
  15. 159 67
      drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
  16. 136 32
      drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
  17. 10 6
      drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
  18. 3 0
      drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
  19. 31 12
      drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
  20. 51 22
      drivers/platform/msm/ipa/ipa_v3/ipa_i.h
  21. 6 5
      drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
  22. 1 0
      drivers/platform/msm/ipa/ipa_v3/ipa_odl.h
  23. 24 1
      drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
  24. 6 3
      drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
  25. 153 155
      drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
  26. 38 344
      drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
  27. 2 2
      drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
  28. 112 42
      drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
  29. 8 3
      drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
  30. 97 12
      drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
  31. 14 6
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
  32. 2 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
  33. 130 27
      drivers/platform/msm/ipa/ipa_v3/rmnet_ll_ipa.c
  34. 1 1
      kernel-tests/network_traffic/UlsoPacket.h
  35. 4 4
      kernel-tests/network_traffic/main.cpp

+ 10 - 0
config/sa410mdataipa.h

@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+*/
+
+#define CONFIG_GSI 1
+#define CONFIG_RMNET_IPA3 1
+#define CONFIG_RNDIS_IPA 1
+#define CONFIG_ECM_IPA 1
+#define CONFIG_IPA_WDI_UNIFIED_API 1

+ 6 - 0
config/sa410mdataipa_QGKI.conf

@@ -0,0 +1,6 @@
+export CONFIG_GSI=y
+export CONFIG_IPA_CLIENTS_MANAGER=y
+export CONFIG_IPA_WDI_UNIFIED_API=y
+export CONFIG_RMNET_IPA3=y
+export CONFIG_RNDIS_IPA=y
+export CONFIG_ECM_IPA=y

+ 9 - 0
drivers/platform/msm/Kbuild

@@ -45,6 +45,15 @@ LINUXINCLUDE    += -include $(srctree)/techpack/dataipa/config/dataipa_debug.h
 endif
 endif
 
+ifeq ($(CONFIG_ARCH_SCUBA), y)
+LINUXINCLUDE    += -include $(srctree)/techpack/dataipa/config/sa410mdataipa.h
+include $(srctree)/techpack/dataipa/config/sa410mdataipa_QGKI.conf
+ifneq ($(CONFIG_LOCALVERSION), "-perf")
+include $(srctree)/techpack/dataipa/config/dataipa_debug.conf
+LINUXINCLUDE    += -include $(srctree)/techpack/dataipa/config/dataipa_debug.h
+endif
+endif
+
 ifneq (,$(filter $(CONFIG_IPA3) $(CONFIG_GSI),y m))
 LINUXINCLUDE += -I$(DATAIPADRVTOP)/gsi
 LINUXINCLUDE += -I$(DATAIPADRVTOP)/gsi/gsihal

+ 367 - 47
drivers/platform/msm/gsi/gsi.c

@@ -11,6 +11,8 @@
 #include <linux/msm_gsi.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
 #include "gsi.h"
 #include "gsi_emulation.h"
 #include "gsihal.h"
@@ -58,6 +60,11 @@
 #define GSI_FC_STATE_INDEX_SHRAM			(7)
 #define GSI_FC_PENDING_MASK					(0x00080000)
 
/*
 * NTN3 channel scratch word 4, bit 18: NTN_PENDING_DB_AFTER_ROLLBACK.
 * NOTE(review): the MASK/SHIFT names appear swapped -- callers shift right
 * by ..._MASK (18) and AND with ..._SHIFT (1) to isolate bit 18. Confirm
 * intent before renaming, as a rename must update the call site in lockstep.
 */
#define GSI_NTN3_PENDING_DB_AFTER_RB_MASK 18
#define GSI_NTN3_PENDING_DB_AFTER_RB_SHIFT 1
/*
 * FOR_SEQ_HIGH channel scratch word offset in SHRAM for an endpoint:
 * (((8 * (pipe_id * ctx_size + offset_lines)) + 4) / 4) with ctx_size = 10,
 * offset_lines = 9.  The argument is parenthesized so the macro expands
 * correctly for expression arguments (e.g. a + b), not just identifiers.
 */
#define GSI_GSI_SHRAM_n_EP_FOR_SEQ_HIGH_N_GET(ep_id) (((8 * ((ep_id) * 10 + 9)) + 4) / 4)
+
 #ifndef CONFIG_DEBUG_FS
 void gsi_debugfs_init(void)
 {
@@ -1112,23 +1119,25 @@ static irqreturn_t gsi_msi_isr(int irq, void *ctxt)
 	unsigned long flags;
 	unsigned long cntr;
 	bool empty;
+	uint8_t evt;
+	unsigned long msi;
 	struct gsi_evt_ctx *evt_ctxt;
-	void __iomem *msi_clear_add;
-	void __iomem *msi_add;
 
-	evt_ctxt = (struct gsi_evt_ctx *)(ctxt);
+	/* Determine which event channel to handle */
+	for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
+		if (gsi_ctx->msi.irq[msi] == irq)
+			break;
+	}
+
+	evt = gsi_ctx->msi.evt[msi];
+	evt_ctxt = &gsi_ctx->evtr[evt];
 
 	if (evt_ctxt->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
 		GSIERR("Unexpected irq intf %d\n",
 			evt_ctxt->props.intf);
 		GSI_ASSERT();
 	}
-	/* Clear IRQ by writing irq number to the MSI clear address */
-	msi_clear_add = (void __iomem *)evt_ctxt->props.msi_clear_addr;
-	iowrite32(evt_ctxt->props.intvec, msi_clear_add);
-	/* Writing zero to MSI address as well */
-	msi_add = (void __iomem *)evt_ctxt->props.msi_addr_iore_mapped;
-	iowrite32(0, msi_add);
+
 	/* Clearing IEOB irq if there are any genereated for MSI channel */
 	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
 		gsihal_get_ch_reg_idx(evt_ctxt->id),
@@ -1309,9 +1318,123 @@ int gsi_unmap_base(void)
 }
 EXPORT_SYMBOL(gsi_unmap_base);
 
/*
 * __gsi_msi_write_msg() - platform-MSI "write message" callback.
 * @desc: MSI descriptor being programmed; desc->platform.msi_index selects
 *        the slot in gsi_ctx->msi.
 * @msg:  MSI message (data + address) composed by the MSI domain.
 *
 * Invoked by the MSI core from platform_msi_domain_alloc_irqs() (see
 * __gsi_allocate_msis()).  Caches @msg in gsi_ctx->msi.msg[] so it can
 * later be handed to an event ring by __gsi_pair_msi(), and latches the
 * first-seen MSI address as the single device-wide address reported by
 * gsi_query_device_msi_addr().
 */
static void __gsi_msi_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	u16 msi = 0;

	/* Hard-fail on any invalid argument -- no sane recovery possible here */
	if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(msg) || IS_ERR_OR_NULL(gsi_ctx))
		BUG();

	msi = desc->platform.msi_index;

	/* MSI should be valid and unallocated */
	if ((msi >= gsi_ctx->msi.num) || (test_bit(msi, gsi_ctx->msi.allocated)))
		BUG();

	/* Save the message for later use */
	memcpy(&gsi_ctx->msi.msg[msi], msg, sizeof(*msg));

	dev_notice(gsi_ctx->dev,
		"saved msi %u msg data %u addr 0x%08x%08x\n", msi,
		msg->data, msg->address_hi, msg->address_lo);

	/* Single MSI control is used. So MSI address will be same. */
	if (!gsi_ctx->msi_addr_set) {
		gsi_ctx->msi_addr = gsi_ctx->msi.msg[msi].address_hi;
		gsi_ctx->msi_addr = (gsi_ctx->msi_addr << 32) |
			gsi_ctx->msi.msg[msi].address_lo;
		gsi_ctx->msi_addr_set = true;
	}

	GSIDBG("saved msi %u msg data %u addr 0x%08x%08x, MSI:0x%lx\n", msi,
		msg->data, msg->address_hi, msg->address_lo, gsi_ctx->msi_addr);
}
+
/*
 * __gsi_request_msi_irq() - request the Linux IRQ backing one MSI slot.
 * @msi: index into gsi_ctx->msi; gsi_ctx->msi.irq[msi] must already have
 *       been populated by the caller (__gsi_allocate_msis()).
 *
 * Registers gsi_msi_isr for the slot's IRQ and marks the slot in the
 * "allocated" bitmap so it can later be paired with an event ring.
 *
 * Return: 0 on success, -GSI_STATUS_ERROR if the slot is already
 * allocated or the IRQ request fails.
 */
static int __gsi_request_msi_irq(unsigned long msi)
{
	int result = 0;

	/* Ensure this is not already allocated */
	if (test_bit((int)msi, gsi_ctx->msi.allocated)) {
		GSIERR("MSI %lu already allocated\n", msi);
		return -GSI_STATUS_ERROR;
	}

	/* Request MSI IRQ
	 * NOTE: During the call to devm_request_irq, the
	 * __gsi_msi_write_msg callback is triggered.
	 */
	result = devm_request_irq(gsi_ctx->dev, gsi_ctx->msi.irq[msi],
			(irq_handler_t)gsi_msi_isr, IRQF_TRIGGER_NONE,
			"gsi_msi", gsi_ctx);

	if (result) {
		GSIERR("failed to register msi irq %u idx %lu\n",
			gsi_ctx->msi.irq[msi], msi);
		return -GSI_STATUS_ERROR;
	}

	set_bit(msi, gsi_ctx->msi.allocated);
	return result;
}
+
+static int __gsi_allocate_msis(void)
+{
+	int result = 0;
+	struct msi_desc *desc = NULL;
+	size_t size = 0;
+
+	/* Allocate all MSIs */
+	GSIDBG("gsi_ctx->dev = %lu, gsi_ctx->msi.num = %d", gsi_ctx->dev, gsi_ctx->msi.num);
+	result = platform_msi_domain_alloc_irqs(gsi_ctx->dev, gsi_ctx->msi.num,
+			__gsi_msi_write_msg);
+	if (result) {
+		GSIERR("error allocating platform MSIs - %d\n", result);
+		return -GSI_STATUS_ERROR;
+	}
+	GSIDBG("MSI allocating is succesful\n");
+
+	/* Loop through the allocated MSIs and save the info, then
+	 * request the IRQ.
+	 */
+	for_each_msi_entry(desc, gsi_ctx->dev) {
+		unsigned long msi = desc->platform.msi_index;
+
+		/* Ensure a valid index */
+		if (msi >= gsi_ctx->msi.num) {
+			GSIERR("error invalid MSI %lu\n", msi);
+			result = -GSI_STATUS_ERROR;
+			goto err_free_msis;
+		}
+
+		/* Save IRQ */
+		gsi_ctx->msi.irq[msi] = desc->irq;
+		GSIDBG("desc->irq =%d\n", desc->irq);
+
+		/* Request the IRQ */
+		if (__gsi_request_msi_irq(msi)) {
+			GSIERR("error requesting IRQ for MSI %lu\n",
+				msi);
+			result = -GSI_STATUS_ERROR;
+			goto err_free_msis;
+		}
+		GSIDBG("Requesting IRQ succesful\n");
+	}
+
+	return result;
+
+err_free_msis:
+	size = sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);
+	platform_msi_domain_free_irqs(gsi_ctx->dev);
+	memset(gsi_ctx->msi.allocated, 0, size);
+
+	return result;
+}
+
 int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 {
 	int res;
+	int result = GSI_STATUS_SUCCESS;
 	struct gsihal_reg_gsi_status gsi_status;
 	struct gsihal_reg_gsi_ee_n_cntxt_gsi_irq gen_irq;
 
@@ -1415,14 +1538,24 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	/* If MSIs are enabled, make sure they are set up */
+	if (gsi_ctx->msi.num) {
+		if (__gsi_allocate_msis()) {
+			GSIERR("failed to allocate MSIs\n");
+			goto err_free_irq;
+		}
+	}
+
 	/*
 	 * If base not previously mapped via gsi_map_base(), map it
 	 * now...
 	 */
 	if (!gsi_ctx->base) {
 		res = gsi_map_base(props->phys_addr, props->size, props->ver);
-		if (res)
-			return res;
+		if (res) {
+			result = res;
+			goto err_free_msis;
+		}
 	}
 
 	if (running_emulation) {
@@ -1444,7 +1577,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 			  "failed to remap emulator's interrupt controller HW\n");
 			gsi_unmap_base();
 			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
-			return -GSI_STATUS_RES_ALLOC_FAILURE;
+			result = -GSI_STATUS_RES_ALLOC_FAILURE;
+			goto err_iounmap;
 		}
 
 		GSIDBG(
@@ -1470,7 +1604,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max channels\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ev == 0) {
@@ -1480,12 +1615,14 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max event rings\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
 		GSIERR("max event rings are beyond absolute maximum\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	if (props->mhi_er_id_limits_valid &&
@@ -1497,7 +1634,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("MHI event ring start id %u is beyond max %u\n",
 			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
@@ -1566,19 +1704,34 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		res = setup_emulator_cntrlr(
 		    gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
 		if (res != 0) {
-			gsi_unmap_base();
-			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
-			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
-			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 			GSIERR("setup_emulator_cntrlr() failed\n");
-			return res;
+			result = res;
+			goto err_iounmap;
 		}
 	}
 
 	*dev_hdl = (uintptr_t)gsi_ctx;
 	gsi_ctx->gsi_isr_cache_index = 0;
 
-	return GSI_STATUS_SUCCESS;
+	return result;
+err_iounmap:
+	gsi_unmap_base();
+	if (running_emulation && gsi_ctx->intcntrlr_base != NULL)
+		devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+	gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+
+err_free_msis:
+	if (gsi_ctx->msi.num) {
+		size_t size =
+			sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);
+		platform_msi_domain_free_irqs(gsi_ctx->dev);
+		memset(gsi_ctx->msi.allocated, 0, size);
+	}
+
+err_free_irq:
+	devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+
+	return result;
 }
 EXPORT_SYMBOL(gsi_register_device);
 
@@ -1678,6 +1831,9 @@ int gsi_deregister_device(unsigned long dev_hdl, bool force)
 	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
 	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
 
+	if (gsi_ctx->msi.num)
+		platform_msi_domain_free_irqs(gsi_ctx->dev);
+
 	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
 	gsihal_destroy();
 	gsi_unmap_base();
@@ -1946,6 +2102,49 @@ static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props* pro
 	return gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
 }
 
+static int __gsi_pair_msi(struct gsi_evt_ctx *ctx,
+		struct gsi_evt_ring_props *props)
+{
+	int result = GSI_STATUS_SUCCESS;
+	unsigned long msi = 0;
+
+	if (IS_ERR_OR_NULL(ctx) || IS_ERR_OR_NULL(props) || IS_ERR_OR_NULL(gsi_ctx))
+		BUG();
+
+	/* Find the first unused MSI */
+	msi = find_first_zero_bit(gsi_ctx->msi.used, gsi_ctx->msi.num);
+	if (msi >= gsi_ctx->msi.num) {
+		GSIERR("No free MSIs for evt %u\n", ctx->id);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* Ensure it's been allocated */
+	if (!test_bit((int)msi, gsi_ctx->msi.allocated)) {
+		GSIDBG("MSI %lu not allocated\n", msi);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* Save the event ID for later lookup */
+	gsi_ctx->msi.evt[msi] = ctx->id;
+
+	/* Add this event to the IRQ mask */
+	set_bit((int)ctx->id, &gsi_ctx->msi.mask);
+
+	props->intvec = gsi_ctx->msi.msg[msi].data;
+	props->msi_addr = (uint64_t)gsi_ctx->msi.msg[msi].address_hi << 32 |
+			(uint64_t)gsi_ctx->msi.msg[msi].address_lo;
+
+	GSIDBG("props->intvec = %d, props->msi_addr = %lu\n", props->intvec, props->msi_addr);
+
+	if (props->msi_addr == 0)
+		BUG();
+
+	/* Mark MSI as used */
+	set_bit(msi, gsi_ctx->msi.used);
+
+	return result;
+}
+
 int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 		unsigned long *evt_ring_hdl)
 {
@@ -2008,25 +2207,25 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 	init_completion(&ctx->compl);
 	atomic_set(&ctx->chan_ref_cnt, 0);
 	ctx->num_of_chan_allocated = 0;
-	ctx->props = *props;
+	ctx->id = evt_id;
 
-	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
-		ctx->props.intr == GSI_INTR_MSI) {
-		GSIERR("Registering MSI Interrupt for intvec = %d\n",
-			ctx->props.intvec);
-		res = devm_request_irq(gsi_ctx->dev, ctx->props.msi_irq,
-				gsi_msi_isr,
-				IRQF_TRIGGER_HIGH,
-				"gsi",
-				ctx);
-		if (res) {
-			GSIERR("MSI interrupt reg fails res = %d, intvec = %d\n",
-				res, ctx->props.intvec);
-			GSI_ASSERT();
+	mutex_lock(&gsi_ctx->mlock);
+	/* Pair an MSI with this event if this is an MSI and GPI event channel
+	 * NOTE: This modifies props, so must be before props are saved to ctx.
+	 */
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
+		props->intr == GSI_INTR_MSI) {
+		if (__gsi_pair_msi(ctx, props)) {
+			GSIERR("evt_id=%lu failed to pair MSI\n", evt_id);
+			if (!props->evchid_valid)
+				clear_bit(evt_id, &gsi_ctx->evt_bmap);
+			mutex_unlock(&gsi_ctx->mlock);
+			return -GSI_STATUS_NODEV;
 		}
+		GSIDBG("evt_id=%lu pair MSI succesful\n", evt_id);
 	}
+	ctx->props = *props;
 
-	mutex_lock(&gsi_ctx->mlock);
 	ee = gsi_ctx->per.ee;
 	ev_ch_cmd.opcode = op;
 	ev_ch_cmd.chid = evt_id;
@@ -2144,6 +2343,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
 	struct gsi_evt_ctx *ctx;
 	int res = 0;
+	u32 msi;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2169,10 +2369,20 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	/* Unpair the MSI */
 	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
 		ctx->props.intr == GSI_INTR_MSI) {
 		GSIERR("Interrupt dereg for msi_irq = %d\n", ctx->props.msi_irq);
-		devm_free_irq(gsi_ctx->dev, ctx->props.msi_irq, ctx);
+
+		for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
+			if (gsi_ctx->msi.msg[msi].data == ctx->props.intvec) {
+				mutex_lock(&gsi_ctx->mlock);
+				clear_bit(msi, gsi_ctx->msi.used);
+				gsi_ctx->msi.evt[msi] = 0;
+				clear_bit(evt_ring_hdl, &gsi_ctx->msi.mask);
+				mutex_unlock(&gsi_ctx->mlock);
+			}
+		}
 	}
 
 	mutex_lock(&gsi_ctx->mlock);
@@ -4263,7 +4473,6 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			/* Masking/Unmasking of intrpts is not allowed for MSI chanls */
 			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
 				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
@@ -4314,7 +4523,6 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 				atomic_set(&coal_ctx->poll_mode, mode);
 		}
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			/* Masking/Unmasking of intrpts is not allowed for MSI chanls */
 			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
 				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
@@ -4895,6 +5103,8 @@ int gsi_flow_control_ee(unsigned int chan_idx, int ep_id, unsigned int ee,
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
+	GSIDBG("GSI flow control opcode=%d, ch_id=%d\n", op, chan_idx);
+
 	mutex_lock(&gsi_ctx->mlock);
 	__gsi_config_glob_irq(gsi_ctx->per.ee,
 			gsihal_get_glob_irq_en_gp_int1_mask(), ~0);
@@ -4939,6 +5149,11 @@ wait_again:
 	gsi_ctx->scratch.word0.val = gsihal_read_reg_n(GSI_EE_n_CNTXT_SCRATCH_0,
 					gsi_ctx->per.ee);
 
+	GSIDBG(
+		"Flow control command response GENERIC_CMD_RESPONSE_CODE = %u, val = %u\n",
+		gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code,
+		gsi_ctx->scratch.word0.val);
+
 	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
 		GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
 		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
@@ -4959,6 +5174,7 @@ wait_again:
 	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
 		GSIERR("No response received\n");
 		res = -GSI_STATUS_ERROR;
+		GSI_ASSERT();
 		goto free_lock;
 	}
 
@@ -5097,6 +5313,80 @@ int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
 }
 EXPORT_SYMBOL(gsi_get_refetch_reg);
 
+/*
+ * ; +------------------------------------------------------+
+ * ; | NTN3 Rx Channel Scratch                              |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 32-bit word | Field                          | Bits  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 4           | NTN_PENDING_DB_AFTER_ROLLBACK  | 18-18 |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 5           | NTN_MSI_DB_INDEX_VALUE         | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 6           | NTN_RX_CHAIN_COUNTER           | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 7           | NTN_RX_ERR_COUNTER             | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 8           | NTN_ACCUMULATED_TRES_HANDLED   | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 9           | NTN_ROLLBACKS_COUNTER          | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | FOR_SEQ_HIGH| NTN_MSI_DB_COUNT               | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ *
+ * ; +------------------------------------------------------+
+ * ; | NTN3 Tx Channel Scratch                              |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 32-bit word | Field                          | Bits  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 4           | NTN_PENDING_DB_AFTER_ROLLBACK  | 18-18 |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 5           | NTN_MSI_DB_INDEX_VALUE         | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 6           | TX_DERR_COUNTER                | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 7           | NTN_TX_OOB_COUNTER             | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 8           | NTN_ACCUMULATED_TRES_HANDLED   | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | 9           | NTN_ROLLBACKS_COUNTER          | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ * ; | FOR_SEQ_HIGH| NTN_MSI_DB_COUNT               | 0-31  |
+ * ; +-------------+--------------------------------+-------+
+ */
+int gsi_ntn3_client_stats_get(unsigned ep_id, int scratch_id, unsigned chan_hdl)
+{
+	switch (scratch_id) {
+	case -1:
+		return gsihal_read_reg_n(GSI_GSI_SHRAM_n, GSI_GSI_SHRAM_n_EP_FOR_SEQ_HIGH_N_GET(ep_id));
+	case 4:
+		return (gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_4, gsi_ctx->per.ee,
+			chan_hdl) >> GSI_NTN3_PENDING_DB_AFTER_RB_MASK) &
+			GSI_NTN3_PENDING_DB_AFTER_RB_SHIFT;
+		break;
+	case 5:
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_5, gsi_ctx->per.ee, chan_hdl);
+		break;
+	case 6:
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_6, gsi_ctx->per.ee, chan_hdl);
+		break;
+	case 7:
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_7, gsi_ctx->per.ee, chan_hdl);
+		break;
+	case 8:
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_8, gsi_ctx->per.ee, chan_hdl);
+		break;
+	case 9:
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_SCRATCH_9, gsi_ctx->per.ee, chan_hdl);
+		break;
+	default:
+		GSIERR("invalid scratch id %d\n", scratch_id);
+		return 0;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(gsi_ntn3_client_stats_get);
+
 int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
 	unsigned long chan_hdl)
 {
@@ -5256,6 +5546,24 @@ int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr)
 }
 EXPORT_SYMBOL(gsi_query_msi_addr);
 
+int gsi_query_device_msi_addr(u64 *addr)
+{
+    if (!gsi_ctx) {
+            pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+            return -GSI_STATUS_NODEV;
+    }
+
+	if (gsi_ctx->msi_addr_set)
+		*addr = gsi_ctx->msi_addr;
+	else
+		*addr = 0;
+
+	GSIDBG("Device MSI Addr: 0x%lx", *addr);
+    return 0;
+}
+EXPORT_SYMBOL(gsi_query_device_msi_addr);
+
+
 uint64_t gsi_read_event_ring_wp(int evtr_id, int ee)
 {
 	uint64_t wp;
@@ -5382,6 +5690,13 @@ uint32_t gsi_get_evt_ring_len(int evt_hdl)
 }
 EXPORT_SYMBOL(gsi_get_evt_ring_len);
 
/*
 * gsi_update_almst_empty_thrshold() - program a channel's almost-empty
 * threshold register (GSI_EE_n_CH_k_CH_ALMST_EMPTY_THRSHOLD).
 * @chan_hdl:  GSI channel index.
 * @threshold: value written to the register; presumably the element count
 *             below which the HW raises the almost-empty event -- confirm
 *             against the GSI HW spec.
 *
 * NOTE(review): no validation of gsi_ctx or chan_hdl is performed here;
 * callers must only invoke this after gsi_register_device().
 */
void gsi_update_almst_empty_thrshold(unsigned long chan_hdl, unsigned short threshold)
{
	gsihal_write_reg_nk(GSI_EE_n_CH_k_CH_ALMST_EMPTY_THRSHOLD,
		gsi_ctx->per.ee, chan_hdl, threshold);
}
EXPORT_SYMBOL(gsi_update_almst_empty_thrshold);
+
 static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch(
 	unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr)
 {
@@ -5488,16 +5803,10 @@ int gsi_get_fw_version(struct gsi_fw_version *ver)
 	return 0;
 }
 
-void gsi_update_almst_empty_thrshold(unsigned long chan_hdl, unsigned short threshold)
-{
-	gsihal_write_reg_nk(GSI_EE_n_CH_k_CH_ALMST_EMPTY_THRSHOLD,
-		gsi_ctx->per.ee, chan_hdl, threshold);
-}
-EXPORT_SYMBOL(gsi_update_almst_empty_thrshold);
-
 static int msm_gsi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	int result;
 
 	pr_debug("gsi_probe\n");
 	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
@@ -5511,6 +5820,17 @@ static int msm_gsi_probe(struct platform_device *pdev)
 	if (gsi_ctx->ipc_logbuf == NULL)
 		GSIERR("failed to create IPC log, continue...\n");
 
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,num-msi",
+			&gsi_ctx->msi.num);
+	if (result)
+		GSIERR("No MSIs configured\n");
+	else {
+		if (gsi_ctx->msi.num > GSI_MAX_NUM_MSI) {
+			GSIERR("Num MSIs %u larger than max %u, normalizing\n");
+			gsi_ctx->msi.num = GSI_MAX_NUM_MSI;
+		} else GSIDBG("Num MSIs=%u\n", gsi_ctx->msi.num);
+	}
+
 	gsi_ctx->dev = dev;
 	init_completion(&gsi_ctx->gen_ee_cmd_compl);
 	gsi_debugfs_init();

+ 34 - 1
drivers/platform/msm/gsi/gsi.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef GSI_H
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/iommu.h>
+#include <linux/msi.h>
 
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
@@ -74,6 +75,7 @@
 	} while (0)
 
 #define GSI_IPC_LOG_PAGES 50
+#define GSI_MAX_NUM_MSI 2
 
 enum gsi_ver {
 	GSI_VER_ERR = 0,
@@ -1430,6 +1432,16 @@ struct gsi_log_ts {
 	u32 interrupt_type;
 };
 
/*
 * struct gsi_msi - book-keeping for platform MSIs shared by GSI event rings.
 * Populated during msm_gsi_probe() / gsi_register_device() and paired with
 * event rings in gsi_alloc_evt_ring().
 */
struct gsi_msi {
	/* number of MSIs from the "qcom,num-msi" DT property (<= GSI_MAX_NUM_MSI) */
	u32 num;
	/* slots whose IRQ has been requested (__gsi_request_msi_irq) */
	DECLARE_BITMAP(allocated, GSI_MAX_NUM_MSI);
	/* slots currently paired with an event ring (__gsi_pair_msi) */
	DECLARE_BITMAP(used, GSI_MAX_NUM_MSI);
	/* MSI messages cached by __gsi_msi_write_msg() */
	struct msi_msg msg[GSI_MAX_NUM_MSI];
	/* Linux IRQ number per MSI slot */
	u32 irq[GSI_MAX_NUM_MSI];
	/* event ring id paired with each MSI slot (for ISR lookup) */
	u32 evt[GSI_MAX_NUM_MSI];
	/* bitmask of event ring ids that use MSIs */
	unsigned long mask;
};
+
 struct gsi_ctx {
 	void __iomem *base;
 	struct device *dev;
@@ -1454,6 +1466,9 @@ struct gsi_ctx {
 	void *ipc_logbuf;
 	void *ipc_logbuf_low;
 	struct gsi_coal_chan_info coal_info;
+	bool msi_addr_set;
+	uint64_t msi_addr;
+	struct gsi_msi msi;
 	/*
 	 * The following used only on emulation systems.
 	 */
@@ -2263,6 +2278,15 @@ void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
  */
 int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
 
+/**
+ * gsi_ntn3_client_stats_get - get ntn3 stats
+ *
+ * @ep_id: ep index
+ * @scratch_id: scratch register number
+ * @chan_hdl: gsi channel handle
+ */
+int gsi_ntn3_client_stats_get(unsigned ep_id, int scratch_id, unsigned chan_hdl);
+
 /**
  * gsi_get_drop_stats - get drop stats by GSI
  *
@@ -2357,6 +2381,15 @@ int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
 */
 int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr);
 
+/**
+* gsi_query_device_msi_addr - get gsi device msi address
+*
+* @addr: [out] msi address
+*
+* @Return gsi_status
+*/
+int gsi_query_device_msi_addr(u64 *addr);
+
 /**
 * gsi_update_almst_empty_thrshold - update almst_empty_thrshold
 *

+ 3 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_eth.c

@@ -1223,6 +1223,9 @@ enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type_internal(
 		break;
 	case IPA_ETH_CLIENT_NTN:
 	case IPA_ETH_CLIENT_EMAC:
+#if IPA_ETH_API_VER >= 2
+	case IPA_ETH_CLIENT_NTN3:
+#endif
 			if (dir == IPA_ETH_PIPE_DIR_TX) {
 				ipa_client_type =
 					IPA_CLIENT_ETHERNET_CONS;

+ 47 - 11
drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa_uc_offload.h>
@@ -121,7 +121,11 @@ static int ipa_uc_offload_ntn_register_pm_client(
 	struct ipa_pm_register_params params;
 
 	memset(&params, 0, sizeof(params));
-	params.name = "ETH";
+
+	if (ntn_ctx->proto == IPA_UC_NTN_V2X)
+		params.name = "ETH_v2x";
+	else
+		params.name = "ETH";
 	params.callback = ipa_uc_offload_ntn_pm_cb;
 	params.user_data = ntn_ctx;
 	params.group = IPA_PM_GROUP_DEFAULT;
@@ -130,11 +134,15 @@ static int ipa_uc_offload_ntn_register_pm_client(
 		IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
 		return res;
 	}
-
-	res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
-		IPA_CLIENT_ETHERNET_CONS);
+	if (ntn_ctx->proto == IPA_UC_NTN_V2X)
+		res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
+			IPA_CLIENT_ETHERNET2_CONS);
+	else
+		res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
+			IPA_CLIENT_ETHERNET_CONS);
 	if (res) {
-		IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
+		IPA_UC_OFFLOAD_ERR("fail to associate. PM (%d) Prot: %d\n",
+			res, ntn_ctx->proto);
 		ipa_pm_deregister(ntn_ctx->pm_hdl);
 		ntn_ctx->pm_hdl = ~0;
 		return res;
@@ -322,12 +330,28 @@ static int ipa_uc_offload_reg_intf_internal(
 		return -EINVAL;
 	}
 
+	/* only register IPA properties for uc_ntn */
 	if (ctx->proto == IPA_UC_NTN) {
 		ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
 		if (!ret)
 			outp->clnt_hndl = IPA_UC_NTN;
 	}
 
+	/* only register IPA-pm for uc_ntn_v2x */
+	if (ctx->proto == IPA_UC_NTN_V2X) {
+		/* always in vlan mode */
+		IPA_UC_OFFLOAD_INFO("v2x hdr_len %d\n",
+			inp->hdr_info[0].hdr_len);
+		ctx->hdr_len = inp->hdr_info[0].hdr_len;
+		ret = ipa_uc_offload_ntn_register_pm_client(ctx);
+		if (!ret)
+			outp->clnt_hndl = IPA_UC_NTN_V2X;
+		else
+			IPA_UC_OFFLOAD_ERR("fail to create pm resource\n");
+		/* set to initialized state */
+		ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+	}
+
 	return ret;
 }
 
@@ -461,7 +485,7 @@ static int ipa_uc_offload_conn_pipes_internal(struct ipa_uc_offload_conn_in_para
 
 	offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
 	if (!offload_ctx) {
-		IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+		IPA_UC_OFFLOAD_ERR("Invalid ctx %d\n", inp->clnt_hndl);
 		return -EINVAL;
 	}
 
@@ -471,6 +495,7 @@ static int ipa_uc_offload_conn_pipes_internal(struct ipa_uc_offload_conn_in_para
 	}
 
 	switch (offload_ctx->proto) {
+	case IPA_UC_NTN_V2X:
 	case IPA_UC_NTN:
 		ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
 						offload_ctx);
@@ -503,8 +528,13 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 		return -EFAULT;
 	}
 
-	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
-	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
+	if (ntn_ctx->proto == IPA_UC_NTN_V2X) {
+		ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD);
+		ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET2_CONS);
+	} else {
+		ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
+		ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
+	}
 	ret = ipa3_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl,
 		&ntn_ctx->conn);
 	if (ret) {
@@ -543,6 +573,7 @@ static int ipa_uc_offload_disconn_pipes_internal(u32 clnt_hdl)
 	}
 
 	switch (offload_ctx->proto) {
+	case IPA_UC_NTN_V2X:
 	case IPA_UC_NTN:
 		ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
 		break;
@@ -617,6 +648,11 @@ static int ipa_uc_offload_cleanup_internal(u32 clnt_hdl)
 		ret = ipa_uc_ntn_cleanup(offload_ctx);
 		break;
 
+	case IPA_UC_NTN_V2X:
+		/* only clean-up pm_handle */
+		ipa_uc_offload_ntn_deregister_pm_client(offload_ctx);
+		break;
+
 	default:
 		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
 		ret = -EINVAL;
@@ -650,7 +686,7 @@ int ipa_uc_offload_reg_rdyCB_internal(struct ipa_uc_ready_params *inp)
 		return -EINVAL;
 	}
 
-	if (inp->proto == IPA_UC_NTN)
+	if (inp->proto == IPA_UC_NTN || inp->proto == IPA_UC_NTN_V2X)
 		ret = ipa3_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
 
 	if (ret == -EEXIST) {
@@ -664,7 +700,7 @@ int ipa_uc_offload_reg_rdyCB_internal(struct ipa_uc_ready_params *inp)
 
 void ipa_uc_offload_dereg_rdyCB_internal(enum ipa_uc_offload_proto proto)
 {
-	if (proto == IPA_UC_NTN)
+	if (proto == IPA_UC_NTN || proto == IPA_UC_NTN_V2X)
 		ipa3_ntn_uc_dereg_rdyCB();
 }
 

+ 161 - 0
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/gsi_hwio_def.h

@@ -4987,5 +4987,166 @@ union gsi_hwio_def_ipa_0_gsi_top_xpu3_rgn_wacr_u
   u32 value;
 };
 
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_BP_CNT_LSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_lsb_s
+{
+  u32 bp_cnt_lsb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_lsb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_lsb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_BP_CNT_MSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_msb_s
+{
+  u32 bp_cnt_msb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_msb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_msb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_LSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb_s
+{
+  u32 bp_and_pending_cnt_lsb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_BP_AND_PENDING_CNT_MSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb_s
+{
+  u32 bp_and_pending_cnt_msb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_MCS_BUSY_CNT_LSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb_s
+{
+  u32 mcs_busy_cnt_lsb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_MCS_BUSY_CNT_MSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb_s
+{
+  u32 mcs_busy_cnt_msb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_MCS_IDLE_CNT_LSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb_s
+{
+  u32 mcs_idle_cnt_lsb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_MCS_PROFILING_MCS_IDLE_CNT_MSB
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb_s
+{
+  u32 mcs_idle_cnt_msb : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_GSI_TOP_GSI_DEBUG_SW_MSK_REG_n_SEC_k_RD
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct gsi_hwio_def_ipa_0_gsi_top_gsi_debug_sw_msk_reg_n_sec_k_rd_s
+{
+  u32 msk_reg : 32;
+};
+
+/* Union definition of register */
+union gsi_hwio_def_ipa_0_gsi_top_gsi_debug_sw_msk_reg_n_sec_k_rd_u
+{
+  struct gsi_hwio_def_ipa_0_gsi_top_gsi_debug_sw_msk_reg_n_sec_k_rd_s def;
+  u32 value;
+};
 
 #endif /* __GSI_HWIO_DEF_H__ */

+ 14 - 5
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_hw_common_ex.h

@@ -467,6 +467,11 @@ enum ipa_hw_irq_srcs_e {
  */
 #define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         2
 
+/*
+ * Total number of event ring contexts that need to be saved for Q6
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6         11
+
 /*
  * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
  * are not saved by default (only if ipa_cfg.gen.full_reg_trace =
@@ -483,7 +488,11 @@ enum ipa_hw_irq_srcs_e {
 /*
  * SHRAM Bytes per ch
  */
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM         20
+#else
 #define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM         12
+#endif
 
 /*
  * Total number of rx splt cmdq's see:
@@ -497,19 +506,19 @@ enum ipa_hw_irq_srcs_e {
  * have to be a multiple of four bytes, because the IPA memory reads
  * that they drive are always 32 bits...
  */
-#define IPA_IU_ADDR   0x000A0000
+#define IPA_IU_ADDR   0x001A0000
 #define IPA_IU_SIZE   round_up(40704, sizeof(u32))
 
-#define IPA_SRAM_ADDR 0x00050000
+#define IPA_SRAM_ADDR 0x00150000
 #define IPA_SRAM_SIZE round_up(19232, sizeof(u32))
 
-#define IPA_MBOX_ADDR 0x000C2000
+#define IPA_MBOX_ADDR 0x001C2000
 #define IPA_MBOX_SIZE round_up(256, sizeof(u32))
 
-#define IPA_HRAM_ADDR 0x00060000
+#define IPA_HRAM_ADDR 0x00160000
 #define IPA_HRAM_SIZE round_up(47536, sizeof(u32))
 
-#define IPA_SEQ_ADDR  0x00081000
+#define IPA_SEQ_ADDR  0x00181000
 #define IPA_SEQ_SIZE  round_up(768, sizeof(u32))
 
 #define IPA_GSI_ADDR  0x00006000

+ 43 - 0
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_hwio_def.h

@@ -19044,5 +19044,48 @@ union ipa_hwio_def_ipa_ms_mpu_cfg_xpu3_rgn_end0_u
   u32 value;
 };
 
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of register: IPA_0_IPA_RSRC_GRP_CFG_EXT
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct ipa_hwio_def_ipa_0_ipa_rsrc_grp_cfg_ext_s
+{
+  u32 src_grp_2nd_priority_special_valid : 1;
+  u32 reserved0 : 3;
+  u32 src_grp_2nd_priority_special_index : 3;
+  u32 reserved1 : 25;
+};
+
+/* Union definition of register */
+union ipa_hwio_def_ipa_0_ipa_rsrc_grp_cfg_ext_u
+{
+  struct ipa_hwio_def_ipa_0_ipa_rsrc_grp_cfg_ext_s def;
+  u32 value;
+};
+
+/*===========================================================================*/
+/*!
+  @brief Bit Field definition of fc_stats
+*/
+/*===========================================================================*/
+/* Structure definition of register */
+struct ipa_hwio_def_fc_stats_state_s
+{
+  u32 reserved0 : 16;
+  u32 flow_control : 1;
+  u32 flow_control_primary : 1;
+  u32 flow_control_secondary : 1;
+  u32 pending_flow_control : 1;
+  u32 reserved1 : 12;
+};
+
+/* Union definition of register */
+union ipa_hwio_def_fc_stats_state_u
+{
+  struct ipa_hwio_def_fc_stats_state_s def;
+  u32 value;
+};
 
 #endif /* __IPA_HWIO_DEF_H__ */

+ 113 - 22
drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c

@@ -325,6 +325,11 @@ static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
 	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_CFG,
 			     ipa.gen,
 			     ipa_filt_rout_cfg),
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	GEN_SRC_DST_ADDR_MAP(IPA_RSRC_GRP_CFG_EXT,
+			     ipa.gen,
+			     ipa_rsrc_grp_cfg_ext),
+#endif
 #endif
 
 	/* Debug Registers */
@@ -644,7 +649,32 @@ static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
 	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID,
 			     gsi.debug,
 			     ipa_gsi_top_gsi_debug_qsb_log_err_trns_id),
-
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_BP_CNT_LSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_bp_cnt_lsb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_BP_CNT_MSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_bp_cnt_msb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_BP_AND_PENDING_CNT_LSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_BP_AND_PENDING_CNT_MSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_MCS_BUSY_CNT_LSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_MCS_BUSY_CNT_MSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_MCS_IDLE_CNT_LSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb),
+	GEN_SRC_DST_ADDR_MAP(GSI_MCS_PROFILING_MCS_IDLE_CNT_MSB,
+			     gsi.debug.gsi_mcs_prof_regs,
+			     gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb),
+#endif
 	IPA_REG_SAVE_CFG_ENTRY_GSI_QSB_DEBUG(
 		GSI_DEBUG_QSB_LOG_LAST_MISC_IDn, qsb_log_last_misc),
 
@@ -811,6 +841,20 @@ static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
 					    ee_n_gsi_ch_k_scratch_2),
 	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_3,
 					    ee_n_gsi_ch_k_scratch_3),
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_4,
+					    ee_n_gsi_ch_k_scratch_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_5,
+					    ee_n_gsi_ch_k_scratch_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_6,
+					    ee_n_gsi_ch_k_scratch_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_7,
+					    ee_n_gsi_ch_k_scratch_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_8,
+					    ee_n_gsi_ch_k_scratch_8),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_9,
+					    ee_n_gsi_ch_k_scratch_9),
+#endif
 	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(GSI_MAP_EE_n_CH_k_VP_TABLE,
 					    gsi_map_ee_n_ch_k_vp_table),
 
@@ -850,6 +894,12 @@ static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
 	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(GSI_DEBUG_EE_n_EV_k_VP_TABLE,
 					     gsi_debug_ee_n_ev_k_vp_table),
 
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+/* GSI Debug SW MSK Registers */
+	IPA_REG_SAVE_GSI_DEBUG_MSK_REG_ENTRY(GSI_DEBUG_SW_MSK_REG_n_SEC_k_RD,
+					     regs),
+#endif
+
 #if defined(CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS) && \
 	CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS > 0
 	/* Endp Registers for remaining pipes */
@@ -1047,6 +1097,8 @@ void ipa_save_gsi_ver(void)
 void ipa_save_registers(void)
 {
 	u32 i = 0;
+	u32 phys_ch_idx = 0;
+	u32 n = 0;
 	/* Fetch the number of registers configured to be saved */
 	u32 num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
 	u32 num_uc_per_regs = ARRAY_SIZE(ipa_uc_regs_to_save_array);
@@ -1156,48 +1208,90 @@ void ipa_save_registers(void)
 			(u16)IPA_READ_1xVECTOR_REG(GSI_DEBUG_COUNTERn, i);
 
 	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7; i++) {
-		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.a7[
+		phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.a7[
 			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
-		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+		n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
 
 		if (!ipa_reg_save.gsi.ch_cntxt.a7[
 				i].gsi_map_ee_n_ch_k_vp_table.valid)
 			continue;
 
 		ipa_reg_save.gsi.ch_cntxt.a7[
-			i].mcs_channel_scratch.scratch4.shram =
+			i].mcs_channel_scratch.scratch_for_seq_low.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_LOW);
 
 		ipa_reg_save.gsi.ch_cntxt.a7[
-			i].mcs_channel_scratch.scratch5.shram =
+			i].mcs_channel_scratch.scratch_for_seq_high.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_HIGH);
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].fc_stats_state.value = IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_FC_STATE_OFFSET);
+#endif
 	}
 
 	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
-		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
+		phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
 			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
-		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+		n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
 
 		if (!ipa_reg_save.gsi.ch_cntxt.uc[
 				i].gsi_map_ee_n_ch_k_vp_table.valid)
 			continue;
 
 		ipa_reg_save.gsi.ch_cntxt.uc[
-			i].mcs_channel_scratch.scratch4.shram =
+			i].mcs_channel_scratch.scratch_for_seq_low.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_GSI_OFFSET_WORDS_SCRATCH4);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_LOW);
 
 		ipa_reg_save.gsi.ch_cntxt.uc[
-			i].mcs_channel_scratch.scratch5.shram =
+			i].mcs_channel_scratch.scratch_for_seq_high.shram =
 			IPA_READ_1xVECTOR_REG(
 				GSI_SHRAM_n,
-				n + IPA_GSI_OFFSET_WORDS_SCRATCH5);
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_HIGH);
+
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].fc_stats_state.value = IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_FC_STATE_OFFSET);
+#endif
 	}
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6; i++) {
+		phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.q6[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.q6[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+
+		ipa_reg_save.gsi.ch_cntxt.q6[
+			i].mcs_channel_scratch.scratch_for_seq_low.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_LOW);
+
+		ipa_reg_save.gsi.ch_cntxt.q6[
+			i].mcs_channel_scratch.scratch_for_seq_high.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_HIGH);
+
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+		ipa_reg_save.gsi.ch_cntxt.q6[
+			i].fc_stats_state.value = IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_FC_STATE_OFFSET);
+#endif
+	}
 
 	/*
 	 * On targets that support SSR, we generally want to disable
@@ -1307,20 +1401,17 @@ void ipa_save_registers(void)
  */
 static void ipa_reg_save_gsi_fifo_status(void)
 {
-	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u gsi_fifo_status_ctrl;
 	u8 i;
-
-	memset(&gsi_fifo_status_ctrl, 0, sizeof(gsi_fifo_status_ctrl));
-
 	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++) {
-		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_en = 1;
-		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_port_sel = i;
+		memset(&ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl,
+		       0, sizeof(ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl));
+
+		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_en = 1;
+		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_port_sel = i;
 
 		IPA_MASKED_WRITE_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL,
-				     gsi_fifo_status_ctrl.value);
+				     ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.value);
 
-		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.value =
-			IPA_READ_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL);
 		ipa_reg_save.gsi_fifo_status[i].gsi_tlv_fifo_status.value =
 			IPA_READ_SCALER_REG(IPA_GSI_TLV_FIFO_STATUS);
 		ipa_reg_save.gsi_fifo_status[i].gsi_aos_fifo_status.value =

+ 273 - 82
drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h

@@ -25,84 +25,85 @@
 #include "gsi_hwio_def.h"
 #include "ipa_gcc_hwio_def.h"
 
-#define IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS     0x6
-#define IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS     0x4
-#define IPA_DEBUG_TESTBUS_RSRC_NUM_EP            7
-#define IPA_DEBUG_TESTBUS_RSRC_NUM_GRP           3
-#define IPA_TESTBUS_SEL_EP_MAX                   0x1F
-#define IPA_TESTBUS_SEL_EXTERNAL_MAX             0x40
-#define IPA_TESTBUS_SEL_INTERNAL_MAX             0xFF
-#define IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX        0x40
-#define IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS     0x9
-#define IPA_RSCR_MNGR_DB_RSRC_ID_MAX             0x3F
-#define IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX           0xA
-
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS   (0x0)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0   (0x1)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1   (0x2)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2   (0x3)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3   (0x4)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4   (0x5)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG  (0x9)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0   (0xB)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1   (0xC)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2   (0xD)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3   (0xE)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4   (0xF)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5   (0x10)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6   (0x11)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7   (0x12)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0   (0x13)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1   (0x14)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2   (0x15)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3   (0x16)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4   (0x17)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5   (0x18)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0    (0x1B)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1    (0x1C)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0    (0x1F)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1    (0x20)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2    (0x21)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3    (0x22)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4    (0x23)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0  (0x27)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1  (0x28)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2  (0x29)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3  (0x2A)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0   (0x2B)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1   (0x2C)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2   (0x2D)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3   (0x2E)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0 (0x33)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1 (0x34)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2 (0x35)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3 (0x36)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR     (0x3A)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0  (0x3C)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1  (0x3D)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2    (0x1D)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1   (0x3E)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2   (0x3F)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5   (0x40)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5    (0x41)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3   (0x42)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0   (0x43)
-#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8   (0x44)
-
-#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
-#define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
-
-#define IPA_REG_SAVE_GSI_NUM_EE                  3
-
-#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
-
-#define IPA_GSI_OFFSET_WORDS_SCRATCH4            6
-#define IPA_GSI_OFFSET_WORDS_SCRATCH5            7
-
-#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK 0x7E000
-#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT    13
-
-#define IPA_REG_SAVE_HWP_GSI_EE                  2
+#define IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS              0x6
+#define IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS              0x4
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_EP                     7
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_GRP                    3
+#define IPA_TESTBUS_SEL_EP_MAX                            0x1F
+#define IPA_TESTBUS_SEL_EXTERNAL_MAX                      0x40
+#define IPA_TESTBUS_SEL_INTERNAL_MAX                      0xFF
+#define IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX                 0x40
+#define IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS              0x9
+#define IPA_RSCR_MNGR_DB_RSRC_ID_MAX                      0x3F
+#define IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX                    0xA
+#define IPA_REG_SAVE_FC_STATE_OFFSET                      7
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS            (0x0)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0            (0x1)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1            (0x2)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2            (0x3)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3            (0x4)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4            (0x5)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG           (0x9)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0            (0xB)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1            (0xC)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2            (0xD)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3            (0xE)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_4            (0xF)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_5            (0x10)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_6            (0x11)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_7            (0x12)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0            (0x13)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1            (0x14)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2            (0x15)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3            (0x16)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4            (0x17)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5            (0x18)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0             (0x1B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1             (0x1C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0             (0x1F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1             (0x20)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2             (0x21)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3             (0x22)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4             (0x23)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0           (0x27)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1           (0x28)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2           (0x29)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3           (0x2A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0            (0x2B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1            (0x2C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2            (0x2D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3            (0x2E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0          (0x33)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1          (0x34)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2          (0x35)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3          (0x36)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR              (0x3A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_0           (0x3C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_SDMA_1           (0x3D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_2             (0x1D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_1            (0x3E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_2            (0x3F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_5            (0x40)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_5             (0x41)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR_3            (0x42)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TLV_0            (0x43)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_8            (0x44)
+#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL                    50
+#define IPA_DEBUG_TESTBUS_DEF_INTERNAL                    6
+#define IPA_REG_SAVE_GSI_NUM_EE                           3
+#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS                  22
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+#define IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_LOW          18
+#define IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_HIGH         19
+#else
+#define IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_LOW          6
+#define IPA_GSI_OFFSET_WORDS_SCRATCH_FOR_SEQ_HIGH         7
+#endif
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK          0x7E000
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT             13
+#define IPA_REG_SAVE_HWP_GSI_EE                           2
+#define GSI_HW_DEBUG_SW_MSK_REG_ARRAY_LENGTH              9
+#define GSI_HW_DEBUG_SW_MSK_REG_MAXk                      2
 
 /*
  * A structure used to map a source address to destination address...
@@ -474,6 +475,99 @@ struct map_src_dst_addr_s {
 		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[0].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[1].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[2].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[3].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[4].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[5].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[6].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[7].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[8].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[9].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10),	\
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[10].var_name, \
+		GEN_REG_ATTR(reg_name) }
+
+/*
+ * Macro to define a debug SW MSK register entry for all (n, k)
+ * k bound by GSI_HW_DEBUG_SW_MSK_REG_MAXk
+ */
+#define IPA_REG_SAVE_GSI_DEBUG_MSK_REG_ENTRY(reg_name, var_name) \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 0, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[0].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 0, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[0].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 1, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[1].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 1, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[1].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 2, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[2].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 2, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[2].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 3, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[3].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 3, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[3].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 4, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[4].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 4, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[4].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 5, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[5].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 5, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[5].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 6, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[6].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 6, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[6].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 7, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[7].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 7, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[7].var_name[1], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 8, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[8].var_name[0], \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, 8, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.debug_sw_msk.mask_reg[8].var_name[1], \
 		GEN_REG_ATTR(reg_name) }
 
 #define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
@@ -536,6 +630,39 @@ struct map_src_dst_addr_s {
 		GEN_REG_ATTR(reg_name) }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[0].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[1].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[2].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[3].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[4].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[5].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 6),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[6].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 7),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[7].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 8),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[8].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 9),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[9].var_name, \
+		GEN_REG_ATTR(reg_name) }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 10),	\
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[10].var_name, \
 		GEN_REG_ATTR(reg_name) }
 
 /*
@@ -936,6 +1063,10 @@ struct ipa_gen_regs_s {
 	  ipa_local_pkt_proc_cntxt_base;
 	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s
 	  ipa_rsrc_grp_cfg;
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	struct ipa_hwio_def_ipa_0_ipa_rsrc_grp_cfg_ext_s
+	  ipa_rsrc_grp_cfg_ext;
+#endif
 	struct ipa_hwio_def_ipa_comp_cfg_s
 	  ipa_comp_cfg;
 	struct ipa_hwio_def_ipa_state_dpl_fifo_s
@@ -1573,6 +1704,25 @@ struct ipa_reg_save_gsi_mcs_regs_s {
 		mcs_reg[HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1];
 };
 
/*
 * GSI MCS profiling counters save data struct.
 *
 * Shadow copies of the GSI_TOP MCS profiling counters captured during a
 * register dump: back-pressure count, back-pressure-and-pending count,
 * MCS busy count and MCS idle count.  Each 64-bit counter is stored as
 * separate LSB/MSB 32-bit register images.
 */
struct ipa_reg_save_gsi_mcs_prof_regs_s {
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_lsb_s
	       gsi_top_gsi_mcs_profiling_bp_cnt_lsb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_cnt_msb_s
	       gsi_top_gsi_mcs_profiling_bp_cnt_msb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb_s
	       gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_lsb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb_s
	       gsi_top_gsi_mcs_profiling_bp_and_pending_cnt_msb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb_s
	       gsi_top_gsi_mcs_profiling_mcs_busy_cnt_lsb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb_s
	       gsi_top_gsi_mcs_profiling_mcs_busy_cnt_msb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb_s
	       gsi_top_gsi_mcs_profiling_mcs_idle_cnt_lsb;
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb_s
	       gsi_top_gsi_mcs_profiling_mcs_idle_cnt_msb;
};
+
 /* GSI debug counters save data struct */
 struct ipa_reg_save_gsi_debug_cnt_s {
 	struct
@@ -1615,6 +1765,17 @@ struct ipa_reg_save_gsi_iram_ptr_regs_s {
 #endif
 };
 
/* GSI Debug SW registers save data struct */
/*
 * One row of the GSI_TOP DEBUG_SW_MSK_REG_n_SEC_k register file: all k
 * entries for a single register index n (presumably one entry per
 * security domain, judging by the "sec_k" register name - confirm
 * against the GSI HWIO documentation).
 */
struct gsi_hwio_gsi_top_gsi_debug_sw_msk_regs_entry_rd_s{
	struct gsi_hwio_def_ipa_0_gsi_top_gsi_debug_sw_msk_reg_n_sec_k_rd_s
		regs[GSI_HW_DEBUG_SW_MSK_REG_MAXk];
};

/* Complete DEBUG_SW_MSK register file: one row per register index n. */
struct gsi_hwio_gsi_top_gsi_debug_sw_msk_regs_rd_s{
	struct gsi_hwio_gsi_top_gsi_debug_sw_msk_regs_entry_rd_s
		mask_reg[GSI_HW_DEBUG_SW_MSK_REG_ARRAY_LENGTH];
};
+
 /* GSI SHRAM pointers save data struct */
 struct ipa_reg_save_gsi_shram_ptr_regs_s {
 	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
@@ -1651,20 +1812,27 @@ struct ipa_reg_save_gsi_debug_s {
 	  ipa_gsi_top_gsi_debug_pc_for_debug;
 	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
 	  ipa_gsi_top_gsi_debug_qsb_log_err_trns_id;
-	struct ipa_reg_save_gsi_qsb_debug_s	gsi_qsb_debug;
+	struct ipa_reg_save_gsi_qsb_debug_s		gsi_qsb_debug;
 	struct ipa_reg_save_gsi_test_bus_s		gsi_test_bus;
 	struct ipa_reg_save_gsi_mcs_regs_s		gsi_mcs_regs;
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	struct ipa_reg_save_gsi_mcs_prof_regs_s		gsi_mcs_prof_regs;
+#endif
 	struct ipa_reg_save_gsi_debug_cnt_s		gsi_cnt_regs;
 	struct ipa_reg_save_gsi_iram_ptr_regs_s		gsi_iram_ptrs;
 	struct ipa_reg_save_gsi_shram_ptr_regs_s	gsi_shram_ptrs;
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	struct gsi_hwio_gsi_top_gsi_debug_sw_msk_regs_rd_s
+	       debug_sw_msk;
+#endif
 };
 
 /* GSI MCS channel scratch registers save data struct */
 struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s {
 	struct gsi_hwio_def_gsi_shram_n_s
-	  scratch4;
+	  scratch_for_seq_low;
 	struct gsi_hwio_def_gsi_shram_n_s
-	  scratch5;
+	  scratch_for_seq_high;
 };
 
 /* GSI Channel Context register save data struct */
@@ -1699,9 +1867,28 @@ struct ipa_reg_save_gsi_ch_cntxt_per_ep_s {
 	  ee_n_gsi_ch_k_scratch_2;
 	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s
 	  ee_n_gsi_ch_k_scratch_3;
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_4_s
+	  ee_n_gsi_ch_k_scratch_4;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_5_s
+	  ee_n_gsi_ch_k_scratch_5;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_6_s
+	  ee_n_gsi_ch_k_scratch_6;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_7_s
+	  ee_n_gsi_ch_k_scratch_7;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_8_s
+	  ee_n_gsi_ch_k_scratch_8;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_9_s
+	  ee_n_gsi_ch_k_scratch_9;
+#endif
 	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s
 	  gsi_map_ee_n_ch_k_vp_table;
-	struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s mcs_channel_scratch;
+	struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s
+	  mcs_channel_scratch;
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+	union ipa_hwio_def_fc_stats_state_u
+	  fc_stats_state;
+#endif
 };
 
 /* GSI Event Context register save data struct */
@@ -1770,6 +1957,10 @@ struct ipa_reg_save_gsi_evt_cntxt_s {
 		a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
 	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
 		uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
+#ifdef CONFIG_IPA3_REGDUMP_IPA_5_0
+    struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		q6[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6];
+#endif
 };
 
 /* Top level IPA register save data struct */

+ 34 - 89
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -69,6 +69,8 @@
 #define DEFAULT_MPM_TETH_AGGR_SIZE 24
 #define DEFAULT_MPM_UC_THRESH_SIZE 4
 
+RAW_NOTIFIER_HEAD(ipa_rmnet_notifier_list);
+
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
  */
@@ -3935,6 +3937,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		break;
 
 	case IPA_IOC_ADD_EoGRE_MAPPING:
+		IPADBG("Got IPA_IOC_ADD_EoGRE_MAPPING\n");
 		if (copy_from_user(
 				&eogre_info,
 				(const void __user *) arg,
@@ -3946,6 +3949,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 		retval = ipa3_check_eogre(&eogre_info, &send2uC, &send2ipacm);
 
+		ipa3_ctx->eogre_enabled = (retval == 0);
+
 		if (retval == 0 && send2uC == true) {
 			/*
 			 * Send map to uC...
@@ -3961,13 +3966,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			retval = ipa3_send_eogre_info(IPA_EoGRE_UP_EVENT, &eogre_info);
 		}
 
-		if (retval == 0) {
-			ipa3_ctx->eogre_enabled = true;
+		if (retval != 0) {
+			ipa3_ctx->eogre_enabled = false;
 		}
 
 		break;
 
 	case IPA_IOC_DEL_EoGRE_MAPPING:
+		IPADBG("Got IPA_IOC_DEL_EoGRE_MAPPING\n");
+
 		memset(&eogre_info, 0, sizeof(eogre_info));
 
 		retval = ipa3_check_eogre(&eogre_info, &send2uC, &send2ipacm);
@@ -4329,16 +4336,19 @@ static void ipa3_q6_avoid_holb(void)
 			 * setting HOLB on Q6 pipes, and from APPS perspective
 			 * they are not valid, therefore, the above function
 			 * will fail.
+			 * Also don't reset the HOLB timer to 0 for Q6 pipes.
 			 */
-			ipahal_write_reg_n_fields(
-				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
-				ep_idx, &ep_holb);
+
+
+
 			ipahal_write_reg_n_fields(
 				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 				ep_idx, &ep_holb);
 
-			/* IPA4.5 issue requires HOLB_EN to be written twice */
-			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+			/* For targets > IPA_4.0 issue requires HOLB_EN to
+			 * be written twice.
+			 */
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 				ipahal_write_reg_n_fields(
 					IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 					ep_idx, &ep_holb);
@@ -7264,6 +7274,10 @@ static inline void ipa3_register_to_fmwk(void)
 	data.ipa_rmnet_ll_xmit = ipa3_rmnet_ll_xmit;
 	data.ipa_register_rmnet_ll_cb = ipa3_register_rmnet_ll_cb;
 	data.ipa_unregister_rmnet_ll_cb = ipa3_unregister_rmnet_ll_cb;
+	data.ipa_register_notifier =
+		ipa3_register_notifier;
+	data.ipa_unregister_notifier =
+		ipa3_unregister_notifier;
 
 	if (ipa_fmwk_register_ipa(&data)) {
 		IPAERR("couldn't register to IPA framework\n");
@@ -7287,7 +7301,7 @@ void ipa3_notify_clients_registered(void)
 }
 EXPORT_SYMBOL(ipa3_notify_clients_registered);
 
-void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
+static void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
 {
 	struct ipa_smmu_cb_ctx *cb;
 	u64 rounddown_addr;
@@ -7303,21 +7317,14 @@ void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
 			IPAERR("iommu mapping failed for gsi_msi_addr\n");
 			ipa_assert();
 		}
-		ipa3_ctx->gsi_msi_clear_addr_io_mapped =
-			(u64)ioremap(ipa3_ctx->gsi_msi_clear_addr, 4);
-		ipa3_ctx->gsi_msi_addr_io_mapped =
-			(u64)ioremap(ipa3_ctx->gsi_msi_addr, 4);
 	} else {
-		iounmap((int *) ipa3_ctx->gsi_msi_clear_addr_io_mapped);
-		iounmap((int *) ipa3_ctx->gsi_msi_addr_io_mapped);
 		res = iommu_unmap(cb->iommu_domain, rounddown_addr, PAGE_SIZE);
-		ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
-		ipa3_ctx->gsi_msi_addr_io_mapped = 0;
 		if (res)
 			IPAERR("smmu unmap for gsi_msi_addr failed %d\n", res);
 	}
 }
 
+
 /**
  * ipa3_post_init() - Initialize the IPA Driver (Part II).
  * This part contains all initialization which requires interaction with
@@ -7700,11 +7707,11 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 
 	ipa_ut_module_init();
 
+	/* Query MSI address. */
+	gsi_query_device_msi_addr(&ipa3_ctx->gsi_msi_addr);
 	/* Map the MSI addresses for the GSI to access, for LL and QMAP FC pipe */
-	if (!ipa3_ctx->gsi_msi_addr_io_mapped &&
-		!ipa3_ctx->gsi_msi_clear_addr_io_mapped &&
-		(ipa3_ctx->rmnet_ll_enable || ipa3_ctx->rmnet_ctl_enable))
-			ipa_gsi_map_unmap_gsi_msi_addr(true);
+	if (ipa3_ctx->gsi_msi_addr)
+		ipa_gsi_map_unmap_gsi_msi_addr(true);
 
 	if(!ipa_spearhead_stats_init())
 		IPADBG("Fail to init spearhead ipa lnx module");
@@ -8609,18 +8616,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->ipa_gpi_event_rp_ddr = resource_p->ipa_gpi_event_rp_ddr;
 	ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
 	ipa3_ctx->rmnet_ll_enable = resource_p->rmnet_ll_enable;
-	ipa3_ctx->gsi_msi_addr = resource_p->gsi_msi_addr;
-	ipa3_ctx->gsi_msi_addr_io_mapped = 0;
-	ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
-	ipa3_ctx->gsi_msi_clear_addr = resource_p->gsi_msi_clear_addr;
-	ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec =
-		resource_p->gsi_rmnet_ctl_evt_ring_intvec;
-	ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq =
-		resource_p->gsi_rmnet_ctl_evt_ring_irq;
-	ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec =
-		resource_p->gsi_rmnet_ll_evt_ring_intvec;
-	ipa3_ctx->gsi_rmnet_ll_evt_ring_irq =
-		resource_p->gsi_rmnet_ll_evt_ring_irq;
 	ipa3_ctx->tx_wrapper_cache_max_size = get_tx_wrapper_cache_size(
 			resource_p->tx_wrapper_cache_max_size);
 	ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
@@ -8827,7 +8822,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	}
 
 	IPADBG(
-	    "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
+	    "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%0x%x) with len (0x%x)\n",
 	    resource_p->ipa_mem_base,
 	    ipa3_ctx->ctrl->ipa_reg_base_ofst,
 	    resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
@@ -9140,6 +9135,12 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 			goto fail_rmnet_ll_init;
 		}
 	}
+	ipa3_ctx->ipa_rmnet_notifier_list_internal = &ipa_rmnet_notifier_list;
+	spin_lock_init(&ipa3_ctx->notifier_lock);
+	ipa3_ctx->buff_above_thresh_for_def_pipe_notified = false;
+	ipa3_ctx->buff_above_thresh_for_coal_pipe_notified = false;
+	ipa3_ctx->buff_below_thresh_for_def_pipe_notified = false;
+	ipa3_ctx->buff_below_thresh_for_coal_pipe_notified = false;
 
 	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
 	ipa3_ctx->is_modem_up = false;
@@ -9469,10 +9470,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	u32 ipa_holb_monitor_max_cnt_usb;
 	u32 ipa_holb_monitor_max_cnt_11ad;
 	u32 ipa_wan_aggr_pkt_cnt;
-	u32 gsi_msi_addr;
-	u32 gsi_msi_clear_addr;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
 
 	/* initialize ipa3_res */
 	ipa_drv_res->ipa_wdi3_2g_holb_timeout = 0;
@@ -9813,58 +9810,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 			? "True" : "False");
 	}
 
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-msi-addr",
-		&gsi_msi_addr);
-	IPADBG("GSI MSI addr = %lu\n", gsi_msi_addr);
-	ipa_drv_res->gsi_msi_addr = (u64)gsi_msi_addr;
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-msi-clear-addr",
-		&gsi_msi_clear_addr);
-	IPADBG("GSI MSI clear addr = %lu\n", gsi_msi_clear_addr);
-	ipa_drv_res->gsi_msi_clear_addr = (u64)gsi_msi_clear_addr;
-
-	/* Get IPA MSI IRQ number for rmnet_ctl */
-	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-		"msi-irq-rmnet-ctl");
-	if (!resource) {
-		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = 0;
-		IPAERR(":get resource failed for msi-irq-rmnet-ctl\n");
-	} else {
-		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = resource->start;
-		IPADBG(": msi-irq-rmnet-ctl = %d\n",
-			ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq);
-	}
-
-	/* Get IPA MSI IRQ number for rmnet_ll */
-	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-		"msi-irq-rmnet-ll");
-	if (!resource) {
-		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = 0;
-		IPAERR(":get resource failed for msi-irq-rmnet-ll\n");
-	} else {
-		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = resource->start;
-		IPADBG(": msi-irq-rmnet-ll = %d\n",
-			ipa_drv_res->gsi_rmnet_ll_evt_ring_irq);
-	}
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-rmnet-ctl-evt-ring-intvec",
-		&gsi_rmnet_ctl_evt_ring_intvec);
-	IPADBG("gsi_rmnet_ctl_evt_ring_intvec = %u\n",
-		gsi_rmnet_ctl_evt_ring_intvec);
-	ipa_drv_res->gsi_rmnet_ctl_evt_ring_intvec =
-		gsi_rmnet_ctl_evt_ring_intvec;
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-rmnet-ll-evt-ring-intvec",
-		&gsi_rmnet_ll_evt_ring_intvec);
-	IPADBG("gsi_rmnet_ll_evt_ring_intvec = %u\n",
-		gsi_rmnet_ll_evt_ring_intvec);
-	ipa_drv_res->gsi_rmnet_ll_evt_ring_intvec =
-		gsi_rmnet_ll_evt_ring_intvec;
-
 	result = of_property_read_string(pdev->dev.of_node,
 			"qcom,use-gsi-ipa-fw", &ipa_drv_res->gsi_fw_file_name);
 	if (!result)

+ 4 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -1481,8 +1481,10 @@ int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
 			IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 			pipe_idx, &ep_holb);
 
-		/* IPA4.5 issue requires HOLB_EN to be written twice */
-		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		/* For targets > IPA_4.0 issue requires HOLB_EN to be
+		 * written twice.
+		 */
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 			ipahal_write_reg_n_fields(
 				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 				pipe_idx, &ep_holb);

+ 159 - 67
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -40,6 +40,7 @@ static const char * const ipa_eth_clients_strings[] = {
 	__stringify(RTK8111K),
 	__stringify(RTK8125B),
 	__stringify(NTN),
+	__stringify(NTN3),
 	__stringify(EMAC),
 };
 
@@ -1553,7 +1554,14 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		"lan_repl_rx_empty=%u\n"
 		"flow_enable=%u\n"
 		"flow_disable=%u\n"
-		"rx_page_drop_cnt=%u\n",
+		"rx_page_drop_cnt=%u\n"
+		"lower_order=%u\n"
+		"rmnet_notifier_enabled=%u\n"
+		"num_buff_above_thresh_for_def_pipe_notified=%u\n"
+		"num_buff_below_thresh_for_def_pipe_notified=%u\n"
+		"num_buff_above_thresh_for_coal_pipe_notified=%u\n"
+		"num_buff_below_thresh_for_coal_pipe_notified=%u\n"
+		"pipe_setup_fail_cnt=%u\n",
 		ipa3_ctx->stats.tx_sw_pkts,
 		ipa3_ctx->stats.tx_hw_pkts,
 		ipa3_ctx->stats.tx_non_linear,
@@ -1573,7 +1581,14 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		ipa3_ctx->stats.lan_repl_rx_empty,
 		ipa3_ctx->stats.flow_enable,
 		ipa3_ctx->stats.flow_disable,
-		ipa3_ctx->stats.rx_page_drop_cnt
+		ipa3_ctx->stats.rx_page_drop_cnt,
+		ipa3_ctx->stats.lower_order,
+		ipa3_ctx->ipa_rmnet_notifier_enabled,
+		atomic_read(&ipa3_ctx->stats.num_buff_above_thresh_for_def_pipe_notified),
+		atomic_read(&ipa3_ctx->stats.num_buff_below_thresh_for_def_pipe_notified),
+		atomic_read(&ipa3_ctx->stats.num_buff_above_thresh_for_coal_pipe_notified),
+		atomic_read(&ipa3_ctx->stats.num_buff_below_thresh_for_coal_pipe_notified),
+		ipa3_ctx->stats.pipe_setup_fail_cnt
 		);
 	cnt += nbytes;
 
@@ -1820,76 +1835,83 @@ nxt_clnt_cons:
 static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
-#define TX_STATS(y) \
-	stats.tx_ch_stats[0].y
-#define RX_STATS(y) \
-	stats.rx_ch_stats[0].y
+#define TX_STATS(x, y) \
+	stats.tx_ch_stats[x].y
+#define RX_STATS(x, y) \
+	stats.rx_ch_stats[x].y
 
 	struct Ipa3HwStatsNTNInfoData_t stats;
 	int nbytes;
-	int cnt = 0;
+	int cnt = 0, i = 0;
 
 	if (!ipa3_get_ntn_stats(&stats)) {
-		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
-			"TX num_pkts_processed=%u\n"
-			"TX ringFull=%u\n"
-			"TX ringEmpty=%u\n"
-			"TX ringUsageHigh=%u\n"
-			"TX ringUsageLow=%u\n"
-			"TX RingUtilCount=%u\n"
-			"TX bamFifoFull=%u\n"
-			"TX bamFifoEmpty=%u\n"
-			"TX bamFifoUsageHigh=%u\n"
-			"TX bamFifoUsageLow=%u\n"
-			"TX bamUtilCount=%u\n"
-			"TX num_db=%u\n"
-			"TX num_qmb_int_handled=%u\n"
-			"TX ipa_pipe_number=%u\n",
-			TX_STATS(num_pkts_processed),
-			TX_STATS(ring_stats.ringFull),
-			TX_STATS(ring_stats.ringEmpty),
-			TX_STATS(ring_stats.ringUsageHigh),
-			TX_STATS(ring_stats.ringUsageLow),
-			TX_STATS(ring_stats.RingUtilCount),
-			TX_STATS(gsi_stats.bamFifoFull),
-			TX_STATS(gsi_stats.bamFifoEmpty),
-			TX_STATS(gsi_stats.bamFifoUsageHigh),
-			TX_STATS(gsi_stats.bamFifoUsageLow),
-			TX_STATS(gsi_stats.bamUtilCount),
-			TX_STATS(num_db),
-			TX_STATS(num_qmb_int_handled),
-			TX_STATS(ipa_pipe_number));
-		cnt += nbytes;
-		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-			"RX num_pkts_processed=%u\n"
-			"RX ringFull=%u\n"
-			"RX ringEmpty=%u\n"
-			"RX ringUsageHigh=%u\n"
-			"RX ringUsageLow=%u\n"
-			"RX RingUtilCount=%u\n"
-			"RX bamFifoFull=%u\n"
-			"RX bamFifoEmpty=%u\n"
-			"RX bamFifoUsageHigh=%u\n"
-			"RX bamFifoUsageLow=%u\n"
-			"RX bamUtilCount=%u\n"
-			"RX num_db=%u\n"
-			"RX num_qmb_int_handled=%u\n"
-			"RX ipa_pipe_number=%u\n",
-			RX_STATS(num_pkts_processed),
-			RX_STATS(ring_stats.ringFull),
-			RX_STATS(ring_stats.ringEmpty),
-			RX_STATS(ring_stats.ringUsageHigh),
-			RX_STATS(ring_stats.ringUsageLow),
-			RX_STATS(ring_stats.RingUtilCount),
-			RX_STATS(gsi_stats.bamFifoFull),
-			RX_STATS(gsi_stats.bamFifoEmpty),
-			RX_STATS(gsi_stats.bamFifoUsageHigh),
-			RX_STATS(gsi_stats.bamFifoUsageLow),
-			RX_STATS(gsi_stats.bamUtilCount),
-			RX_STATS(num_db),
-			RX_STATS(num_qmb_int_handled),
-			RX_STATS(ipa_pipe_number));
-		cnt += nbytes;
+		for (i = 0; i < IPA_UC_MAX_NTN_TX_CHANNELS; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt,
+				"TX%d num_pkts_psr=%u\n"
+				"TX%d ringFull=%u\n"
+				"TX%d ringEmpty=%u\n"
+				"TX%d ringUsageHigh=%u\n"
+				"TX%d ringUsageLow=%u\n"
+				"TX%d RingUtilCount=%u\n"
+				"TX%d bamFifoFull=%u\n"
+				"TX%d bamFifoEmpty=%u\n"
+				"TX%d bamFifoUsageHigh=%u\n"
+				"TX%d bamFifoUsageLow=%u\n"
+				"TX%d bamUtilCount=%u\n"
+				"TX%d num_db=%u\n"
+				"TX%d num_qmb_int_handled=%u\n"
+				"TX%d ipa_pipe_number=%u\n",
+				i, TX_STATS(i, num_pkts_processed),
+				i, TX_STATS(i, ring_stats.ringFull),
+				i, TX_STATS(i, ring_stats.ringEmpty),
+				i, TX_STATS(i, ring_stats.ringUsageHigh),
+				i, TX_STATS(i, ring_stats.ringUsageLow),
+				i, TX_STATS(i, ring_stats.RingUtilCount),
+				i, TX_STATS(i, gsi_stats.bamFifoFull),
+				i, TX_STATS(i, gsi_stats.bamFifoEmpty),
+				i, TX_STATS(i, gsi_stats.bamFifoUsageHigh),
+				i, TX_STATS(i, gsi_stats.bamFifoUsageLow),
+				i, TX_STATS(i, gsi_stats.bamUtilCount),
+				i, TX_STATS(i, num_db),
+				i, TX_STATS(i, num_qmb_int_handled),
+				i, TX_STATS(i, ipa_pipe_number));
+			cnt += nbytes;
+		}
+
+		for (i = 0; i < IPA_UC_MAX_NTN_RX_CHANNELS; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt,
+				"RX%d num_pkts_psr=%u\n"
+				"RX%d ringFull=%u\n"
+				"RX%d ringEmpty=%u\n"
+				"RX%d ringUsageHigh=%u\n"
+				"RX%d ringUsageLow=%u\n"
+				"RX%d RingUtilCount=%u\n"
+				"RX%d bamFifoFull=%u\n"
+				"RX%d bamFifoEmpty=%u\n"
+				"RX%d bamFifoUsageHigh=%u\n"
+				"RX%d bamFifoUsageLow=%u\n"
+				"RX%d bamUtilCount=%u\n"
+				"RX%d num_db=%u\n"
+				"RX%d num_qmb_int_handled=%u\n"
+				"RX%d ipa_pipe_number=%u\n",
+				i, RX_STATS(i, num_pkts_processed),
+				i, RX_STATS(i, ring_stats.ringFull),
+				i, RX_STATS(i, ring_stats.ringEmpty),
+				i, RX_STATS(i, ring_stats.ringUsageHigh),
+				i, RX_STATS(i, ring_stats.ringUsageLow),
+				i, RX_STATS(i, ring_stats.RingUtilCount),
+				i, RX_STATS(i, gsi_stats.bamFifoFull),
+				i, RX_STATS(i, gsi_stats.bamFifoEmpty),
+				i, RX_STATS(i, gsi_stats.bamFifoUsageHigh),
+				i, RX_STATS(i, gsi_stats.bamFifoUsageLow),
+				i, RX_STATS(i, gsi_stats.bamUtilCount),
+				i, RX_STATS(i, num_db),
+				i, RX_STATS(i, num_qmb_int_handled),
+				i, RX_STATS(i, ipa_pipe_number));
+			cnt += nbytes;
+		}
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
 				"Fail to read NTN stats\n");
@@ -3636,6 +3658,55 @@ done:
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
 
#if IPA_ETH_API_VER >= 2
/*
 * __ipa_ntn3_client_stats_read() - format one NTN3 client's TX and RX
 * statistics into the shared debugfs buffer.
 *
 * @cnt:           in/out cursor into dbg_buff; advanced past the text
 *                 appended by this call.
 * @s:             NTN3 TX/RX statistics snapshot to print.
 * @str_client_tx: client-name prefix for the TX lines.
 * @str_client_rx: client-name prefix for the RX lines.
 *
 * Writes the TX block first, then the RX block, each bounded by the
 * remaining room in dbg_buff (scnprintf never overruns and returns the
 * number of characters actually written).
 */
static void __ipa_ntn3_client_stats_read(int *cnt, struct ipa_ntn3_client_stats *s,
	const char *str_client_tx, const char *str_client_rx)
{
	int nbytes;

	nbytes = scnprintf(dbg_buff + *cnt, IPA_MAX_MSG_LEN - *cnt,
		"%s_RP=0x%x\n"
		"%s_WP=0x%x\n"
		"%s_ntn_pending_db_after_rollback:%u\n"
		"%s_msi_db_idx_val:%u\n"
		"%s_tx_derr_counter:%u\n"
		"%s_ntn_tx_oob_counter:%u\n"
		"%s_ntn_accumulated_tres_handled:%u\n"
		"%s_ntn_rollbacks_counter:%u\n"
		"%s_ntn_msi_db_count:%u\n",
		str_client_tx, s->tx_stats.rp,
		str_client_tx, s->tx_stats.wp,
		str_client_tx, s->tx_stats.pending_db_after_rollback,
		str_client_tx, s->tx_stats.msi_db_idx,
		str_client_tx, s->tx_stats.derr_cnt,
		str_client_tx, s->tx_stats.oob_cnt,
		str_client_tx, s->tx_stats.tres_handled,
		str_client_tx, s->tx_stats.rollbacks_cnt,
		str_client_tx, s->tx_stats.msi_db_cnt);
	*cnt += nbytes;
	/* RX block: same layout, but chain/err counters instead of derr/oob. */
	nbytes = scnprintf(dbg_buff + *cnt, IPA_MAX_MSG_LEN - *cnt,
		"%s_RP=0x%x\n"
		"%s_WP=0x%x\n"
		"%s_ntn_pending_db_after_rollback:%u\n"
		"%s_msi_db_idx_val:%u\n"
		"%s_ntn_rx_chain_counter:%u\n"
		"%s_ntn_rx_err_counter:%u\n"
		"%s_ntn_accumulated_tres_handled:%u\n"
		"%s_ntn_rollbacks_counter:%u\n"
		"%s_ntn_msi_db_count:%u\n",
		str_client_rx, s->rx_stats.rp,
		str_client_rx, s->rx_stats.wp,
		str_client_rx, s->rx_stats.pending_db_after_rollback,
		str_client_rx, s->rx_stats.msi_db_idx,
		str_client_rx, s->rx_stats.chain_cnt,
		str_client_rx, s->rx_stats.err_cnt,
		str_client_rx, s->rx_stats.tres_handled,
		str_client_rx, s->rx_stats.rollbacks_cnt,
		str_client_rx, s->rx_stats.msi_db_cnt);
	*cnt += nbytes;
}
#endif
+
 static ssize_t ipa3_eth_read_err_status(struct file *file,
 	char __user *ubuf, size_t count, loff_t *ppos)
 {
@@ -3646,6 +3717,10 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
 	struct ipa3_eth_error_stats tx_stats;
 	struct ipa3_eth_error_stats rx_stats;
 	int scratch_num;
+#if IPA_ETH_API_VER >= 2
+	struct ipa_ntn3_client_stats ntn3_stats;
+	const char *str_client_tx, *str_client_rx;
+#endif
 
 	memset(&tx_stats, 0, sizeof(struct ipa3_eth_error_stats));
 	memset(&rx_stats, 0, sizeof(struct ipa3_eth_error_stats));
@@ -3659,6 +3734,7 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
 		goto done;
 	}
 	client = (struct ipa_eth_client *)file->private_data;
+
 	switch (client->client_type) {
 	case IPA_ETH_CLIENT_AQC107:
 	case IPA_ETH_CLIENT_AQC113:
@@ -3675,6 +3751,22 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
 		tx_ep = IPA_CLIENT_ETHERNET_CONS;
 		rx_ep = IPA_CLIENT_ETHERNET_PROD;
 		scratch_num = 6;
+#if IPA_ETH_API_VER >= 2
+	case IPA_ETH_CLIENT_NTN3:
+
+		memset(&ntn3_stats, 0, sizeof(ntn3_stats));
+		if (strstr(file->f_path.dentry->d_name.name, "0_status")) {
+			ipa_eth_ntn3_get_status(&ntn3_stats, 0);
+			str_client_tx = ipa_clients_strings[IPA_CLIENT_ETHERNET_CONS];
+			str_client_rx = ipa_clients_strings[IPA_CLIENT_ETHERNET_PROD];
+		} else {
+			ipa_eth_ntn3_get_status(&ntn3_stats, 1);
+			str_client_tx = ipa_clients_strings[IPA_CLIENT_ETHERNET2_CONS];
+			str_client_rx = ipa_clients_strings[IPA_CLIENT_ETHERNET2_PROD];
+		}
+		__ipa_ntn3_client_stats_read(&cnt, &ntn3_stats, str_client_tx, str_client_rx);
+		goto done;
+#endif
 	default:
 		IPAERR("Not supported\n");
 		return 0;

+ 136 - 32
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -14,6 +14,7 @@
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <asm/page.h>
+#include <linux/mutex.h>
 #include "gsi.h"
 #include "ipa_i.h"
 #include "ipa_trace.h"
@@ -42,7 +43,7 @@
 
 #define IPA_GSB_AGGR_BYTE_LIMIT 14
 #define IPA_GSB_RX_BUFF_BASE_SZ 16384
-#define IPA_QMAP_RX_BUFF_BASE_SZ 512
+#define IPA_QMAP_RX_BUFF_BASE_SZ 576
 #define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
 #define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
 		(X) + NET_SKB_PAD) +\
@@ -79,6 +80,8 @@
 #define IPA_GSI_CH_20_WA_VIRT_CHAN 29
 
 #define IPA_DEFAULT_SYS_YELLOW_WM 32
+/* High threshold is set for 50% of the buffer */
+#define IPA_BUFF_THRESHOLD_HIGH 112
 #define IPA_REPL_XFER_THRESH 20
 #define IPA_REPL_XFER_MAX 36
 
@@ -1586,6 +1589,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 fail_gen3:
 	ipa3_disable_data_path(ipa_ep_idx);
 fail_repl:
+	if (IPA_CLIENT_IS_CONS(ep->client) && !ep->sys->common_buff_pool)
+		ipa3_cleanup_rx(ep->sys);
+
 	ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
 	if (ep->sys->repl && !ep->sys->common_buff_pool) {
 		kfree(ep->sys->repl);
@@ -1617,6 +1623,7 @@ fail_wq:
 fail_and_disable_clocks:
 	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
 fail_gen:
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.pipe_setup_fail_cnt);
 	return result;
 }
 
@@ -2286,6 +2293,27 @@ fail_kmem_cache_alloc:
 	}
 }
 
+static struct page *ipa3_alloc_page(
+	gfp_t flag, u32 *page_order, bool try_lower)
+{
+	struct page *page = NULL;
+	u32 p_order = *page_order;
+
+	page = __dev_alloc_pages(flag, p_order);
+	/* We will only try 1 page order lower. */
+	if (unlikely(!page)) {
+		if (try_lower && p_order > 0) {
+			p_order = p_order - 1;
+			page = __dev_alloc_pages(flag, p_order);
+			if (likely(page))
+				ipa3_ctx->stats.lower_order++;
+		}
+	}
+	*page_order = p_order;
+	return page;
+}
+
+
 static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	gfp_t flag, bool is_tmp_alloc, struct ipa3_sys_context *sys)
 {
@@ -2296,13 +2324,18 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 		flag);
 	if (unlikely(!rx_pkt))
 		return NULL;
-	rx_pkt->len = PAGE_SIZE << sys->page_order;
-	rx_pkt->page_data.page = __dev_alloc_pages(flag,
-				sys->page_order);
+
+	rx_pkt->page_data.page_order = sys->page_order;
+	/* Try a lower order page for order 3 pages in case allocation fails. */
+	rx_pkt->page_data.page = ipa3_alloc_page(flag,
+				&rx_pkt->page_data.page_order,
+				(is_tmp_alloc && rx_pkt->page_data.page_order == 3));
 
 	if (unlikely(!rx_pkt->page_data.page))
 		goto fail_page_alloc;
 
+	rx_pkt->len = PAGE_SIZE << rx_pkt->page_data.page_order;
+
 	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
 			rx_pkt->page_data.page, 0,
 			rx_pkt->len, DMA_FROM_DEVICE);
@@ -2320,7 +2353,7 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	return rx_pkt;
 
 fail_dma_mapping:
-	__free_pages(rx_pkt->page_data.page, sys->page_order);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 fail_page_alloc:
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	return NULL;
@@ -2440,6 +2473,43 @@ static struct ipa3_rx_pkt_wrapper * ipa3_get_free_page
 	return NULL;
 }
 
+int ipa3_register_notifier(void *fn_ptr)
+{
+	if (fn_ptr == NULL)
+		return -EFAULT;
+	spin_lock(&ipa3_ctx->notifier_lock);
+	atomic_set(&ipa3_ctx->stats.num_buff_above_thresh_for_def_pipe_notified, 0);
+	atomic_set(&ipa3_ctx->stats.num_buff_above_thresh_for_coal_pipe_notified, 0);
+	atomic_set(&ipa3_ctx->stats.num_buff_below_thresh_for_def_pipe_notified, 0);
+	atomic_set(&ipa3_ctx->stats.num_buff_below_thresh_for_coal_pipe_notified, 0);
+	ipa3_ctx->ipa_rmnet_notifier.notifier_call = fn_ptr;
+	if (!ipa3_ctx->ipa_rmnet_notifier_enabled)
+		raw_notifier_chain_register(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+			&ipa3_ctx->ipa_rmnet_notifier);
+	else {
+		IPAWANERR("rcvd notifier reg again, changing the cb function\n");
+		ipa3_ctx->ipa_rmnet_notifier.notifier_call = fn_ptr;
+	}
+	ipa3_ctx->ipa_rmnet_notifier_enabled = true;
+	spin_unlock(&ipa3_ctx->notifier_lock);
+	return 0;
+}
+
+int ipa3_unregister_notifier(void *fn_ptr)
+{
+	if (fn_ptr == NULL)
+		return -EFAULT;
+	spin_lock(&ipa3_ctx->notifier_lock);
+	ipa3_ctx->ipa_rmnet_notifier.notifier_call = fn_ptr;
+	if (ipa3_ctx->ipa_rmnet_notifier_enabled)
+		raw_notifier_chain_unregister(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+			&ipa3_ctx->ipa_rmnet_notifier);
+	else IPAWANERR("rcvd notifier unreg again\n");
+	ipa3_ctx->ipa_rmnet_notifier_enabled = false;
+	spin_unlock(&ipa3_ctx->notifier_lock);
+	return 0;
+}
+
 static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
@@ -2538,10 +2608,32 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 		__trigger_repl_work(sys);
 
 	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
-		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
-		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS)
+			spin_lock(&ipa3_ctx->notifier_lock);
+			if (ipa3_ctx->ipa_rmnet_notifier_enabled
+				&& !ipa3_ctx->buff_below_thresh_for_def_pipe_notified) {
+				atomic_inc(&ipa3_ctx->stats.num_buff_below_thresh_for_def_pipe_notified);
+				raw_notifier_call_chain(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+					BUFF_BELOW_LOW_THRESHOLD_FOR_DEFAULT_PIPE, &rx_len_cached);
+				ipa3_ctx->buff_above_thresh_for_def_pipe_notified = false;
+				ipa3_ctx->buff_below_thresh_for_def_pipe_notified = true;
+			}
+			spin_unlock(&ipa3_ctx->notifier_lock);
+		}
+		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty_coal);
+			spin_lock(&ipa3_ctx->notifier_lock);
+			if (ipa3_ctx->ipa_rmnet_notifier_enabled
+				&& !ipa3_ctx->buff_below_thresh_for_coal_pipe_notified) {
+				atomic_inc(&ipa3_ctx->stats.num_buff_below_thresh_for_coal_pipe_notified);
+				raw_notifier_call_chain(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+					BUFF_BELOW_LOW_THRESHOLD_FOR_COAL_PIPE, &rx_len_cached);
+				ipa3_ctx->buff_above_thresh_for_coal_pipe_notified = false;
+				ipa3_ctx->buff_below_thresh_for_coal_pipe_notified = true;
+			}
+			spin_unlock(&ipa3_ctx->notifier_lock);
+		}
 		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
@@ -2550,6 +2642,32 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 			WARN_ON(1);
 	}
 
+	if (rx_len_cached >= IPA_BUFF_THRESHOLD_HIGH) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+			spin_lock(&ipa3_ctx->notifier_lock);
+			if(ipa3_ctx->ipa_rmnet_notifier_enabled &&
+				!ipa3_ctx->buff_above_thresh_for_def_pipe_notified) {
+				atomic_inc(&ipa3_ctx->stats.num_buff_above_thresh_for_def_pipe_notified);
+				raw_notifier_call_chain(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+					BUFF_ABOVE_HIGH_THRESHOLD_FOR_DEFAULT_PIPE, &rx_len_cached);
+				ipa3_ctx->buff_above_thresh_for_def_pipe_notified = true;
+				ipa3_ctx->buff_below_thresh_for_def_pipe_notified = false;
+			}
+			spin_unlock(&ipa3_ctx->notifier_lock);
+		} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			spin_lock(&ipa3_ctx->notifier_lock);
+			if(ipa3_ctx->ipa_rmnet_notifier_enabled &&
+				!ipa3_ctx->buff_above_thresh_for_coal_pipe_notified) {
+				atomic_inc(&ipa3_ctx->stats.num_buff_above_thresh_for_coal_pipe_notified);
+				raw_notifier_call_chain(ipa3_ctx->ipa_rmnet_notifier_list_internal,
+					BUFF_ABOVE_HIGH_THRESHOLD_FOR_COAL_PIPE, &rx_len_cached);
+				ipa3_ctx->buff_above_thresh_for_coal_pipe_notified = true;
+				ipa3_ctx->buff_below_thresh_for_coal_pipe_notified = false;
+			}
+			spin_unlock(&ipa3_ctx->notifier_lock);
+		}
+	}
+
 	return;
 }
 
@@ -3052,7 +3170,7 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
 	}
 	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
 		rx_pkt->len, DMA_FROM_DEVICE);
-	__free_pages(rx_pkt->page_data.page, rx_pkt->sys->page_order);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }
 
@@ -3102,7 +3220,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 					rx_pkt->page_data.dma_addr,
 					rx_pkt->len,
 					DMA_FROM_DEVICE);
-				__free_pages(rx_pkt->page_data.page, sys->page_order);
+				__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 			}
 			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
 				rx_pkt);
@@ -3122,7 +3240,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 				rx_pkt->len,
 				DMA_FROM_DEVICE);
 			__free_pages(rx_pkt->page_data.page,
-				sys->page_order);
+				rx_pkt->page_data.page_order);
 			kmem_cache_free(
 				ipa3_ctx->rx_pkt_wrapper_cache,
 				rx_pkt);
@@ -3922,7 +4040,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 		} else {
 			dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 					rx_pkt->len, DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page, sys->page_order);
+			__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 		}
 		rx_pkt->sys->free_rx_wrapper(rx_pkt);
 		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
@@ -3954,7 +4072,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 				} else {
 					dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 						rx_pkt->len, DMA_FROM_DEVICE);
-					__free_pages(rx_pkt->page_data.page, sys->page_order);
+					__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 				}
 				rx_pkt->sys->free_rx_wrapper(rx_pkt);
 			}
@@ -3985,7 +4103,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 				skb_shinfo(rx_skb)->nr_frags,
 				rx_page.page, 0,
 				size,
-				PAGE_SIZE << sys->page_order);
+				PAGE_SIZE << rx_page.page_order);
 		}
 	} else {
 		return NULL;
@@ -5210,25 +5328,11 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 	evt_rp_dma_addr = 0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
-	if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) &&
-		ipa3_ctx->gsi_rmnet_ll_evt_ring_irq) {
-		gsi_evt_ring_props.intr = GSI_INTR_MSI;
-		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
-		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
-		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
-		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec;
-		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ll_evt_ring_irq;
-	} else if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) &&
-		ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq) {
-		gsi_evt_ring_props.intr = GSI_INTR_MSI;
-		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
-		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
-		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
-		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec;
-		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq;
-	} else {
-		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
-	}
+	if ((ipa3_ctx->gsi_msi_addr) &&
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS ||
+		ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS))
+		gsi_evt_ring_props.intr = GSI_INTR_MSI; // intvec chosen dynamically.
+	else gsi_evt_ring_props.intr = GSI_INTR_IRQ;
 	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
 	gsi_evt_ring_props.ring_len = ring_size;
 	gsi_evt_ring_props.ring_base_vaddr =

+ 10 - 6
drivers/platform/msm/ipa/ipa_v3/ipa_flt.c

@@ -560,7 +560,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	}
 
 	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
-		ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.total_sz_lcl_hash_tbls))) {
+		ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.num_lcl_hash_tbls,
+			alloc_params.total_sz_lcl_hash_tbls))) {
 		IPAERR_RL("Hash filter table for IP:%d too big to fit in lcl memory\n",
 			ip);
 		rc = -EFAULT;
@@ -570,13 +571,14 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	/* Check Non-Hash filter tables fits in SRAM, if it is not - move some tables to DDR */
 	list_for_each_entry(lcl_tbl, &ipa3_ctx->flt_tbl_nhash_lcl_list[ip], link) {
 		if (ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
-			ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.total_sz_lcl_nhash_tbls)) ||
+			ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.num_lcl_nhash_tbls,
+				alloc_params.total_sz_lcl_nhash_tbls)) ||
 			alloc_params.num_lcl_nhash_tbls == 0)
 			break;
 
 		IPADBG("SRAM partition is too small, move one non-hash table in DDR. "
-		       "IP:%d alloc_params.total_sz_lcl_nhash_tbls = %u\n",
-		       ip, alloc_params.total_sz_lcl_nhash_tbls);
+			"IP:%d alloc_params.total_sz_lcl_nhash_tbls = %u\n",
+			ip, alloc_params.total_sz_lcl_nhash_tbls);
 
 		/* Move lowest priority Eth client to DDR */
 		lcl_tbl->tbl->force_sys[IPA_RULE_NON_HASHABLE] = true;
@@ -2239,10 +2241,12 @@ int ipa_flt_sram_set_client_prio_high(enum ipa_client_type client)
 		struct ipa3_flt_tbl_nhash_lcl *lcl_tbl, *tmp;
 		struct ipa3_flt_tbl *flt_tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
 		/* Position filtering table last in the list so, it will have first SRAM priority */
-		list_for_each_entry_safe(lcl_tbl, tmp, &ipa3_ctx->flt_tbl_nhash_lcl_list[ip], link) {
+		list_for_each_entry_safe(
+			lcl_tbl, tmp, &ipa3_ctx->flt_tbl_nhash_lcl_list[ip], link) {
 			if (lcl_tbl->tbl == flt_tbl) {
 				list_del(&lcl_tbl->link);
-				list_add_tail(&lcl_tbl->link, &ipa3_ctx->flt_tbl_nhash_lcl_list[ip]);
+				list_add_tail(&lcl_tbl->link,
+					&ipa3_ctx->flt_tbl_nhash_lcl_list[ip]);
 				break;
 			}
 		}

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c

@@ -1270,6 +1270,7 @@ int ipa3_reset_hdr(bool user_only)
 
 			if (ipa3_id_find(entry->id) == NULL) {
 				mutex_unlock(&ipa3_ctx->lock);
+				IPAERR_RL("Invalid header ID\n");
 				WARN_ON_RATELIMIT_IPA(1);
 				return -EFAULT;
 			}
@@ -1280,6 +1281,7 @@ int ipa3_reset_hdr(bool user_only)
 						entry->phys_base,
 						entry->hdr_len,
 						DMA_TO_DEVICE);
+					entry->proc_ctx->hdr = NULL;
 					entry->proc_ctx = NULL;
 				} else {
 					/* move the offset entry to free list */
@@ -1338,6 +1340,7 @@ int ipa3_reset_hdr(bool user_only)
 
 		if (ipa3_id_find(ctx_entry->id) == NULL) {
 			mutex_unlock(&ipa3_ctx->lock);
+			IPAERR_RL("Invalid proc header ID\n");
 			WARN_ON_RATELIMIT_IPA(1);
 			return -EFAULT;
 		}

+ 31 - 12
drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c

@@ -1863,15 +1863,18 @@ int ipa_drop_stats_init(void)
 
 		}
 	} else {
-		mask = ipa_hw_stats_get_ep_bit_n_idx(
-			IPA_CLIENT_USB_DPL_CONS,
-			&reg_idx);
-		pipe_bitmask[reg_idx] |= mask;
+		/* ADPL pipe hw stats is now taken care by IPA Q6 */
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0) {
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_USB_DPL_CONS,
+				&reg_idx);
+			pipe_bitmask[reg_idx] |= mask;
 
-		mask = ipa_hw_stats_get_ep_bit_n_idx(
-			IPA_CLIENT_ODL_DPL_CONS,
-			&reg_idx);
-		pipe_bitmask[reg_idx] |= mask;
+			mask = ipa_hw_stats_get_ep_bit_n_idx(
+				IPA_CLIENT_ODL_DPL_CONS,
+				&reg_idx);
+			pipe_bitmask[reg_idx] |= mask;
+		}
 	}
 
 	/* Currently we have option to enable drop stats using debugfs.
@@ -2693,6 +2696,7 @@ static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
 	int i, j;
 	bool is_pipe = false;
 	ssize_t ret;
+	int pipe_num_temp;
 
 	if (ipa3_ctx->hw_stats && ipa3_ctx->hw_stats->enabled) {
 		for (i = 0; i < IPAHAL_IPA5_PIPE_REG_NUM; i++) {
@@ -2734,10 +2738,19 @@ static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
 			pipe_ep_reg_bit = ipahal_get_ep_bit(pipe_num);
 			is_pipe = true;
 		}
+		pipe_num_temp = ipa3_get_client_by_pipe(pipe_num);
 		if (dbg_buff[i] == seprator) {
-			if (pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes
-				&& ipa3_get_client_by_pipe(pipe_num) <
-				IPA_CLIENT_MAX) {
+			/* Removing ADPL and ODL stats as Q6 supports it from IPA_5_0 */
+			if ((pipe_num_temp == IPA_CLIENT_USB_DPL_CONS ||
+				pipe_num_temp == IPA_CLIENT_ODL_DPL_CONS) &&
+				ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0) {
+				pipe_num = 0;
+				is_pipe = false;
+				continue;
+			}
+
+			else if (pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes
+				&& pipe_num_temp < IPA_CLIENT_MAX) {
 				IPADBG("pipe number %u\n", pipe_num);
 				if (enable_pipe)
 					pipe_bitmask[pipe_ep_reg_idx] |=
@@ -2750,7 +2763,13 @@ static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
 			is_pipe = false;
 		}
 	}
-	if (is_pipe && pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes &&
+	pipe_num_temp = ipa3_get_client_by_pipe(pipe_num);
+	/* Removing ADPL and ODL stats as Q6 supports it from IPA_5_0 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0 &&
+		(pipe_num_temp == IPA_CLIENT_USB_DPL_CONS ||
+		pipe_num_temp == IPA_CLIENT_ODL_DPL_CONS)) {
+		IPAERR("Enable/Disable hw stats on DPL is not supported");
+	} else if (is_pipe && pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes &&
 		ipa3_get_client_by_pipe(pipe_num) < IPA_CLIENT_MAX) {
 		IPADBG("pipe number %u\n", pipe_num);
 		if (enable_pipe)

+ 51 - 22
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -127,6 +127,8 @@ enum {
 #define IPA_PAGE_POLL_DEFAULT_THRESHOLD 15
 #define IPA_PAGE_POLL_THRESHOLD_MAX 30
 
+#define NTN3_CLIENTS_NUM 2
+
 
 #define IPA_WDI2_OVER_GSI() (ipa3_ctx->ipa_wdi2_over_gsi \
 		&& (ipa_get_wdi_version() == IPA_WDI_2))
@@ -568,11 +570,13 @@ enum ipa_icc_type {
  * @page: skb page
  * @dma_addr: DMA address of this Rx packet
  * @is_tmp_alloc: skb page from tmp_alloc or recycle_list
+ * @page_order: page order associated with the page.
  */
 struct ipa_rx_page_data {
 	struct page *page;
 	dma_addr_t dma_addr;
 	bool is_tmp_alloc;
+	u32 page_order;
 };
 
 struct ipa3_active_client_htable_entry {
@@ -1545,8 +1549,14 @@ struct ipa3_stats {
 	u32 flow_disable;
 	u32 tx_non_linear;
 	u32 rx_page_drop_cnt;
+	u64 lower_order;
+	u32 pipe_setup_fail_cnt;
 	struct ipa3_page_recycle_stats page_recycle_stats[3];
 	u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
+	atomic_t num_buff_above_thresh_for_def_pipe_notified;
+	atomic_t num_buff_above_thresh_for_coal_pipe_notified;
+	atomic_t num_buff_below_thresh_for_def_pipe_notified;
+	atomic_t num_buff_below_thresh_for_coal_pipe_notified;
 };
 
 /* offset for each stats */
@@ -1986,6 +1996,35 @@ struct ipa3_eth_error_stats {
 	u32 err;
 };
 
+struct ipa_ntn3_stats_rx {
+	int rp;
+	int wp;
+	bool pending_db_after_rollback;
+	u32 msi_db_idx;
+	u32 chain_cnt;
+	u32 err_cnt;
+	u32 tres_handled;
+	u32 rollbacks_cnt;
+	u32 msi_db_cnt;
+};
+
+struct ipa_ntn3_stats_tx {
+	int rp;
+	int wp;
+	bool pending_db_after_rollback;
+	u32 msi_db_idx;
+	u32 derr_cnt;
+	u32 oob_cnt;
+	u32 tres_handled;
+	u32 rollbacks_cnt;
+	u32 msi_db_cnt;
+};
+
+struct ipa_ntn3_client_stats {
+	struct ipa_ntn3_stats_rx rx_stats;
+	struct ipa_ntn3_stats_tx tx_stats;
+};
+
 
 /**
  * struct ipa3_context - IPA context
@@ -2104,13 +2143,6 @@ struct ipa3_eth_error_stats {
  * @uc_fw_file_name: uC IPA fw file name
  * @eth_info: ethernet client mapping
  * @max_num_smmu_cb: number of smmu s1 cb supported
- * @u64 gsi_msi_addr: MSI SPI set address APSS_GICA_SETSPI_NSR
- * @u64 gsi_msi_clear_addr: MSI SPI clear address APSS_GICA_CLRSPI_NSR
- * @u64 gsi_msi_ioremapped_addr: iore mapped address for debugging purpose
- * @u32 gsi_rmnet_ctl_evt_ring_irq: IRQ number for rmnet_ctl pipe
- * @u32 gsi_rmnet_ll_evt_ring_irq; IRQ number for rmnet_ll pipe
- * @u32 gsi_rmnet_ctl_evt_ring_intvec: HW IRQ number for rmnet_ctl pipe
- * @u32 gsi_rmnet_ll_evt_ring_intvec; HW IRQ number for rmnet_ll pipe
  * @non_hash_flt_lcl_sys_switch: number of times non-hash flt table moved
  */
 struct ipa3_context {
@@ -2332,19 +2364,20 @@ struct ipa3_context {
 	bool use_pm_wrapper;
 	u8 page_poll_threshold;
 	bool wan_common_page_pool;
-	u64 gsi_msi_addr;
-	u64 gsi_msi_clear_addr;
-	u64 gsi_msi_addr_io_mapped;
-	u64 gsi_msi_clear_addr_io_mapped;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ctl_evt_ring_irq;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_irq;
 	bool use_tput_est_ep;
 	struct ipa_ioc_eogre_info eogre_cache;
 	bool eogre_enabled;
 	bool is_device_crashed;
 	bool ulso_wa;
+	u64 gsi_msi_addr;
+	spinlock_t notifier_lock;
+	struct raw_notifier_head *ipa_rmnet_notifier_list_internal;
+	struct notifier_block ipa_rmnet_notifier;
+	bool ipa_rmnet_notifier_enabled;
+	bool buff_above_thresh_for_def_pipe_notified;
+	bool buff_above_thresh_for_coal_pipe_notified;
+	bool buff_below_thresh_for_def_pipe_notified;
+	bool buff_below_thresh_for_coal_pipe_notified;
 };
 
 struct ipa3_plat_drv_res {
@@ -2423,12 +2456,6 @@ struct ipa3_plat_drv_res {
 	u16 ulso_ip_id_min;
 	u16 ulso_ip_id_max;
 	bool use_pm_wrapper;
-	u64 gsi_msi_addr;
-	u64 gsi_msi_clear_addr;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ctl_evt_ring_irq;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_irq;
 	bool use_tput_est_ep;
 	bool ulso_wa;
 };
@@ -3377,6 +3404,8 @@ int ipa3_register_rmnet_ll_cb(
 	void *user_data3);
 int ipa3_unregister_rmnet_ll_cb(void);
 int ipa3_rmnet_ll_xmit(struct sk_buff *skb);
+int ipa3_register_notifier(void *fn_ptr);
+int ipa3_unregister_notifier(void *fn_ptr);
 int ipa3_setup_apps_low_lat_data_prod_pipe(
 	struct rmnet_egress_param *egress_param,
 	struct net_device *dev);
@@ -3416,7 +3445,6 @@ irq_handler_t ipa3_get_isr(void);
 void ipa_pc_qmp_enable(void);
 u32 ipa3_get_r_rev_version(void);
 void ipa3_notify_clients_registered(void);
-void ipa_gsi_map_unmap_gsi_msi_addr(bool map);
 #if defined(CONFIG_IPA3_REGDUMP)
 int ipa_reg_save_init(u32 value);
 void ipa_save_registers(void);
@@ -3445,6 +3473,7 @@ int ipa3_eth_disconnect(
 int ipa3_eth_client_conn_evt(struct ipa_ecm_msg *msg);
 int ipa3_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
 #endif
+void ipa_eth_ntn3_get_status(struct ipa_ntn3_client_stats *s, unsigned inst_id);
 void ipa3_eth_get_status(u32 client, int scratch_id,
 	struct ipa3_eth_error_stats *stats);
 int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,

+ 6 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_odl.c

@@ -250,6 +250,7 @@ int ipa3_send_adpl_msg(unsigned long skb_data)
 	list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list);
 	atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue);
 	mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
+	wake_up(&ipa3_odl_ctx->adpl_msg_waitq);
 	IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt);
 
 	return 0;
@@ -537,7 +538,9 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count,
 	int ret =  0;
 	char __user *start = buf;
 	struct ipa3_push_msg_odl *msg;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
+	add_wait_queue(&ipa3_odl_ctx->adpl_msg_waitq, &wait);
 	while (1) {
 		IPADBG_LOW("Writing message to adpl pipe\n");
 		if (!ipa3_odl_ctx->odl_state.odl_open)
@@ -582,9 +585,6 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count,
 			IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_tx_diag_pkt);
 			kfree(msg);
 			msg = NULL;
-		} else {
-			ret = -EAGAIN;
-			break;
 		}
 
 		ret = -EAGAIN;
@@ -597,9 +597,9 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count,
 
 		if (start != buf)
 			break;
-
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-
+	remove_wait_queue(&ipa3_odl_ctx->adpl_msg_waitq, &wait);
 	if (start != buf && ret != -EFAULT)
 		ret = buf - start;
 
@@ -675,6 +675,7 @@ int ipa_odl_init(void)
 
 	odl_cdev = ipa3_odl_ctx->odl_cdev;
 	INIT_LIST_HEAD(&ipa3_odl_ctx->adpl_msg_list);
+	init_waitqueue_head(&ipa3_odl_ctx->adpl_msg_waitq);
 	mutex_init(&ipa3_odl_ctx->adpl_msg_lock);
 	mutex_init(&ipa3_odl_ctx->pipe_lock);
 

+ 1 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_odl.h

@@ -58,6 +58,7 @@ struct ipa_odl_context {
 	bool odl_ctl_msg_wq_flag;
 	struct ipa3_odlstats stats;
 	u32 odl_pm_hdl;
+	wait_queue_head_t adpl_msg_waitq;
 };
 
 struct ipa3_push_msg_odl {

+ 24 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_pm.c

@@ -1488,11 +1488,26 @@ int ipa_pm_get_current_clk_vote(void)
 		return ipa3_ctx->app_clock_vote.cnt;
 }
 
+
+static int ipa_get_pm_hdl_from_name(char *client_name)
+{
+	int i;
+	struct pm_client_name_lookup *lookup;
+
+	for (i = 0; i < NUM_PM_CLIENT_NAMES; i++) {
+		lookup = &client_lookup_table[i];
+		if (!strcmp(lookup->name, client_name))
+			return lookup->idx_hdl;
+	}
+	return NUM_PM_CLIENT_NAMES + 2;
+}
+
 bool ipa_get_pm_client_stats_filled(struct pm_client_stats *pm_stats_ptr,
 	int pm_client_index)
 {
 	struct ipa_pm_client *client;
 	unsigned long flags;
+	int i;
 
 	client = ipa_pm_ctx->clients[pm_client_index];
 	mutex_lock(&ipa_pm_ctx->client_mutex);
@@ -1503,13 +1518,21 @@ bool ipa_get_pm_client_stats_filled(struct pm_client_stats *pm_stats_ptr,
 	spin_lock_irqsave(&client->state_lock, flags);
 	pm_stats_ptr->pm_client_group = client->group;
 	pm_stats_ptr->pm_client_state = client->state;
-	pm_stats_ptr->pm_client_hdl = pm_client_index;
+	pm_stats_ptr->pm_client_hdl = ipa_get_pm_hdl_from_name(client->name);
 	if (client->group == IPA_PM_GROUP_DEFAULT)
 		pm_stats_ptr->pm_client_bw = client->throughput;
 	else {
 		pm_stats_ptr->pm_client_bw = ipa_pm_ctx->group_tput[client->group];
 	}
 
+	pm_stats_ptr->pm_client_type = IPA_CLIENT_MAX;
+	for (i = 0; i < ipa3_get_max_num_pipes(); i++) {
+		if (ipa_pm_ctx->clients_by_pipe[i] == client) {
+			pm_stats_ptr->pm_client_type = ipa3_get_client_by_pipe(i);
+			break;
+		}
+	}
+
 	spin_unlock_irqrestore(&client->state_lock, flags);
 	mutex_unlock(&ipa_pm_ctx->client_mutex);
 	return true;

+ 6 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c

@@ -566,8 +566,11 @@ static int ipa3_qmi_send_req_wait(struct qmi_handle *client_handle,
 		req_desc->ei_array,
 		req);
 
-	if (unlikely(!ipa_q6_clnt))
+	if (unlikely(!ipa_q6_clnt)) {
+		mutex_unlock(&ipa3_qmi_lock);
 		return -EINVAL;
+	}
+
 	mutex_unlock(&ipa3_qmi_lock);
 
 	if (ret < 0) {
@@ -2107,18 +2110,18 @@ void ipa3_qmi_service_exit(void)
 		ipa3_svc_handle = NULL;
 	}
 
-	/* qmi-client */
-
 	/* Release client handle */
 	mutex_lock(&ipa3_qmi_lock);
 	if (ipa_q6_clnt != NULL) {
 		qmi_handle_release(ipa_q6_clnt);
 		vfree(ipa_q6_clnt);
 		ipa_q6_clnt = NULL;
+		mutex_unlock(&ipa3_qmi_lock);
 		if (ipa_clnt_req_workqueue) {
 			destroy_workqueue(ipa_clnt_req_workqueue);
 			ipa_clnt_req_workqueue = NULL;
 		}
+		mutex_lock(&ipa3_qmi_lock);
 	}
 
 	/* clean the QMI msg cache */

+ 153 - 155
drivers/platform/msm/ipa/ipa_v3/ipa_stats.c

@@ -120,112 +120,50 @@ static int ipa_stats_ioctl_open(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static bool ipa_stats_struct_mismatch(enum ipa_lnx_stats_ioc_cmd_type type)
+static bool ipa_stats_struct_mismatch()
 {
-	switch (type) {
-	case IPA_LNX_CMD_GET_ALLOC_INFO:
-		if (IPA_LNX_EACH_INST_ALLOC_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_EACH_INST_ALLOC_INFO_STRUCT_LEN ||
-			IPA_LNX_STATS_ALL_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_STATS_ALL_INFO_STRUCT_LEN ||
-			IPA_LNX_STATS_SPEARHEAD_CTX_STRUCT_LEN_INT !=
-				IPA_LNX_STATS_SPEARHEAD_CTX_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_GET_ALLOC_INFO size mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_GENERIC_STATS:
-		if (IPA_LNX_PG_RECYCLE_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PG_RECYCLE_STATS_STRUCT_LEN ||
-			IPA_LNX_EXCEPTION_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_EXCEPTION_STATS_STRUCT_LEN ||
-			IPA_LNX_ODL_EP_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_ODL_EP_STATS_STRUCT_LEN ||
-			IPA_LNX_HOLB_DISCARD_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_HOLB_DISCARD_STATS_STRUCT_LEN ||
-			IPA_LNX_HOLB_MONITOR_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_HOLB_MONITOR_STATS_STRUCT_LEN ||
-			IPA_LNX_HOLB_DROP_AND_MON_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_HOLB_DROP_AND_MON_STATS_STRUCT_LEN ||
-			IPA_LNX_GENERIC_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GENERIC_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_GENERIC_STATS size mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_CLOCK_STATS:
-		if (IPA_LNX_PM_CLIENT_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PM_CLIENT_STATS_STRUCT_LEN ||
-			IPA_LNX_CLOCK_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_CLOCK_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_CLOCK_STATS size mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_WLAN_INST_STATS:
-		if (IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN ||
-			IPA_LNX_WLAN_INSTANCE_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_WLAN_INSTANCE_INFO_STRUCT_LEN ||
-			IPA_LNX_WLAN_INST_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_WLAN_INST_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_WLAN_INST_STATS size mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_ETH_INST_STATS:
-		if (IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN ||
-			IPA_LNX_ETH_INSTANCE_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_ETH_INSTANCE_INFO_STRUCT_LEN ||
-			IPA_LNX_ETH_INST_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_ETH_INST_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_ETH_INST_STATS size mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_USB_INST_STATS:
-		if (IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN ||
-			IPA_LNX_USB_INSTANCE_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_USB_INSTANCE_INFO_STRUCT_LEN ||
-			IPA_LNX_USB_INST_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_USB_INST_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_USB_INST_STATS ize mismatch");
-				return true;
-		} else return false;
-	case IPA_LNX_CMD_MHIP_INST_STATS:
-		if (IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN ||
-			IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN ||
-			IPA_LNX_MHIP_INSTANCE_INFO_STRUCT_LEN_INT !=
-				IPA_LNX_MHIP_INSTANCE_INFO_STRUCT_LEN ||
-			IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT !=
-				IPA_LNX_MHIP_INST_STATS_STRUCT_LEN) {
-				IPA_STATS_ERR("IPA_LNX_CMD_MHIP_INST_STATS size mismatch");
-				return true;
-		} else return false;
-	default:
-		return true;
-	}
+	if (IPA_LNX_EACH_INST_ALLOC_INFO_STRUCT_LEN_INT != IPA_LNX_EACH_INST_ALLOC_INFO_STRUCT_LEN ||
+		IPA_LNX_STATS_ALL_INFO_STRUCT_LEN_INT != IPA_LNX_STATS_ALL_INFO_STRUCT_LEN ||
+		IPA_LNX_STATS_SPEARHEAD_CTX_STRUCT_LEN_INT != IPA_LNX_STATS_SPEARHEAD_CTX_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_GET_ALLOC_INFO structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT != IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_GET_CONSOLIDATED_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_PG_RECYCLE_STATS_STRUCT_LEN_INT != IPA_LNX_PG_RECYCLE_STATS_STRUCT_LEN ||
+		IPA_LNX_EXCEPTION_STATS_STRUCT_LEN_INT != IPA_LNX_EXCEPTION_STATS_STRUCT_LEN ||
+		IPA_LNX_ODL_EP_STATS_STRUCT_LEN_INT != IPA_LNX_ODL_EP_STATS_STRUCT_LEN ||
+		IPA_LNX_HOLB_DISCARD_STATS_STRUCT_LEN_INT != IPA_LNX_HOLB_DISCARD_STATS_STRUCT_LEN ||
+		IPA_LNX_HOLB_MONITOR_STATS_STRUCT_LEN_INT != IPA_LNX_HOLB_MONITOR_STATS_STRUCT_LEN ||
+		IPA_LNX_HOLB_DROP_AND_MON_STATS_STRUCT_LEN_INT != IPA_LNX_HOLB_DROP_AND_MON_STATS_STRUCT_LEN ||
+		IPA_LNX_GENERIC_STATS_STRUCT_LEN_INT != IPA_LNX_GENERIC_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_GENERIC_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_PM_CLIENT_STATS_STRUCT_LEN_INT != IPA_LNX_PM_CLIENT_STATS_STRUCT_LEN ||
+		IPA_LNX_CLOCK_STATS_STRUCT_LEN_INT != IPA_LNX_CLOCK_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_CLOCK_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN_INT != IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN ||
+		IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN_INT != IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN ||
+		IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN_INT != IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN ||
+		IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN_INT != IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN ||
+		IPA_LNX_WLAN_INSTANCE_INFO_STRUCT_LEN_INT != IPA_LNX_WLAN_INSTANCE_INFO_STRUCT_LEN ||
+		IPA_LNX_WLAN_INST_STATS_STRUCT_LEN_INT != IPA_LNX_WLAN_INST_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_WLAN_INST_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_ETH_INSTANCE_INFO_STRUCT_LEN_INT != IPA_LNX_ETH_INSTANCE_INFO_STRUCT_LEN ||
+		IPA_LNX_ETH_INST_STATS_STRUCT_LEN_INT != IPA_LNX_ETH_INST_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_ETH_INST_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_USB_INSTANCE_INFO_STRUCT_LEN_INT != IPA_LNX_USB_INSTANCE_INFO_STRUCT_LEN ||
+		IPA_LNX_USB_INST_STATS_STRUCT_LEN_INT != IPA_LNX_USB_INST_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_USB_INST_STATS structure size mismatch\n");
+			return true;
+	} else if (IPA_LNX_MHIP_INSTANCE_INFO_STRUCT_LEN_INT != IPA_LNX_MHIP_INSTANCE_INFO_STRUCT_LEN ||
+		IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT != IPA_LNX_MHIP_INST_STATS_STRUCT_LEN) {
+			IPA_STATS_ERR("IPA_LNX_CMD_MHIP_INST_STATS structure size mismatch\n");
+			return true;
+	} else return false;
 }
 
 static int ipa_get_generic_stats(unsigned long arg)
@@ -416,7 +354,6 @@ static int ipa_get_clock_stats(unsigned long arg)
 	pm_stats_ptr = &clock_stats->pm_clnt_stats[0];
 	for (i = 1; i < ipa_lnx_agent_ctx.alloc_info.num_pm_clients; i++) {
 		if (ipa_get_pm_client_stats_filled(pm_stats_ptr, i)) {
-			pm_stats_ptr->pm_client_type = ipa3_get_client_by_pipe(i);
 			clock_stats->active_clients++;
 			pm_stats_ptr = (struct pm_client_stats *)((uint64_t)pm_stats_ptr +
 				sizeof(struct pm_client_stats));
@@ -938,9 +875,16 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 			if (instance_ptr->eth_mode == IPA_ETH_CLIENT_AQC107 ||
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_AQC113 ||
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN ||
+#if IPA_ETH_API_VER >= 2
+				instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3 ||
+#endif
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_EMAC) {
 
-				if(instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN) {
+				if(instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN
+#if IPA_ETH_API_VER >= 2
+					|| instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3
+#endif
+					) {
 					if(ipa3_get_ntn_gsi_stats(&stats)) {
 						instance_ptr = (struct eth_instance_info *)((
 							uint64_t)instance_ptr +
@@ -969,11 +913,18 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 				}
 
 				if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN ||
+#if IPA_ETH_API_VER >= 2
+					instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3 ||
+#endif
 					instance_ptr->eth_mode == IPA_ETH_CLIENT_EMAC)
 					tx_instance_ptr_local->tx_client =
 						IPA_CLIENT_ETHERNET_CONS;
 				else tx_instance_ptr_local->tx_client =
 						IPA_CLIENT_AQC_ETHERNET_CONS;
+#if IPA_ETH_API_VER >= 2
+				if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) && (i == 1))
+					tx_instance_ptr_local->tx_client = IPA_CLIENT_ETHERNET2_CONS;
+#endif
 				client_type = tx_instance_ptr_local->tx_client;
 				instance_ptr->pm_bandwidth =
 					ipa_pm_get_pm_clnt_throughput(client_type);
@@ -1053,14 +1004,24 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 			if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_AQC107 ||
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_AQC113 ||
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN ||
+#if IPA_ETH_API_VER >= 2
+				instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3 ||
+#endif
 				instance_ptr->eth_mode == IPA_ETH_CLIENT_EMAC)) {
 
 				if (instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN ||
+#if IPA_ETH_API_VER >= 2
+					instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3 ||
+#endif
 					instance_ptr->eth_mode == IPA_ETH_CLIENT_EMAC)
 					rx_instance_ptr_local->rx_client =
 					IPA_CLIENT_ETHERNET_PROD;
 				else rx_instance_ptr_local->rx_client =
 						IPA_CLIENT_AQC_ETHERNET_PROD;
+#if IPA_ETH_API_VER >= 2
+				if ((instance_ptr->eth_mode == IPA_ETH_CLIENT_NTN3) && (i == 1))
+					rx_instance_ptr_local->rx_client = IPA_CLIENT_ETHERNET2_PROD;
+#endif
 				client_type = rx_instance_ptr_local->rx_client;
 				rx_instance_ptr_local->num_rx_ring_100_perc_with_pack =
 					stats.u.ring[0].ringFull;
@@ -1603,19 +1564,25 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 					ipa_client_type =
 						ipa_eth_get_ipa_client_type_from_eth_type(
 							j, IPA_ETH_PIPE_DIR_TX);
-					if (ipa_client_type >= IPA_CLIENT_MAX) {
+					if (ipa_client_type >= IPA_CLIENT_MAX)
 						IPA_STATS_ERR("Eth tx client type not found");
-						ipa_assert();
-					}
+#if IPA_ETH_API_VER >= 2
+					/* Overwrite client type if it is NTN3 and 2nd instance */
+					if ((j == IPA_ETH_CLIENT_NTN3) && (i == 1))
+						ipa_client_type = IPA_CLIENT_ETHERNET2_CONS;
+#endif
 					ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
 						i].pipes_client_type[k*2] = ipa_client_type;
 					ipa_client_type =
 						ipa_eth_get_ipa_client_type_from_eth_type(
 							j, IPA_ETH_PIPE_DIR_RX);
-					if (ipa_client_type >= IPA_CLIENT_MAX) {
+					if (ipa_client_type >= IPA_CLIENT_MAX)
 						IPA_STATS_ERR("Eth rx client type not found");
-						ipa_assert();
-					}
+#if IPA_ETH_API_VER >= 2
+					/* Overwrite client type if it is NTN3 and 2nd instance */
+					if ((j == IPA_ETH_CLIENT_NTN3) && (i == 1))
+						ipa_client_type = IPA_CLIENT_ETHERNET2_PROD;
+#endif
 					ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
 						i].pipes_client_type[(k*2) + 1] = ipa_client_type;
 					ipa_lnx_agent_ctx.alloc_info.num_eth_instances++;
@@ -1682,6 +1649,10 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 	/* For MHIP instance */
 	if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_MHIP_STATS) {
 #if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
+		if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
+			goto success;
+		}
 		if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
 			ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
 		else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
@@ -1713,6 +1684,7 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 #endif
 	}
 
+success:
 	if(copy_to_user((u8 *)arg,
 		&ipa_lnx_agent_ctx,
 		sizeof(struct ipa_lnx_stats_spearhead_ctx))) {
@@ -1726,8 +1698,8 @@ static long ipa_lnx_stats_ioctl(struct file *filp,
 	unsigned int cmd,
 	unsigned long arg)
 {
-	int retval = 0;
-	u8 *param = NULL;
+	int retval = IPA_LNX_STATS_SUCCESS;
+	struct ipa_lnx_consolidated_stats *consolidated_stats;
 
 	if (_IOC_TYPE(cmd) != IPA_LNX_STATS_IOC_MAGIC) {
 		IPA_STATS_ERR("IOC type mismatch %d\n", cmd);
@@ -1741,79 +1713,100 @@ static long ipa_lnx_stats_ioctl(struct file *filp,
 
 	switch (cmd) {
 	case IPA_LNX_IOC_GET_ALLOC_INFO:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_GET_ALLOC_INFO)) {
-			retval = ipa_stats_get_alloc_info(arg);
-			if (retval) {
-				IPA_STATS_ERR("ipa get alloc info fail");
-				break;
-			}
-		}
-		else retval = -EPERM;
+		retval = ipa_stats_get_alloc_info(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get alloc info fail");
 		break;
 	case IPA_LNX_IOC_GET_GENERIC_STATS:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_GENERIC_STATS)) {
-			retval = ipa_get_generic_stats(arg);
+		retval = ipa_get_generic_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get generic stats fail");
+		break;
+	case IPA_LNX_IOC_GET_CLOCK_STATS:
+		retval = ipa_get_clock_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get clock stats fail");
+		break;
+	case IPA_LNX_IOC_GET_WLAN_INST_STATS:
+		retval = ipa_get_wlan_inst_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get wlan inst stats fail");
+		break;
+	case IPA_LNX_IOC_GET_ETH_INST_STATS:
+		retval = ipa_get_eth_inst_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get eth inst stats fail");
+		break;
+	case IPA_LNX_IOC_GET_USB_INST_STATS:
+		retval = ipa_get_usb_inst_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get usb inst stats fail");
+		break;
+	case IPA_LNX_IOC_GET_MHIP_INST_STATS:
+#if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
+		retval = ipa_get_mhip_inst_stats(arg);
+		if (retval)
+			IPA_STATS_ERR("ipa get mhip inst stats fail");
+#else
+		retval = IPA_LNX_STATS_SUCCESS;
+#endif
+		break;
+	case IPA_LNX_IOC_GET_CONSOLIDATED_STATS:
+		consolidated_stats = (struct ipa_lnx_consolidated_stats *) memdup_user((
+				const void __user *)arg, sizeof(struct ipa_lnx_consolidated_stats));
+		if (IS_ERR(consolidated_stats)) {
+			IPA_STATS_ERR("copy from user failed");
+			return -ENOMEM;
+		}
+
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_GENERIC_STATS) {
+			retval = ipa_get_generic_stats((unsigned long) consolidated_stats->generic_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get generic stats fail");
 				break;
 			}
 		}
-		else retval = -EPERM;
-		break;
-	case IPA_LNX_IOC_GET_CLOCK_STATS:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_CLOCK_STATS)) {
-			retval = ipa_get_clock_stats(arg);
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_CLOCK_STATS) {
+			retval = ipa_get_clock_stats((unsigned long) consolidated_stats->clock_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get clock stats fail");
 				break;
 			}
-		} else retval = -EPERM;
-		break;
-	case IPA_LNX_IOC_GET_WLAN_INST_STATS:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_WLAN_INST_STATS)) {
-			retval = ipa_get_wlan_inst_stats(arg);
+		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_WLAN_STATS) {
+			retval = ipa_get_wlan_inst_stats((unsigned long) consolidated_stats->wlan_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get wlan inst stats fail");
 				break;
 			}
-		} else retval = -EPERM;
-		break;
-	case IPA_LNX_IOC_GET_ETH_INST_STATS:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_ETH_INST_STATS)) {
-			retval = ipa_get_eth_inst_stats(arg);
+		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_ETH_STATS) {
+			retval = ipa_get_eth_inst_stats((unsigned long) consolidated_stats->eth_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get eth inst stats fail");
 				break;
 			}
-		} else retval = -EPERM;
-		break;
-	case IPA_LNX_IOC_GET_USB_INST_STATS:
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_USB_INST_STATS)) {
-			retval = ipa_get_usb_inst_stats(arg);
+		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_USB_STATS) {
+			retval = ipa_get_usb_inst_stats((unsigned long) consolidated_stats->usb_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get usb inst stats fail");
 				break;
 			}
-		} else retval = -EPERM;
-		break;
-	case IPA_LNX_IOC_GET_MHIP_INST_STATS:
+		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_MHIP_STATS) {
 #if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
-		if (!ipa_stats_struct_mismatch(IPA_LNX_CMD_MHIP_INST_STATS)) {
-			retval = ipa_get_mhip_inst_stats(arg);
+			retval = ipa_get_mhip_inst_stats((unsigned long) consolidated_stats->mhip_stats);
 			if (retval) {
 				IPA_STATS_ERR("ipa get mhip inst stats fail");
 				break;
 			}
-		} else retval = -EPERM;
-#else
-		retval = IPA_LNX_STATS_SUCCESS;
 #endif
+		}
 		break;
 	default:
 		retval = -ENOTTY;
 	}
-	if (param)
-		vfree(param);
 	return retval;
 }
 
@@ -1877,6 +1870,11 @@ int ipa_spearhead_stats_init()
 {
 	int ret;
 
+	if (ipa_stats_struct_mismatch()) {
+		IPA_STATS_ERR("ipa stats structure mismatch\n");
+		return -1;
+	}
+
 	ret = ipa_spearhead_stats_ioctl_init();
 	if(ret) {
 		IPA_STATS_ERR("IPA_LNX_STATS_IOCTL init failure = %d\n", ret);

+ 38 - 344
drivers/platform/msm/ipa/ipa_v3/ipa_stats.h

@@ -41,6 +41,10 @@
 	IPA_LNX_CMD_MHIP_INST_STATS, \
 	struct ipa_lnx_mhip_inst_stats)
 
+#define IPA_LNX_IOC_GET_CONSOLIDATED_STATS _IOWR(IPA_LNX_STATS_IOC_MAGIC, \
+	IPA_LNX_CMD_CONSOLIDATED_STATS, \
+	struct ipa_lnx_consolidated_stats)
+
 #define IPA_LNX_STATS_SUCCESS 0
 #define IPA_LNX_STATS_FAILURE -1
 
@@ -62,6 +66,27 @@
 #define SPRHD_IPA_LOG_TYPE_USB_STATS       0x00010
 #define SPRHD_IPA_LOG_TYPE_MHIP_STATS      0x00020
 
+
+/**
+ * Look up table for pm stats client names.
+ * New entry to be added when new client
+ * registers with pm
+ */
+struct pm_client_name_lookup { char *name; int idx_hdl;};
+static struct pm_client_name_lookup client_lookup_table[] = {
+	{"ODL", 1},
+	{"IPA_CLIENT_APPS_LAN_CONS", 2},
+	{"EMB MODEM", 3},
+	{"TETH MODEM", 4},
+	{"rmnet_ipa%d", 5},
+	{"USB", 6},
+	{"USB DPL", 7},
+	{"MODEM (USB RMNET)", 8},
+	{"IPA_CLIENT_APPS_WAN_CONS", 9}
+};
+
+#define NUM_PM_CLIENT_NAMES (sizeof(client_lookup_table)/sizeof(struct pm_client_name_lookup))
+
 /**
  * Every structure is associated with the underlying macro
  * for it's length and that has to be updated every time there
@@ -313,6 +338,18 @@ struct ipa_lnx_mhip_inst_stats {
 };
 #define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT (8 + 248)
 
+
+struct ipa_lnx_consolidated_stats {
+	uint64_t log_type_mask;
+	struct ipa_lnx_generic_stats *generic_stats;
+	struct ipa_lnx_clock_stats *clock_stats;
+	struct ipa_lnx_wlan_inst_stats *wlan_stats;
+	struct ipa_lnx_eth_inst_stats *eth_stats;
+	struct ipa_lnx_usb_inst_stats *usb_stats;
+	struct ipa_lnx_mhip_inst_stats *mhip_stats;
+};
+#define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT (8 + 48)
+
 /* Explain below structures */
 struct ipa_lnx_each_inst_alloc_info {
 	uint32_t pipes_client_type[SPEARHEAD_NUM_MAX_PIPES];
@@ -353,356 +390,13 @@ struct ipa_lnx_stats_spearhead_ctx {
  */
 enum ipa_lnx_stats_ioc_cmd_type {
 	IPA_LNX_CMD_GET_ALLOC_INFO,
-	/**
-	 * IPA_LNX_CMD_GENERIC_STATS - Includes following fields (in bytes)
-	 *							(min - 296 bytes, max - 300 bytes)
-	 *
-	 * tx_dma_pkts(4)		- Packets sent to IPA with IP_PACKET_INIT command
-	 * tx_hw_pkts(4)		- Packets sent to IPA without PACKET_INIT.
-	 *							These packets go through IPA HW processing
-	 * tx_non_linear(4)		- Non linear TX packets
-	 * tx_pkts_compl(4)		- No of TX packets processed by IPA
-	 * stats_compl(4)		- No of TX commands and LAN packets processed by IPA
-	 * active_eps(4)		- No of active end points
-	 * wan_rx_empty(4) 		- No of times WAN_CONS/COAL pipes have buffers less than threshold of 32
-	 * wan_repl_rx_empty(4)	- No of times there are no pages in temp cache for WAN pipe
-	 * lan_rx_empty(4)		- No of times LAN_CONS pipe has buffers less than threshold of 32
-	 * lan_repl_rx_empty(4)	- No of times LAN_CONS pipe has replinished buffers
-	 * pg_recycle_stats(32)	- Page recycling stats
-	 *		|______	coal_total_repl_buff(8)	- Total no of buffers replenished for coal pipe
-	 *				coal_temp_repl_buff(8)	- Total no of buffers replenished from temp cache
-	 *				def_total_repl_buff(8)	- Total no of buffers replenished for default pipe
-	 *				def_temp_repl_buff(8)	- Total no of buffers replenished from temp cache
-	 * exception_stats(40)	- Exception path stats
-	 *		|______	excptn_type_none(4)	- No of packets with exception type as None
-	 *				excptn_type_deaggr(4)- No of packets with exception type as deaggr
-	 *				excptn_type_iptype(4)- No of packets with exception type as IP type
-	 *				excptn_type_pkt_len(4)- No of packets with exception type as packet length
-	 *				excptn_type_pkt_thrshld(4)- No of packets with exception type as packet threshold
-	 *				excptn_type_frag_rule_miss(4)- No of packets with exception type as frag rule
-	 *				excptn_type_sw_flt(4)- No of packets with exception type as sw filter
-	 *				excptn_type_nat(4)- No of packets with exception type as NAT
-	 *				excptn_type_ipv6_ct(4)- No of packets with exception type as IPv6 CT
-	 *				excptn_type_csum(4)- No of packets with exception type as checksum
-	 * odl_stats(16)		- ODL stats
-	 *		|______	rx_pkt(4)		- Total no of packets received
-	 *				processed_pkt(4)- Total no of processed packets
-	 *				dropped_pkt(4)	- Total no of dropped packets
-	 *				num_queue_pkt(4)- Total no of packets in queue
-	 * holb_stats(168+)	- HOLB stats
-	 *		|______	num_pipes(4)			- Total num of pipes for which HOLB is enabled(currently 5)
-	 *		|______	num_holb_mon_clients(4)	- Total num of pipes for which HOLB is enabled(currently 5)
-	 *		|______	holb_discard_stats(80)	- HOLB Discard Stats
-	 *		|				|______ client_type(4)	- IPA Client type
-	 *		|						num_drp_cnt(4)	- Total number of dropped pkts
-	 *		|						num_drp_bytes(4)- Total number of dropped bytes
-	 *		|						reserved(4)	- Reserved.
-	 *		|______	holb_monitor_stats(80)	- No of clients for which HOLB monitrng is enabled(currently 5)
-	 *						|______ client_type(4)	- IPA Client type
-	 *								curr_index(4)	- Current HOLB monitoring index
-	 *								num_en_cnt(4)	- Number of times peripheral went to bad state
-	 *								num_dis_cnt(4)	- Number of times peripheral was recovered
-	 */
 	IPA_LNX_CMD_GENERIC_STATS,
-	/**
-	 * IPA_LNX_CMD_CLOCK_STATS - Includes following fields (in bytes)
-	 *							(min - 888 bytes, max - 900 bytes)
-	 *
-	 * active_clients(4)	- No of active clock votes
-	 * scale_thresh_svs(4)	- BW threshold value to be met for voting for SVS
-	 * scale_thresh_nom(4)	- BW threshold value to be met for voting for nominal
-	 * scale_thresh_tur(4)	- BW threshold value to be met for voting for turbo
-	 * aggr_bw(4)			- Total BW required from the clients for caculating the vote.
-	 * curr_clk_vote(4)		- Current active clock vote
-	 * pm_client_stats(864+)	- Power Management stats (36 clients)
-	 *		|______	pm_client_state(4)	- State of the PM client
-	 *				pm_client_group(4)	- Group of the PM client
-	 *				pm_client_bw(4)		- BW requested by PM client
-	 *				pm_client_hdl(4)	- PM Client hdl
-	 *				pm_client_type(4)	- Client type of the PM client
-	 *				reserved(4)			- Reserved.
-	 */
 	IPA_LNX_CMD_CLOCK_STATS,
-	/**
-	 * IPA_LNX_CMD_WLAN_INST_STATS - Includes following fields (in bytes)
-	 *							(min - 558 bytes, max - 600 bytes)
-	 *
-	 * num_wlan_instance(4)		- No of WLAN attaches
-	 * reserved(4)				- Reserved.
-	 * wlan_instance_info(550)	- Each WLAN Instance Info
-	 *		|______	instance_id(4)	- Instance id of the WLAN
-	 *		|		wdi_ver(4)		- WDI version in use
-	 *		|		wlan_mode(4)	- Indicates the WLAN mode
-	 *		|		wdi_over_gsi(4)	- Indicates whether communication is over GSI or uC
-	 *		|		dbs_mode(4)		- Indicates whether DBS mode is enabled
-	 *		|		pm_bandwidth(4)	- Bandwidth voted by the client
-	 *		|		num_pipes(4)	- Number of pipes associated with WLAN
-	 *		|		reserved(4)		- Reserved.
-	 *		|______ pipe_info(360)	- Pipe Information (120 x 3 pipes)
-	 *		|				|______ gsi_chan_ring_bp(8)	- Gsi channel ring base pointer address
-	 *		|						gsi_chan_ring_rp(8)	- Transfer Ring Current read pointer address
-	 *		|						gsi_chan_ring_wp(8)	- Transfer Ring Current write pointer address
-	 *		|						gsi_evt_ring_bp(8)	- Event ring base pointer address
-	 *		|						gsi_evt_ring_rp(8)	- Event Ring Current read pointer address
-	 *		|						gsi_evt_ring_wp(8)	- Event Ring Current write pointer address
-	 *		|						gsi_evt_ring_len(4)	- Transfer Ring length
-	 *		|						gsi_chan_ring_len(4)- Transfer Ring length
-	 *		|						buff_size(4)	- Size of buffer
-	 *		|						num_free_buff(4)- Number of free credits with HW
-	 *		|						gsi_ipa_if_tlv(4)	- Number of IPA_IF TLV
-	 *		|						gsi_ipa_if_aos(4)	- Number of IPA_IF AOS
-	 *		|						gsi_desc_size(4)	- Descriptor Size
-	 *		|						pipe_num(4)	- Pipe number of the client
-	 *		|						direction(4)	- Pipe direction(0 – IPA Consumer, 1 – IPA Producer)
-	 *		|						client_type(4)	- Client type
-	 *		|						gsi_chan_num(4)	- GSI channel number associated with Pipe
-	 *		|						gsi_evt_num(4)	- GSI event number associated with Pipe
-	 *		|						is_common_evt_ring(4)- Indicates whether common evt ring is used
-	 *		|						gsi_prot_type(4)- GSI Protocol type
-	 *		|						gsi_chan_state(4)-GSI Channel state
-	 *		|						gsi_chan_stop_stm(4)- GSI channel stop state machine
-	 *		|						gsi_poll_mode(4)- GSI Current Mode:- Polling/Interrupt
-	 *		|						gsi_db_in_bytes(4)	- Indicates whether DB in bytes
-	 *		|______ gsi_debug_stats(158)- GSI debug information
-	 *						|______ num_tx_instances(4)	- Number of tx instances
-	 *						|______ num_rx_instances(4)	- Number of rx instances
-	 *						|______	gsi_tx_debug_stats(102)- GSI TX Debug Stats Info (2 X 56)
-	 *						|			|______ tx_client(4) - TX client type
-	 *						|					num_tx_ring_100_perc_with_cred(4) - Total number of times the ring is full of free credits
-	 *						|					num_tx_ring_0_perc_with_cred(4) - Total number of times the ring has empty credits
-	 *						|					num_tx_ring_above_75_perc_cred(4) - Total number of times ring has > 75% free credits
-	 *						|					num_tx_ring_above_25_perc_cred(4) - Total number of times ring has < 25% of free credits
-	 *						|					num_tx_ring_stats_polled(4) - Total number of times TX ring stats are counted
-	 *						|					num_tx_oob(4) - Number of times GSI encountered OOB
-	 *						|					num_tx_oob_time(4) - Total time GSI was in OOB state i.e no credits available
-	 *						|					gsi_debug1(4) - Additional GSI Debug information
-	 *						|					gsi_debug2(4) - Additional GSI Debug information
-	 *						|					gsi_debug3(4) - Additional GSI Debug information
-	 *						|					gsi_debug4(4) - Additional GSI Debug information
-	 *						|					tx_summary(4) - 1 – Peripheral is bad in replenishing credits, 2 – IPA is not giving packets fast enough
-	 *						|					reserved(4)	- Reserved.
-	 *						|______	gsi_rx_debug_stats(48)- GSI RX Debug Stats Info (1 X 48)
-	 *									|______ rx_client(4) - RX client type
-	 *											num_rx_ring_100_perc_with_pack(4) - Total number of times the ring is full of packets
-	 *											num_rx_ring_0_perc_with_pack(4) - Total number of times the ring has 0 packets
-	 *											num_rx_ring_above_75_perc_pack(4) - Total number of times ring has > 75% packets
-	 *											num_rx_ring_above_25_perc_pack(4) - Total number of times ring has < 25% packets
-	 *											num_rx_ring_stats_polled(4) - Total number of times RX ring stats are counted
-	 *											num_rx_drop_stats(4) - Total number of times GSI dropped packets
-	 *											gsi_debug1(4) - Additional GSI Debug information
-	 *											gsi_debug2(4) - Additional GSI Debug information
-	 *											gsi_debug3(4) - Additional GSI Debug information
-	 *											gsi_debug4(4) - Additional GSI Debug information
-	 *											rx_summary(4) - 1 – Peripheral is bad in providing packets, 2 – IPA is not processing packets fast enough
-	 */
 	IPA_LNX_CMD_WLAN_INST_STATS,
-	/**
-	 * IPA_LNX_CMD_ETH_INST_STATS - Includes following fields (in bytes)
-	 *							(min - 724 bytes, max - 800 bytes)
-	 *
-	 * num_eth_instance(4)		- No of ETH attaches
-	 * reserved(4)				- Reserved.
-	 * eth_instance_info(716)	- Each ETH Instance Info (358 x 2)
-	 *		|______	instance_id(4)	- Instance id of the ETH
-	 *		|		eth_mode(4)		- Ethernet mode
-	 *		|		pm_bandwidth(4)	- Bandwidth voted by the client
-	 *		|		num_pipes(4)	- Number of pipes associated with ETH
-	 *		|______ pipe_info(240)	- Pipe Information (120 x 2 pipes)
-	 *		|				|______ gsi_chan_ring_bp(8)	- Gsi channel ring base pointer address
-	 *		|						gsi_chan_ring_rp(8)	- Transfer Ring Current read pointer address
-	 *		|						gsi_chan_ring_wp(8)	- Transfer Ring Current write pointer address
-	 *		|						gsi_evt_ring_bp(8)	- Event ring base pointer address
-	 *		|						gsi_evt_ring_rp(8)	- Event Ring Current read pointer address
-	 *		|						gsi_evt_ring_wp(8)	- Event Ring Current write pointer address
-	 *		|						gsi_evt_ring_len(4)	- Transfer Ring length
-	 *		|						gsi_chan_ring_len(4)- Transfer Ring length
-	 *		|						buff_size(4)	- Size of buffer
-	 *		|						num_free_buff(4)- Number of free credits with HW
-	 *		|						gsi_ipa_if_tlv(4)	- Number of IPA_IF TLV
-	 *		|						gsi_ipa_if_aos(4)	- Number of IPA_IF AOS
-	 *		|						gsi_desc_size(4)	- Descriptor Size
-	 *		|						pipe_num(4)	- Pipe number of the client
-	 *		|						direction(4)	- Pipe direction(0 – IPA Consumer, 1 – IPA Producer)
-	 *		|						client_type(4)	- Client type
-	 *		|						gsi_chan_num(4)	- GSI channel number associated with Pipe
-	 *		|						gsi_evt_num(4)	- GSI event number associated with Pipe
-	 *		|						is_common_evt_ring(4)- Indicates whether common evt ring is used
-	 *		|						gsi_prot_type(4)- GSI Protocol type
-	 *		|						gsi_chan_state(4)-GSI Channel state
-	 *		|						gsi_chan_stop_stm(4)- GSI channel stop state machine
-	 *		|						gsi_poll_mode(4)- GSI Current Mode:- Polling/Interrupt
-	 *		|						gsi_db_in_bytes(4)	- Indicates whether DB in bytes
-	 *		|______ gsi_debug_stats(102)- GSI debug information
-	 *						|______ num_tx_instances(4)	- Number of tx instances
-	 *						|______ num_rx_instances(4)	- Number of rx instances
-	 *						|______	gsi_tx_debug_stats(56)- GSI TX Debug Stats Info (1 X 56)
-	 *						|			|______ tx_client(4) - TX client type
-	 *						|					num_tx_ring_100_perc_with_cred(4) - Total number of times the ring is full of free credits
-	 *						|					num_tx_ring_0_perc_with_cred(4) - Total number of times the ring has empty credits
-	 *						|					num_tx_ring_above_75_perc_cred(4) - Total number of times ring has > 75% free credits
-	 *						|					num_tx_ring_above_25_perc_cred(4) - Total number of times ring has < 25% of free credits
-	 *						|					num_tx_ring_stats_polled(4) - Total number of times TX ring stats are counted
-	 *						|					num_tx_oob(4) - Number of times GSI encountered OOB
-	 *						|					num_tx_oob_time(4) - Total time GSI was in OOB state i.e no credits available
-	 *						|					gsi_debug1(4) - Additional GSI Debug information
-	 *						|					gsi_debug2(4) - Additional GSI Debug information
-	 *						|					gsi_debug3(4) - Additional GSI Debug information
-	 *						|					gsi_debug4(4) - Additional GSI Debug information
-	 *						|					tx_summary(4) - 1 – Peripheral is bad in replenishing credits, 2 – IPA is not giving packets fast enough
-	 *						|					reserved(4)	- Reserved.
-	 *						|______	gsi_rx_debug_stats(48)- GSI RX Debug Stats Info (1 X 48)
-	 *									|______ rx_client(4) - RX client type
-	 *											num_rx_ring_100_perc_with_pack(4) - Total number of times the ring is full of packets
-	 *											num_rx_ring_0_perc_with_pack(4) - Total number of times the ring has 0 packets
-	 *											num_rx_ring_above_75_perc_pack(4) - Total number of times ring has > 75% packets
-	 *											num_rx_ring_above_25_perc_pack(4) - Total number of times ring has < 25% packets
-	 *											num_rx_ring_stats_polled(4) - Total number of times RX ring stats are counted
-	 *											num_rx_drop_stats(4) - Total number of times GSI dropped packets
-	 *											gsi_debug1(4) - Additional GSI Debug information
-	 *											gsi_debug2(4) - Additional GSI Debug information
-	 *											gsi_debug3(4) - Additional GSI Debug information
-	 *											gsi_debug4(4) - Additional GSI Debug information
-	 *											rx_summary(4) - 1 – Peripheral is bad in providing packets, 2 – IPA is not processing packets fast enough
-	 */
 	IPA_LNX_CMD_ETH_INST_STATS,
-	/**
-	 * IPA_LNX_CMD_USB_INST_STATS - Includes following fields (in bytes)
-	 *							(min - 366 bytes, max - 400 bytes)
-	 *
-	 * num_usb_instance(4)	- No of USB attaches
-	 * reserved(4)			- Reserved.
-	 * usb_instance_info(358)	- Each USB Instance Info
-	 *		|______	instance_id(4)	- Instance id of the USB
-	 *		|		usb_mode(4)		- USB mode
-	 *		|		pm_bandwidth(4)	- Bandwidth voted by the client
-	 *		|		num_pipes(4)	- Number of pipes associated with USB
-	 *		|______ pipe_info(240)	- Pipe Information (120 x 2 pipes)
-	 *		|				|______ gsi_chan_ring_bp(8)	- Gsi channel ring base pointer address
-	 *		|						gsi_chan_ring_rp(8)	- Transfer Ring Current read pointer address
-	 *		|						gsi_chan_ring_wp(8)	- Transfer Ring Current write pointer address
-	 *		|						gsi_evt_ring_bp(8)	- Event ring base pointer address
-	 *		|						gsi_evt_ring_rp(8)	- Event Ring Current read pointer address
-	 *		|						gsi_evt_ring_wp(8)	- Event Ring Current write pointer address
-	 *		|						gsi_evt_ring_len(4)	- Transfer Ring length
-	 *		|						gsi_chan_ring_len(4)- Transfer Ring length
-	 *		|						buff_size(4)	- Size of buffer
-	 *		|						num_free_buff(4)- Number of free credits with HW
-	 *		|						gsi_ipa_if_tlv(4)	- Number of IPA_IF TLV
-	 *		|						gsi_ipa_if_aos(4)	- Number of IPA_IF AOS
-	 *		|						gsi_desc_size(4)	- Descriptor Size
-	 *		|						pipe_num(4)	- Pipe number of the client
-	 *		|						direction(4)	- Pipe direction(0 – IPA Consumer, 1 – IPA Producer)
-	 *		|						client_type(4)	- Client type
-	 *		|						gsi_chan_num(4)	- GSI channel number associated with Pipe
-	 *		|						gsi_evt_num(4)	- GSI event number associated with Pipe
-	 *		|						is_common_evt_ring(4)- Indicates whether common evt ring is used
-	 *		|						gsi_prot_type(4)- GSI Protocol type
-	 *		|						gsi_chan_state(4)-GSI Channel state
-	 *		|						gsi_chan_stop_stm(4)- GSI channel stop state machine
-	 *		|						gsi_poll_mode(4)- GSI Current Mode:- Polling/Interrupt
-	 *		|						gsi_db_in_bytes(4)	- Indicates whether DB in bytes
-	 *		|______ gsi_debug_stats(102)- GSI debug information
-	 *						|______ num_tx_instances(4)	- Number of tx instances
-	 *						|______ num_rx_instances(4)	- Number of rx instances
-	 *						|______	gsi_tx_debug_stats(56)- GSI TX Debug Stats Info (1 X 56)
-	 *						|			|______ tx_client(4) - TX client type
-	 *						|					num_tx_ring_100_perc_with_cred(4) - Total number of times the ring is full of free credits
-	 *						|					num_tx_ring_0_perc_with_cred(4) - Total number of times the ring has empty credits
-	 *						|					num_tx_ring_above_75_perc_cred(4) - Total number of times ring has > 75% free credits
-	 *						|					num_tx_ring_above_25_perc_cred(4) - Total number of times ring has < 25% of free credits
-	 *						|					num_tx_ring_stats_polled(4) - Total number of times TX ring stats are counted
-	 *						|					num_tx_oob(4) - Number of times GSI encountered OOB
-	 *						|					num_tx_oob_time(4) - Total time GSI was in OOB state i.e no credits available
-	 *						|					gsi_debug1(4) - Additional GSI Debug information
-	 *						|					gsi_debug2(4) - Additional GSI Debug information
-	 *						|					gsi_debug3(4) - Additional GSI Debug information
-	 *						|					gsi_debug4(4) - Additional GSI Debug information
-	 *						|					tx_summary(4) - 1 – Peripheral is bad in replenishing credits, 2 – IPA is not giving packets fast enough
-	 *						|					reserved(4)	- Reserved.
-	 *						|______	gsi_rx_debug_stats(48)- GSI RX Debug Stats Info (1 X 48)
-	 *									|______ rx_client(4) - RX client type
-	 *											num_rx_ring_100_perc_with_pack(4) - Total number of times the ring is full of packets
-	 *											num_rx_ring_0_perc_with_pack(4) - Total number of times the ring has 0 packets
-	 *											num_rx_ring_above_75_perc_pack(4) - Total number of times ring has > 75% packets
-	 *											num_rx_ring_above_25_perc_pack(4) - Total number of times ring has < 25% packets
-	 *											num_rx_ring_stats_polled(4) - Total number of times RX ring stats are counted
-	 *											num_rx_drop_stats(4) - Total number of times GSI dropped packets
-	 *											gsi_debug1(4) - Additional GSI Debug information
-	 *											gsi_debug2(4) - Additional GSI Debug information
-	 *											gsi_debug3(4) - Additional GSI Debug information
-	 *											gsi_debug4(4) - Additional GSI Debug information
-	 *											rx_summary(4) - 1 – Peripheral is bad in providing packets, 2 – IPA is not processing packets fast enough
-	 */
 	IPA_LNX_CMD_USB_INST_STATS,
-	/**
-	 * IPA_LNX_CMD_MHIP_INST_STATS - Includes following fields (in bytes)
-	 *							(min - 710 bytes, max - 800 bytes)
-	 *
-	 * num_mhip_instance(4)	- No of MHIP attaches
-	 * reserved(4)			- Reserved.
-	 * mhip_instance_info(702)	- Each MHIP Instance Info
-	 *		|______	instance_id(4)	- Instance id of the MHIP
-	 *		|		mhip_mode(4)	- MHIP mode
-	 *		|		pm_bandwidth(4)	- Bandwidth voted by the client
-	 *		|		num_pipes(4)	- Number of pipes associated with USB
-	 *		|______ pipe_info(480)	- Pipe Information (120 x 4 pipes)
-	 *		|				|______ gsi_chan_ring_bp(8)	- Gsi channel ring base pointer address
-	 *		|						gsi_chan_ring_rp(8)	- Transfer Ring Current read pointer address
-	 *		|						gsi_chan_ring_wp(8)	- Transfer Ring Current write pointer address
-	 *		|						gsi_evt_ring_bp(8)	- Event ring base pointer address
-	 *		|						gsi_evt_ring_rp(8)	- Event Ring Current read pointer address
-	 *		|						gsi_evt_ring_wp(8)	- Event Ring Current write pointer address
-	 *		|						gsi_evt_ring_len(4)	- Transfer Ring length
-	 *		|						gsi_chan_ring_len(4)- Transfer Ring length
-	 *		|						buff_size(4)	- Size of buffer
-	 *		|						num_free_buff(4)- Number of free credits with HW
-	 *		|						gsi_ipa_if_tlv(4)	- Number of IPA_IF TLV
-	 *		|						gsi_ipa_if_aos(4)	- Number of IPA_IF AOS
-	 *		|						gsi_desc_size(4)	- Descriptor Size
-	 *		|						pipe_num(4)	- Pipe number of the client
-	 *		|						direction(4)	- Pipe direction(0 – IPA Consumer, 1 – IPA Producer)
-	 *		|						client_type(4)	- Client type
-	 *		|						gsi_chan_num(4)	- GSI channel number associated with Pipe
-	 *		|						gsi_evt_num(4)	- GSI event number associated with Pipe
-	 *		|						is_common_evt_ring(4)- Indicates whether common evt ring is used
-	 *		|						gsi_prot_type(4)- GSI Protocol type
-	 *		|						gsi_chan_state(4)-GSI Channel state
-	 *		|						gsi_chan_stop_stm(4)- GSI channel stop state machine
-	 *		|						gsi_poll_mode(4)- GSI Current Mode:- Polling/Interrupt
-	 *		|						gsi_db_in_bytes(4)	- Indicates whether DB in bytes
-	 *		|______ gsi_debug_stats(206)- GSI debug information
-	 *						|______ num_tx_instances(4)	- Number of tx instances
-	 *						|______ num_rx_instances(4)	- Number of rx instances
-	 *						|______	gsi_tx_debug_stats(102)- GSI TX Debug Stats Info (2 X 56)
-	 *						|			|______ tx_client(4) - TX client type
-	 *						|					num_tx_ring_100_perc_with_cred(4) - Total number of times the ring is full of free credits
-	 *						|					num_tx_ring_0_perc_with_cred(4) - Total number of times the ring has empty credits
-	 *						|					num_tx_ring_above_75_perc_cred(4) - Total number of times ring has > 75% free credits
-	 *						|					num_tx_ring_above_25_perc_cred(4) - Total number of times ring has < 25% of free credits
-	 *						|					num_tx_ring_stats_polled(4) - Total number of times TX ring stats are counted
-	 *						|					num_tx_oob(4) - Number of times GSI encountered OOB
-	 *						|					num_tx_oob_time(4) - Total time GSI was in OOB state i.e no credits available
-	 *						|					gsi_debug1(4) - Additional GSI Debug information
-	 *						|					gsi_debug2(4) - Additional GSI Debug information
-	 *						|					gsi_debug3(4) - Additional GSI Debug information
-	 *						|					gsi_debug4(4) - Additional GSI Debug information
-	 *						|					tx_summary(4) - 1 – Peripheral is bad in replenishing credits, 2 – IPA is not giving packets fast enough
-	 *						|					reserved(4)	- Reserved.
-	 *						|______	gsi_rx_debug_stats(96)- GSI RX Debug Stats Info (2 X 48)
-	 *									|______ rx_client(4) - RX client type
-	 *											num_rx_ring_100_perc_with_pack(4) - Total number of times the ring is full of packets
-	 *											num_rx_ring_0_perc_with_pack(4) - Total number of times the ring has 0 packets
-	 *											num_rx_ring_above_75_perc_pack(4) - Total number of times ring has > 75% packets
-	 *											num_rx_ring_above_25_perc_pack(4) - Total number of times ring has < 25% packets
-	 *											num_rx_ring_stats_polled(4) - Total number of times RX ring stats are counted
-	 *											num_rx_drop_stats(4) - Total number of times GSI dropped packets
-	 *											gsi_debug1(4) - Additional GSI Debug information
-	 *											gsi_debug2(4) - Additional GSI Debug information
-	 *											gsi_debug3(4) - Additional GSI Debug information
-	 *											gsi_debug4(4) - Additional GSI Debug information
-	 *											rx_summary(4) - 1 – Peripheral is bad in providing packets, 2 – IPA is not processing packets fast enough
-	 */
 	IPA_LNX_CMD_MHIP_INST_STATS,
+	IPA_LNX_CMD_CONSOLIDATED_STATS,
 	IPA_LNX_CMD_STATS_MAX,
 };
 

+ 2 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_uc.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -1588,7 +1588,7 @@ int ipa3_uc_debug_stats_alloc(
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 		command,
 		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-		false, 10 * HZ);
+		false, 20 * HZ);
 	if (result) {
 		IPAERR("fail to alloc offload stats\n");
 		goto cleanup;

+ 112 - 42
drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -63,10 +63,12 @@ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
  */
 int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
 {
-#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+#define TX_STATS(x, y) stats->tx_ch_stats[x].y = \
 	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
-#define RX_STATS(y) stats->rx_ch_stats[0].y = \
-	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+#define RX_STATS(x, y) stats->rx_ch_stats[x].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[x].y
+
+	int i = 0;
 
 	if (unlikely(!ipa3_ctx)) {
 		IPAERR("IPA driver was not initialized\n");
@@ -82,35 +84,39 @@ int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	TX_STATS(num_pkts_processed);
-	TX_STATS(ring_stats.ringFull);
-	TX_STATS(ring_stats.ringEmpty);
-	TX_STATS(ring_stats.ringUsageHigh);
-	TX_STATS(ring_stats.ringUsageLow);
-	TX_STATS(ring_stats.RingUtilCount);
-	TX_STATS(gsi_stats.bamFifoFull);
-	TX_STATS(gsi_stats.bamFifoEmpty);
-	TX_STATS(gsi_stats.bamFifoUsageHigh);
-	TX_STATS(gsi_stats.bamFifoUsageLow);
-	TX_STATS(gsi_stats.bamUtilCount);
-	TX_STATS(num_db);
-	TX_STATS(num_qmb_int_handled);
-	TX_STATS(ipa_pipe_number);
-
-	RX_STATS(num_pkts_processed);
-	RX_STATS(ring_stats.ringFull);
-	RX_STATS(ring_stats.ringEmpty);
-	RX_STATS(ring_stats.ringUsageHigh);
-	RX_STATS(ring_stats.ringUsageLow);
-	RX_STATS(ring_stats.RingUtilCount);
-	RX_STATS(gsi_stats.bamFifoFull);
-	RX_STATS(gsi_stats.bamFifoEmpty);
-	RX_STATS(gsi_stats.bamFifoUsageHigh);
-	RX_STATS(gsi_stats.bamFifoUsageLow);
-	RX_STATS(gsi_stats.bamUtilCount);
-	RX_STATS(num_db);
-	RX_STATS(num_qmb_int_handled);
-	RX_STATS(ipa_pipe_number);
+	for (i = 0; i < IPA_UC_MAX_NTN_TX_CHANNELS; i++) {
+		TX_STATS(i, num_pkts_processed);
+		TX_STATS(i, ring_stats.ringFull);
+		TX_STATS(i, ring_stats.ringEmpty);
+		TX_STATS(i, ring_stats.ringUsageHigh);
+		TX_STATS(i, ring_stats.ringUsageLow);
+		TX_STATS(i, ring_stats.RingUtilCount);
+		TX_STATS(i, gsi_stats.bamFifoFull);
+		TX_STATS(i, gsi_stats.bamFifoEmpty);
+		TX_STATS(i, gsi_stats.bamFifoUsageHigh);
+		TX_STATS(i, gsi_stats.bamFifoUsageLow);
+		TX_STATS(i, gsi_stats.bamUtilCount);
+		TX_STATS(i, num_db);
+		TX_STATS(i, num_qmb_int_handled);
+		TX_STATS(i, ipa_pipe_number);
+	}
+
+	for (i = 0; i < IPA_UC_MAX_NTN_RX_CHANNELS; i++) {
+		RX_STATS(i, num_pkts_processed);
+		RX_STATS(i, ring_stats.ringFull);
+		RX_STATS(i, ring_stats.ringEmpty);
+		RX_STATS(i, ring_stats.ringUsageHigh);
+		RX_STATS(i, ring_stats.ringUsageLow);
+		RX_STATS(i, ring_stats.RingUtilCount);
+		RX_STATS(i, gsi_stats.bamFifoFull);
+		RX_STATS(i, gsi_stats.bamFifoEmpty);
+		RX_STATS(i, gsi_stats.bamFifoUsageHigh);
+		RX_STATS(i, gsi_stats.bamFifoUsageLow);
+		RX_STATS(i, gsi_stats.bamUtilCount);
+		RX_STATS(i, num_db);
+		RX_STATS(i, num_qmb_int_handled);
+		RX_STATS(i, ipa_pipe_number);
+	}
 
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
@@ -173,6 +179,12 @@ int ipa3_ntn_init(void)
 
 	ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
 
+	/* ntn_init */
+	ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+	ipa3_ctx->uc_ntn_ctx.priv = NULL;
+	ipa3_ctx->uc_ntn_ctx.ntn_reg_base_ptr_pa_rd = 0x0;
+	ipa3_ctx->uc_ntn_ctx.smmu_mapped = 0;
+
 	return 0;
 }
 
@@ -209,6 +221,7 @@ static int ipa3_uc_send_ntn_setup_pipe_cmd(
 	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
 	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
 	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+	IPADBG("db_mode = %d\n", ntn_info->db_mode);
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 		cmd.size = sizeof(*cmd_data_v4_0);
 	else
@@ -244,6 +257,7 @@ static int ipa3_uc_send_ntn_setup_pipe_cmd(
 	Ntn_params->num_buffers = ntn_info->num_buffers;
 	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
 	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->db_mode = ntn_info->db_mode;
 	Ntn_params->ipa_pipe_number = ipa_ep_idx;
 	Ntn_params->dir = dir;
 
@@ -262,25 +276,61 @@ static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params,
 	bool map)
 {
 	struct iommu_domain *smmu_domain;
-	int result;
+	int result = 0;
 	int i;
 	u64 iova;
 	phys_addr_t pa;
 	u64 iova_p;
 	phys_addr_t pa_p;
 	u32 size_p;
+	bool map_unmap_once;
 
 	if (params->data_buff_size > PAGE_SIZE) {
 		IPAERR("invalid data buff size\n");
 		return -EINVAL;
 	}
 
-	result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
-		PAGE_SIZE), map, IPA_SMMU_CB_UC);
-	if (result) {
-		IPAERR("failed to %s uC regs %d\n",
-			map ? "map" : "unmap", result);
-		goto fail;
+	/* only map/unmap once the ntn_reg_base_ptr_pa */
+	map_unmap_once = (map && ipa3_ctx->uc_ntn_ctx.smmu_mapped == 0)
+	|| (!map && ipa3_ctx->uc_ntn_ctx.smmu_mapped == 1);
+
+	IPADBG(" %s uC regs, smmu_mapped %d\n",
+		map ? "map" : "unmap", ipa3_ctx->uc_ntn_ctx.smmu_mapped);
+
+	if (map_unmap_once) {
+		result = ipa3_smmu_map_peer_reg(rounddown(
+				params->ntn_reg_base_ptr_pa, PAGE_SIZE),
+				map, IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s uC regs %d\n",
+				map ? "map" : "unmap", result);
+			goto fail;
+		}
+		/* backup the ntn_reg_base_ptr_pa_r */
+		ipa3_ctx->uc_ntn_ctx.ntn_reg_base_ptr_pa_rd =
+			rounddown(params->ntn_reg_base_ptr_pa,
+			PAGE_SIZE);
+		/* ntn_reg_base_ptr_pa_rd is printed as unsigned long long,
+		 * so the conversion must be %llx (was %0x: format/argument
+		 * size mismatch, undefined output on 64-bit).
+		 */
+		IPADBG(" %s ntn_reg_base_ptr_pa regs 0x%llx smmu_mapped %d\n",
+			map ? "map" : "unmap",
+			(unsigned long long)
+			ipa3_ctx->uc_ntn_ctx.ntn_reg_base_ptr_pa_rd,
+			ipa3_ctx->uc_ntn_ctx.smmu_mapped);
+	}
+	/* update smmu_mapped reference count */
+	if (map) {
+		ipa3_ctx->uc_ntn_ctx.smmu_mapped++;
+		IPADBG("uc_ntn_ctx.smmu_mapped %d\n",
+			ipa3_ctx->uc_ntn_ctx.smmu_mapped);
+	} else {
+		if (ipa3_ctx->uc_ntn_ctx.smmu_mapped == 0) {
+			IPAERR("Invalid smmu_mapped %d\n",
+				ipa3_ctx->uc_ntn_ctx.smmu_mapped);
+			/* result was initialized to 0; without an explicit
+			 * error code this failure path would return success.
+			 */
+			result = -EINVAL;
+			goto fail;
+		} else {
+			ipa3_ctx->uc_ntn_ctx.smmu_mapped--;
+			IPADBG("uc_ntn_ctx.smmu_mapped %d\n",
+				ipa3_ctx->uc_ntn_ctx.smmu_mapped);
+		}
 	}
 
 	if (params->smmu_enabled) {
@@ -473,7 +523,10 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
 		goto fail_disable_dp_ul;
 	}
 	ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
-	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	/* Rx: IPA_UC_MAILBOX_m_n m = 1, n =3 mmio*/
+	outp->ul_uc_db_iomem = ipa3_ctx->mmio +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		1, 3);
 	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
 	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
 		ipa_ep_idx_ul);
@@ -511,7 +564,10 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
 		result = -EFAULT;
 		goto fail_disable_dp_dl;
 	}
-	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	/* Tx: IPA_UC_MAILBOX_m_n m = 1, n =4 mmio */
+	outp->dl_uc_db_iomem = ipa3_ctx->mmio +
+		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+		1, 4);
 	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
 
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -551,6 +607,20 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
 	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
 	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
 
+	if (ipa_ep_idx_ul == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx_ul >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("ipa_ep_idx_ul %d invalid\n",
+			ipa_ep_idx_ul);
+		return -EFAULT;
+	}
+
+	if (ipa_ep_idx_dl == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx_dl >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("ep ipa_ep_idx_dl %d invalid\n",
+			ipa_ep_idx_dl);
+		return -EFAULT;
+	}
+
 	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
 	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
 

+ 8 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA_UC_OFFLOAD_I_H_
@@ -13,8 +13,8 @@
  * Neutrino protocol related data structures
  */
 
-#define IPA_UC_MAX_NTN_TX_CHANNELS 1
-#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+#define IPA_UC_MAX_NTN_TX_CHANNELS 2
+#define IPA_UC_MAX_NTN_RX_CHANNELS 2
 
 #define IPA_NTN_TX_DIR 1
 #define IPA_NTN_RX_DIR 2
@@ -287,6 +287,8 @@ struct ipa3_uc_ntn_ctx {
 	struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
 	void *priv;
 	ipa_uc_ready_cb uc_ready_cb;
+	phys_addr_t ntn_reg_base_ptr_pa_rd;
+	u32 smmu_mapped;
 };
 
 /**
@@ -365,6 +367,9 @@ struct Ipa3HwNtnSetUpCmdData_t {
 	u8  ipa_pipe_number;
 	u8  dir;
 	u16 data_buff_size;
+	u8 db_mode;
+	u8 reserved1;
+	u16 reserved2;
 
 } __packed;
 

+ 97 - 12
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -4176,6 +4176,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 2, 28, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 },
 			IPA_TX_INSTANCE_NA },
+	[IPA_5_0][IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD] = {
+			true, IPA_v5_0_GROUP_URLLC,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 8, 28, 32, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 3 },
+			IPA_TX_INSTANCE_UL },
 	[IPA_5_0][IPA_CLIENT_TEST_PROD] = {
 			true, IPA_v5_0_GROUP_UL,
 			true,
@@ -4495,6 +4502,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 2, 28, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 },
 			IPA_TX_INSTANCE_NA },
+	[IPA_5_0_MHI][IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD] = {
+			true, IPA_v5_0_GROUP_URLLC,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 8, 28, 32, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 3 },
+			IPA_TX_INSTANCE_UL },
 	[IPA_5_0_MHI][IPA_CLIENT_TEST_PROD] = {
 			true, IPA_v5_0_GROUP_UL,
 			true,
@@ -5782,9 +5796,9 @@ static struct ipa3_mem_partition ipa_5_0_mem_part = {
 	.apps_hdr_proc_ctx_size = 0x200,
 	.apps_hdr_proc_ctx_size_ddr = 0x0,
 	.stats_quota_q6_ofst = 0x2868,
-	.stats_quota_q6_size = 0x48,
-	.stats_quota_ap_ofst = 0x28B0,
-	.stats_quota_ap_size = 0x60,
+	.stats_quota_q6_size = 0x60,
+	.stats_quota_ap_ofst = 0x28C8,
+	.stats_quota_ap_size = 0x48,
 	.stats_tethering_ofst = 0x2910,
 	.stats_tethering_size = 0x0,
 	.apps_v4_flt_nhash_ofst = 0x2918,
@@ -5879,9 +5893,9 @@ static struct ipa3_mem_partition ipa_5_1_mem_part = {
 	.apps_hdr_proc_ctx_size = 0x200,
 	.apps_hdr_proc_ctx_size_ddr = 0x0,
 	.stats_quota_q6_ofst = 0x2868,
-	.stats_quota_q6_size = 0x48,
-	.stats_quota_ap_ofst = 0x28B0,
-	.stats_quota_ap_size = 0x60,
+	.stats_quota_q6_size = 0x60,
+	.stats_quota_ap_ofst = 0x28C8,
+	.stats_quota_ap_size = 0x48,
 	.stats_tethering_ofst = 0x2910,
 	.stats_tethering_size = 0x3c0,
 	.stats_flt_v4_ofst = 0,
@@ -8381,8 +8395,8 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
 	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
 		ep_holb);
 
-	/* IPA4.5 issue requires HOLB_EN to be written twice */
-	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+	/* For targets > IPA_4.0 issue requires HOLB_EN to be written twice */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 		ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 			clnt_hdl, ep_holb);
 
@@ -8585,6 +8599,8 @@ int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
 	/* copy over EP cfg */
 	ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
 
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
 	if (ipa3_ctx->eogre_enabled) {
 		/* reconfigure ep metadata reg to override mux-id */
 		ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_ofst_metadata_valid = 0;
@@ -8594,8 +8610,6 @@ int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
 			&ipa3_ctx->ep[clnt_hdl].cfg.hdr);
 	}
 
-	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
-
 	ep_md_reg_wrt = *ep_md;
 	qmap_id = (ep_md->qmap_id <<
 		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
@@ -9807,6 +9821,10 @@ retry_alloc:
 		WARN_ON(1);
 		if (atomic_dec_return(&comp->cnt) == 0)
 			kfree(comp);
+		if (cmd.base) {
+			dma_free_coherent(ipa3_ctx->pdev, cmd.size,
+				cmd.base, cmd.phys_base);
+		}
 		return -ETIME;
 	}
 
@@ -9814,6 +9832,11 @@ retry_alloc:
 	if (atomic_dec_return(&comp->cnt) == 0)
 		kfree(comp);
 
+	if (cmd.base) {
+		dma_free_coherent(ipa3_ctx->pdev, cmd.size,
+			cmd.base, cmd.phys_base);
+	}
+
 	/*
 	 * sleep for short period to ensure IPA wrote all packets to
 	 * the transport
@@ -9841,7 +9864,7 @@ fail_free_desc:
 			tag_desc[i].callback(tag_desc[i].user1,
 				tag_desc[i].user2);
 	if (cmd.base) {
-		dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		dma_free_coherent(ipa3_ctx->pdev, cmd.size,
 			cmd.base, cmd.phys_base);
 	}
 fail_free_tag_desc:
@@ -12322,6 +12345,68 @@ int ipa3_get_prot_id(enum ipa_client_type client)
 	return prot_id;
 }
 
+/* Read NTN3 consumer (Rx) channel stats from GSI scratch/refetch regs. */
+void __ipa_ntn3_cons_stats_get(struct ipa_ntn3_stats_rx *stats, enum ipa_client_type client)
+{
+	int ch_id, ipa_ep_idx;
+
+	/* Resolve the EP before taking the active-clients vote so the
+	 * early-return path cannot leak an IPA_ACTIVE_CLIENTS_INC_SIMPLE()
+	 * without the matching DEC (which would block power collapse).
+	 */
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
+		return;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
+
+	stats->pending_db_after_rollback = gsi_ntn3_client_stats_get(ipa_ep_idx, 4, ch_id);
+	stats->msi_db_idx = gsi_ntn3_client_stats_get(ipa_ep_idx, 5, ch_id);
+	stats->chain_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 6, ch_id);
+	stats->err_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 7, ch_id);
+	stats->tres_handled = gsi_ntn3_client_stats_get(ipa_ep_idx, 8, ch_id);
+	stats->rollbacks_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 9, ch_id);
+	stats->msi_db_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, -1, ch_id);
+
+	stats->wp = gsi_get_refetch_reg(ch_id, false);
+	stats->rp = gsi_get_refetch_reg(ch_id, true);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/* Read NTN3 producer (Tx) channel stats from GSI scratch/refetch regs. */
+void __ipa_ntn3_prod_stats_get(struct ipa_ntn3_stats_tx *stats, enum ipa_client_type client)
+{
+	int ch_id, ipa_ep_idx;
+
+	/* Resolve the EP before taking the active-clients vote so the
+	 * early-return path cannot leak an IPA_ACTIVE_CLIENTS_INC_SIMPLE()
+	 * without the matching DEC (which would block power collapse).
+	 */
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
+		return;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
+
+	stats->pending_db_after_rollback = gsi_ntn3_client_stats_get(ipa_ep_idx, 4, ch_id);
+	stats->msi_db_idx = gsi_ntn3_client_stats_get(ipa_ep_idx, 5, ch_id);
+	stats->derr_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 6, ch_id);
+	stats->oob_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 7, ch_id);
+	stats->tres_handled = gsi_ntn3_client_stats_get(ipa_ep_idx, 8, ch_id);
+	stats->rollbacks_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, 9, ch_id);
+	stats->msi_db_cnt = gsi_ntn3_client_stats_get(ipa_ep_idx, -1, ch_id);
+
+	stats->wp = gsi_get_refetch_reg(ch_id, false);
+	stats->rp = gsi_get_refetch_reg(ch_id, true);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/* Collect NTN3 Rx (cons) and Tx (prod) stats for one ethernet instance:
+ * inst_id 0 -> IPA_CLIENT_ETHERNET_*, otherwise -> IPA_CLIENT_ETHERNET2_*.
+ */
+void ipa_eth_ntn3_get_status(struct ipa_ntn3_client_stats *s, unsigned int inst_id)
+{
+	if (inst_id == 0) {
+		__ipa_ntn3_cons_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET_CONS);
+		__ipa_ntn3_prod_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET_PROD);
+	} else {
+		__ipa_ntn3_cons_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET2_CONS);
+		__ipa_ntn3_prod_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET2_PROD);
+	}
+}
+
 void ipa3_eth_get_status(u32 client, int scratch_id,
 	struct ipa3_eth_error_stats *stats)
 {
@@ -12364,7 +12449,7 @@ void ipa3_eth_get_status(u32 client, int scratch_id,
 		stats->rp = gsi_get_refetch_reg(ch_id, true);
 		break;
 	case IPA_CLIENT_ETHERNET_PROD:
-		stats->wp = gsi_get_wp(ch_id);
+		stats->wp = gsi_get_refetch_reg(ch_id, false);
 		stats->rp = gsi_get_refetch_reg(ch_id, true);
 		break;
 	case IPA_CLIENT_ETHERNET_CONS:

+ 14 - 6
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c

@@ -4429,19 +4429,25 @@ nhash_alloc_fail:
 	return -ENOMEM;
 }
 
-u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 total_sz_lcl_tbls)
+u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 num_lcl_tbls, u32 total_sz_lcl_tbls)
 {
 	u32 result = total_sz_lcl_tbls;
 	struct ipahal_fltrt_obj *obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
 
 	/* for table terminator */
-	result += obj->tbl_width * total_sz_lcl_tbls;
+	result += obj->tbl_width * num_lcl_tbls;
 	/* align the start of local rule-set */
-	result += obj->lcladdr_alignment * total_sz_lcl_tbls;
+	result += obj->lcladdr_alignment * num_lcl_tbls;
 	/* SRAM block size alignment */
 	result += obj->blk_sz_alignment;
 	result &= ~(obj->blk_sz_alignment);
 
+	/* Single-literal format string (checkpatch: do not split quoted
+	 * strings across lines); runtime output is unchanged.
+	 */
+	IPAHAL_DBG_LOW(
+		"num_lcl_tbls = %u total_sz_lcl_tbls = %u tbl_width = %u lcladdr_alignment = %u blk_sz_alignment = %u result = %u\n",
+		num_lcl_tbls, total_sz_lcl_tbls,
+		obj->tbl_width, obj->lcladdr_alignment, obj->blk_sz_alignment,
+		result);
+
 	return result;
 }
 
@@ -4476,7 +4482,8 @@ static int ipa_fltrt_alloc_lcl_bdy(
 	 */
 	if (params->total_sz_lcl_nhash_tbls + params->num_lcl_nhash_tbls > 0) {
 		params->nhash_bdy.size =
-			ipa_fltrt_get_aligned_lcl_bdy_size(params->total_sz_lcl_nhash_tbls);
+			ipa_fltrt_get_aligned_lcl_bdy_size(params->num_lcl_nhash_tbls,
+				params->total_sz_lcl_nhash_tbls);
 
 		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
 			params->nhash_bdy.size);
@@ -4502,8 +4509,9 @@ alloc1:
 	}
 
 	if (obj->support_hash && params->hash_bdy.size) {
-		params->hash_bdy.size = 
-			ipa_fltrt_get_aligned_lcl_bdy_size(params->total_sz_lcl_hash_tbls);
+		params->hash_bdy.size =
+			ipa_fltrt_get_aligned_lcl_bdy_size(params->num_lcl_hash_tbls,
+				params->total_sz_lcl_hash_tbls);
 
 		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
 			params->hash_bdy.size);

+ 2 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h

@@ -310,9 +310,10 @@ int ipahal_flt_parse_hw_rule(u8 *rule_addr,
 /*
  * ipa_fltrt_get_aligned_lcl_bdy_size() - Calculate real SRAM block aligned size
  *  required for flt table bodies
+ * @num_lcl_tbls: [in] Number of the tables
  * @total_sz_lcl_tbls: [in] The size in driver cashe
  */
-u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 total_sz_lcl_tbls);
+u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 num_lcl_tbls, u32 total_sz_lcl_tbls);
 
 
 #endif /* _IPAHAL_FLTRT_H_ */

+ 130 - 27
drivers/platform/msm/ipa/ipa_v3/rmnet_ll_ipa.c

@@ -26,7 +26,14 @@ enum ipa_rmnet_ll_state {
 
 
 #define IPA_WWAN_CONS_DESC_FIFO_SZ 256
-#define RMNET_LL_QUEUE_MAX ((2 * IPA_WWAN_CONS_DESC_FIFO_SZ) - 1)
+/* Allow max -2 packets, to account for any frags. */
+#define RMNET_LL_QUEUE_MAX ((2 * IPA_WWAN_CONS_DESC_FIFO_SZ) - 2)
+
+#define IPA_RMNET_LL_RECEIVE 1
+#define IPA_RMNET_LL_FLOW_EVT 2
+
+#define IPA_RMNET_LL_FREE_CREDIT_THRSHLD 64
+#define IPA_RMNET_LL_FREE_CREDIT_THRSHLD_MAX 128
 
 struct ipa3_rmnet_ll_cb_info {
 	ipa_rmnet_ll_ready_cb ready_cb;
@@ -70,6 +77,8 @@ struct rmnet_ll_ipa3_context {
 	u32 ipa3_to_apps_low_lat_data_hdl;
 	spinlock_t tx_lock;
 	struct ipa3_rmnet_ll_cb_info cb_info;
+	atomic_t under_flow_controlled_state;
+	u32 free_credit_thrshld;
 	struct sk_buff_head tx_queue;
 	u32 rmnet_ll_pm_hdl;
 	struct rmnet_ll_ipa3_debugfs dbgfs;
@@ -125,12 +134,54 @@ static ssize_t rmnet_ll_ipa3_read_stats(struct file *file, char __user *ubuf,
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
 
+/* debugfs read: report the current rmnet_ll free-credit threshold. */
+static ssize_t rmnet_ll_ipa3_read_free_credit_threshld(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	/* free_credit_thrshld is u32: print with %u, not %d */
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Free credit Threshold = %u\n",
+				rmnet_ll_ipa3_ctx->free_credit_thrshld);
+	return simple_read_from_buffer(buf, count, ppos, dbg_buff, nbytes);
+}
+/* debugfs write: update the rmnet_ll free-credit threshold; accepted
+ * range is 1..IPA_RMNET_LL_FREE_CREDIT_THRSHLD_MAX, anything else is
+ * rejected with a log message and the old value is kept.
+ */
+static ssize_t rmnet_ll_ipa3_write_free_credit_threshld(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ret;
+	u32 free_credit_thrshld = 0;
+
+	if (count >= sizeof(dbg_buff))
+		return -EFAULT;
+
+	ret = kstrtou32_from_user(buf, count, 0, &free_credit_thrshld);
+	if (ret)
+		return ret;
+
+	if (free_credit_thrshld != 0 &&
+		free_credit_thrshld <= IPA_RMNET_LL_FREE_CREDIT_THRSHLD_MAX)
+		rmnet_ll_ipa3_ctx->free_credit_thrshld = free_credit_thrshld;
+	else
+		IPAERR("Invalid value \n");
+
+	/* free_credit_thrshld is u32: print with %u, not %d */
+	IPADBG("Updated free credit threshold = %u",
+		rmnet_ll_ipa3_ctx->free_credit_thrshld);
+
+	return count;
+}
+
+
+#define READ_WRITE_MODE 0664
 #define READ_ONLY_MODE  0444
 static const struct rmnet_ll_ipa3_debugfs_file debugfs_files[] = {
 	{
 		"stats", READ_ONLY_MODE, NULL, {
 			.read = rmnet_ll_ipa3_read_stats
 		}
+	}, {
+		"free_credit_threshld", READ_WRITE_MODE, NULL, {
+			.read = rmnet_ll_ipa3_read_free_credit_threshld,
+			.write = rmnet_ll_ipa3_write_free_credit_threshld,
+		}
 	},
 };
 
@@ -219,6 +270,7 @@ int ipa3_rmnet_ll_init(void)
 	mutex_init(&rmnet_ll_ipa3_ctx->lock);
 	spin_lock_init(&rmnet_ll_ipa3_ctx->tx_lock);
 	rmnet_ll_ipa3_ctx->pipe_state = IPA_RMNET_LL_PIPE_NOT_READY;
+	rmnet_ll_ipa3_ctx->free_credit_thrshld = IPA_RMNET_LL_FREE_CREDIT_THRSHLD;
 	rmnet_ll_ipa3_debugfs_init();
 	return 0;
 }
@@ -427,6 +479,7 @@ int ipa3_setup_apps_low_lat_data_cons_pipe(
 		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_PIPE_READY;
 	else
 		rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_START;
+	atomic_set(&rmnet_ll_ipa3_ctx->under_flow_controlled_state, 0);
 	mutex_unlock(&rmnet_ll_ipa3_ctx->lock);
 
 	return 0;
@@ -578,29 +631,16 @@ int ipa3_teardown_apps_low_lat_data_pipes(void)
 int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 {
 	int ret;
-	int len;
+	int len, free_desc = 0;
 	unsigned long flags;
 
 	if (!ipa3_ctx->rmnet_ll_enable) {
 		IPAERR("low lat data pipe not supported\n");
 		kfree_skb(skb);
-		return 0;
+		return -ENODEV;
 	}
 
 	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
-	/* we cannot infinitely queue the packet */
-	if ((atomic_read(
-		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)
-		>= RMNET_LL_QUEUE_MAX)) {
-		IPAERR_RL("IPA LL TX queue full\n");
-		rmnet_ll_ipa3_ctx->stats.tx_pkt_dropped++;
-		rmnet_ll_ipa3_ctx->stats.tx_byte_dropped +=
-			skb->len;
-		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
-			flags);
-		kfree_skb(skb);
-		return -EAGAIN;
-	}
 
 	if (rmnet_ll_ipa3_ctx->state != IPA_RMNET_LL_START) {
 		IPAERR("bad rmnet_ll state %d\n",
@@ -611,15 +651,37 @@ int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
 			flags);
 		kfree_skb(skb);
-		return 0;
+		return -EINVAL;
+	}
+
+	/* Letting RMNET LL layer to do the flow control. */
+	if (!atomic_read(
+		&rmnet_ll_ipa3_ctx->under_flow_controlled_state) &&
+		(atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts) >= 0) &&
+		((atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)+
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue))
+		>= RMNET_LL_QUEUE_MAX)) {
+		IPADBG("IPA LL TX queue full, %d + %d\n",
+			atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts),
+			skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue));
+		atomic_set(&rmnet_ll_ipa3_ctx->under_flow_controlled_state, 1);
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+		return -EAGAIN;
 	}
 
 	/* if queue is not empty, means we still have pending wq */
 	if (skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue) != 0) {
 		skb_queue_tail(&rmnet_ll_ipa3_ctx->tx_queue, skb);
+		free_desc = (RMNET_LL_QUEUE_MAX - (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)+
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue)));
 		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
 			flags);
-		return 0;
+		return (free_desc > 0) ? free_desc : 0;
 	}
 
 	/* rmnet_ll is calling from atomic context */
@@ -632,9 +694,12 @@ int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 		 */
 		queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
 			&rmnet_ll_wakeup_work, 0);
+		free_desc = (RMNET_LL_QUEUE_MAX - (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)+
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue)));
 		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
 			flags);
-		return 0;
+		return (free_desc > 0) ? free_desc : 0;
 	} else if (ret) {
 		IPAERR("[%s] fatal: ipa pm activate failed %d\n",
 			__func__, ret);
@@ -644,7 +709,7 @@ int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
 			flags);
 		kfree_skb(skb);
-		return 0;
+		return -EPERM;
 	}
 	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
 
@@ -665,21 +730,24 @@ int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 			spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
 				flags);
 			kfree_skb(skb);
-			return 0;
+			return ret;
 		}
 		spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
 		skb_queue_head(&rmnet_ll_ipa3_ctx->tx_queue, skb);
 		queue_delayed_work(rmnet_ll_ipa3_ctx->wq,
 			&rmnet_ll_wakeup_work, 0);
-		ret = 0;
+		free_desc = (RMNET_LL_QUEUE_MAX - (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)+
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue)));
 		goto out;
 	}
-
 	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock, flags);
 	atomic_inc(&rmnet_ll_ipa3_ctx->stats.outstanding_pkts);
+	free_desc = (RMNET_LL_QUEUE_MAX - (atomic_read(
+	&rmnet_ll_ipa3_ctx->stats.outstanding_pkts)+
+	skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue)));
 	rmnet_ll_ipa3_ctx->stats.tx_pkt_sent++;
 	rmnet_ll_ipa3_ctx->stats.tx_byte_sent += len;
-	ret = 0;
 
 out:
 	if (atomic_read(
@@ -687,7 +755,7 @@ out:
 		== 0)
 		ipa_pm_deferred_deactivate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
 	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
-	return ret;
+	return (free_desc > 0) ? free_desc : 0;
 }
 
 static void rmnet_ll_wakeup_ipa(struct work_struct *work)
@@ -775,6 +843,7 @@ static void apps_rmnet_ll_tx_complete_notify(void *priv,
 {
 	struct sk_buff *skb = (struct sk_buff *)data;
 	unsigned long flags;
+	u32 pending_credits = 0;
 
 	if (evt != IPA_WRITE_DONE) {
 		IPAERR("unsupported evt on Tx callback, Drop the packet\n");
@@ -789,13 +858,47 @@ static void apps_rmnet_ll_tx_complete_notify(void *priv,
 		return;
 	}
 
+	dev_kfree_skb_any(skb);
+	spin_lock_irqsave(&rmnet_ll_ipa3_ctx->tx_lock,
+		flags);
 	atomic_dec(&rmnet_ll_ipa3_ctx->stats.outstanding_pkts);
 
 	if (atomic_read(
 		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts) == 0)
 		ipa_pm_deferred_deactivate(rmnet_ll_ipa3_ctx->rmnet_ll_pm_hdl);
 
-	dev_kfree_skb_any(skb);
+	if (atomic_read(
+		&rmnet_ll_ipa3_ctx->under_flow_controlled_state)) {
+		pending_credits = (atomic_read(
+		&rmnet_ll_ipa3_ctx->stats.outstanding_pkts) +
+		skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue));
+
+		if ((RMNET_LL_QUEUE_MAX >= pending_credits) &&
+			((RMNET_LL_QUEUE_MAX - pending_credits) >=
+			rmnet_ll_ipa3_ctx->free_credit_thrshld)) {
+
+			atomic_set(&rmnet_ll_ipa3_ctx->under_flow_controlled_state, 0);
+			IPADBG("IPA LL flow control lifted, %d + %d, %d\n",
+				atomic_read(
+			&rmnet_ll_ipa3_ctx->stats.outstanding_pkts),
+				skb_queue_len(&rmnet_ll_ipa3_ctx->tx_queue),
+				atomic_read(
+			&rmnet_ll_ipa3_ctx->under_flow_controlled_state));
+			spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+				flags);
+			if (rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb) {
+				(*(rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb))(
+				(void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT),
+				NULL);
+			}
+		} else {
+			spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+				flags);
+		}
+	} else {
+		spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock,
+			flags);
+	}
 }
 
 /**
@@ -825,7 +928,7 @@ static void apps_rmnet_ll_receive_notify(void *priv,
 		rx_notify_cb_rx_data = (void *)data;
 		if (rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb) {
 			(*(rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb))(
-			rmnet_ll_ipa3_ctx->cb_info.rx_notify_cb_user_data,
+			(void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE),
 			rx_notify_cb_rx_data);
 		} else
 			goto fail;

+ 1 - 1
kernel-tests/network_traffic/UlsoPacket.h

@@ -148,7 +148,7 @@ public:
     UlsoPacket(){
         mQmapHeader.setmPacketLength(mInternetHeader.size() + mTransportHeader.size() + mPayload.size());
         mInternetHeader.adjust(mTransportHeader.size() + mPayload.size(), mTransportHeader.protocolNum());
-        adjustHeader(mTransportHeader);
+        adjustHeader(mTransportHeader, 0, true);
     }
 
     vector<bool> asVector() const {

+ 4 - 4
kernel-tests/network_traffic/main.cpp

@@ -121,7 +121,7 @@ void testIpv4Tcp(){
     using L3Type = TcpHeader;
     using PacketType = UlsoPacket<L3Type, L2Type>;
     size_t payloadSize = 91, segmentSize = 32;
-    size_t packetSize = QmapHeader::size() + L2Type::size() + L3Type::size() + payloadSize;
+    size_t packetSize = QmapHeader::mSize + L2Type::mSize + L3Type::mSize + payloadSize;
 
     PacketType p1(segmentSize, payloadSize);
     cout << p1 << endl;
@@ -143,13 +143,13 @@ void testIpv4Udp(){
     using L3Type = UdpHeader;
     using PacketType = UlsoPacket<L3Type, L2Type>;
     size_t payloadSize = 80, segmentSize = 32;
-    size_t packetSize = QmapHeader::size() + L2Type::size() + L3Type::size() + payloadSize;
+    size_t packetSize = QmapHeader::mSize + L2Type::mSize + L3Type::mSize + payloadSize;
 
     PacketType p1(segmentSize, payloadSize);
     cout << p1 << endl;
     uint8_t ipv4UdpHeaderBuf[packetSize];
     p1.asArray(ipv4UdpHeaderBuf);
-    uint8_t *udpHeaderPtr = ipv4UdpHeaderBuf + QmapHeader::size() + L2Type::size();
+    uint8_t *udpHeaderPtr = ipv4UdpHeaderBuf + QmapHeader::mSize + L2Type::mSize;
     uint8_t *goldBuf = udpPacket.l3Packet();
     for(unsigned int i=0; i<udpPacket.l3PacketSize(); i++){
         if(udpHeaderPtr[i] != goldBuf[i]){
@@ -162,7 +162,7 @@ void testIpv4Udp(){
 
 template<typename L3Type, typename L2Type>
 size_t packetTestOffset(const struct Packet& p){
-    return QmapHeader::size() + (p.l2Size == 0) * L2Type::size() + (p.l3Size == 0) * L3Type::size();
+    return QmapHeader::mSize + (p.l2Size == 0) * L2Type::mSize + (p.l3Size == 0) * L3Type::mSize;
 }
 
 template<typename L3Type, typename L2Type>